repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9
values |
|---|---|---|---|---|---|---|---|---|---|---|
DLYuanGod/TinyGPT-V | minigpt4/datasets/builders/image_text_pair_builder.py | [
{
"identifier": "registry",
"path": "minigpt4/common/registry.py",
"snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def ... | import os
import logging
import warnings
from minigpt4.common.registry import registry
from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from minigpt4.datasets.datasets.laion_dataset import LaionDataset
from minigpt4.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset
from minigpt4.datasets.datasets.text_caps import TextCapDataset
from minigpt4.datasets.datasets.llava_dataset import LlavaDetailDataset, LlavaReasonDataset, LlavaConversationDataset
from minigpt4.datasets.datasets.unnatural_instruction import UnnaturalDataset
from minigpt4.datasets.datasets.multitask_conversation import MultiTaskConversationDataset
from minigpt4.datasets.datasets.flickr import GroundedDetailDataset,CaptionToObjectDataset,PhraseToObjectDataset
from minigpt4.datasets.datasets.vg_dataset import ReferVisualGenomeDataset
from minigpt4.datasets.datasets.coco_dataset import ReferCOCODataset, InvReferCOCODataset
from minigpt4.datasets.datasets.gqa_datasets import GQADataset
from minigpt4.datasets.datasets.aok_vqa_datasets import AOKVQADataset
from minigpt4.datasets.datasets.coco_vqa_datasets import COCOVQADataset
from minigpt4.datasets.datasets.ocrvqa_dataset import OCRVQADataset
from minigpt4.datasets.datasets.coco_caption import COCOCapDataset | 10,369 |
@registry.register_builder("multitask_conversation")
class MultitaskConversationBuilder(BaseDatasetBuilder):
train_dataset_cls = MultiTaskConversationDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/multitask_conversation/default.yaml",
}
def build_datasets(self):
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
self.build_processors()
build_info = self.config.build_info
datasets = dict()
# create datasets
dataset_cls = self.train_dataset_cls
datasets['train'] = dataset_cls(
vis_processor=self.vis_processors["train"],
text_processor=self.text_processors["train"],
ann_path=build_info.ann_path,
vis_root=build_info.image_path,
)
return datasets
@registry.register_builder("unnatural_instruction")
class UnnaturalInstructionBuilder(BaseDatasetBuilder):
train_dataset_cls = UnnaturalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/nlp/unnatural_instruction.yaml",
}
def build_datasets(self):
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
self.build_processors()
build_info = self.config.build_info
datasets = dict()
# create datasets
dataset_cls = self.train_dataset_cls
datasets['train'] = dataset_cls(
text_processor=self.text_processors["train"],
ann_path=build_info.ann_path,
)
return datasets
@registry.register_builder("llava_detail")
class LlavaDetailBuilder(BaseDatasetBuilder):
|
@registry.register_builder("multitask_conversation")
class MultitaskConversationBuilder(BaseDatasetBuilder):
train_dataset_cls = MultiTaskConversationDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/multitask_conversation/default.yaml",
}
def build_datasets(self):
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
self.build_processors()
build_info = self.config.build_info
datasets = dict()
# create datasets
dataset_cls = self.train_dataset_cls
datasets['train'] = dataset_cls(
vis_processor=self.vis_processors["train"],
text_processor=self.text_processors["train"],
ann_path=build_info.ann_path,
vis_root=build_info.image_path,
)
return datasets
@registry.register_builder("unnatural_instruction")
class UnnaturalInstructionBuilder(BaseDatasetBuilder):
train_dataset_cls = UnnaturalDataset
DATASET_CONFIG_DICT = {
"default": "configs/datasets/nlp/unnatural_instruction.yaml",
}
def build_datasets(self):
# at this point, all the annotations and image/videos should be all downloaded to the specified locations.
logging.info("Building datasets...")
self.build_processors()
build_info = self.config.build_info
datasets = dict()
# create datasets
dataset_cls = self.train_dataset_cls
datasets['train'] = dataset_cls(
text_processor=self.text_processors["train"],
ann_path=build_info.ann_path,
)
return datasets
@registry.register_builder("llava_detail")
class LlavaDetailBuilder(BaseDatasetBuilder): | train_dataset_cls = LlavaDetailDataset | 6 | 2023-12-28 05:47:18+00:00 | 12k |
ali-vilab/dreamtalk | inference_for_demo_video.py | [
{
"identifier": "get_cfg_defaults",
"path": "configs/default.py",
"snippet": "def get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n return _C.clone()"
},
{
"identifier": "DiffusionNet",
"path": "core/networks/diffusion_net.py",
"snipp... | import argparse
import json
import os
import shutil
import subprocess
import numpy as np
import torch
import torchaudio
from scipy.io import loadmat
from transformers import Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2Model
from configs.default import get_cfg_defaults
from core.networks.diffusion_net import DiffusionNet
from core.networks.diffusion_util import NoisePredictor, VarianceSchedule
from core.utils import (
crop_src_image,
get_pose_params,
get_video_style_clip,
get_wav2vec_audio_window,
)
from generators.utils import get_netG, render_video | 7,935 | gen_exp = gen_exp_stack[0].cpu().numpy()
pose_ext = pose_path[-3:]
pose = None
pose = get_pose_params(pose_path)
# (L, 9)
selected_pose = None
if len(pose) >= len(gen_exp):
selected_pose = pose[: len(gen_exp)]
else:
selected_pose = pose[-1].unsqueeze(0).repeat(len(gen_exp), 1)
selected_pose[: len(pose)] = pose
gen_exp_pose = np.concatenate((gen_exp, selected_pose), axis=1)
np.save(output_path, gen_exp_pose)
return output_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="inference for demo")
parser.add_argument("--wav_path", type=str, default="", help="path for wav")
parser.add_argument("--image_path", type=str, default="", help="path for image")
parser.add_argument("--disable_img_crop", dest="img_crop", action="store_false")
parser.set_defaults(img_crop=True)
parser.add_argument(
"--style_clip_path", type=str, default="", help="path for style_clip_mat"
)
parser.add_argument("--pose_path", type=str, default="", help="path for pose")
parser.add_argument(
"--max_gen_len",
type=int,
default=1000,
help="The maximum length (seconds) limitation for generating videos",
)
parser.add_argument(
"--cfg_scale",
type=float,
default=1.0,
help="The scale of classifier-free guidance",
)
parser.add_argument(
"--output_name",
type=str,
default="test",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
)
args = parser.parse_args()
if args.device == "cuda" and not torch.cuda.is_available():
print("CUDA is not available, set --device=cpu to use CPU.")
exit(1)
device = torch.device(args.device)
cfg = get_cfg_defaults()
cfg.CF_GUIDANCE.SCALE = args.cfg_scale
cfg.freeze()
tmp_dir = f"tmp/{args.output_name}"
os.makedirs(tmp_dir, exist_ok=True)
# get audio in 16000Hz
wav_16k_path = os.path.join(tmp_dir, f"{args.output_name}_16K.wav")
command = f"ffmpeg -y -i {args.wav_path} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {wav_16k_path}"
subprocess.run(command.split())
# get wav2vec feat from audio
wav2vec_processor = Wav2Vec2Processor.from_pretrained(
"jonatasgrosman/wav2vec2-large-xlsr-53-english"
)
wav2vec_model = (
Wav2Vec2Model.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
.eval()
.to(device)
)
speech_array, sampling_rate = torchaudio.load(wav_16k_path)
audio_data = speech_array.squeeze().numpy()
inputs = wav2vec_processor(
audio_data, sampling_rate=16_000, return_tensors="pt", padding=True
)
with torch.no_grad():
audio_embedding = wav2vec_model(
inputs.input_values.to(device), return_dict=False
)[0]
audio_feat_path = os.path.join(tmp_dir, f"{args.output_name}_wav2vec.npy")
np.save(audio_feat_path, audio_embedding[0].cpu().numpy())
# get src image
src_img_path = os.path.join(tmp_dir, "src_img.png")
if args.img_crop:
crop_src_image(args.image_path, src_img_path, 0.4)
else:
shutil.copy(args.image_path, src_img_path)
with torch.no_grad():
# get diff model and load checkpoint
diff_net = get_diff_net(cfg, device).to(device)
# generate face motion
face_motion_path = os.path.join(tmp_dir, f"{args.output_name}_facemotion.npy")
inference_one_video(
cfg,
audio_feat_path,
args.style_clip_path,
args.pose_path,
face_motion_path,
diff_net,
device,
max_audio_len=args.max_gen_len,
)
# get renderer
|
@torch.no_grad()
def get_diff_net(cfg, device):
diff_net = DiffusionNet(
cfg=cfg,
net=NoisePredictor(cfg),
var_sched=VarianceSchedule(
num_steps=cfg.DIFFUSION.SCHEDULE.NUM_STEPS,
beta_1=cfg.DIFFUSION.SCHEDULE.BETA_1,
beta_T=cfg.DIFFUSION.SCHEDULE.BETA_T,
mode=cfg.DIFFUSION.SCHEDULE.MODE,
),
)
checkpoint = torch.load(cfg.INFERENCE.CHECKPOINT, map_location=device)
model_state_dict = checkpoint["model_state_dict"]
diff_net_dict = {
k[9:]: v for k, v in model_state_dict.items() if k[:9] == "diff_net."
}
diff_net.load_state_dict(diff_net_dict, strict=True)
diff_net.eval()
return diff_net
@torch.no_grad()
def get_audio_feat(wav_path, output_name, wav2vec_model):
audio_feat_dir = os.path.dirname(audio_feat_path)
pass
@torch.no_grad()
def inference_one_video(
cfg,
audio_path,
style_clip_path,
pose_path,
output_path,
diff_net,
device,
max_audio_len=None,
sample_method="ddim",
ddim_num_step=10,
):
audio_raw = audio_data = np.load(audio_path)
if max_audio_len is not None:
audio_raw = audio_raw[: max_audio_len * 50]
gen_num_frames = len(audio_raw) // 2
audio_win_array = get_wav2vec_audio_window(
audio_raw,
start_idx=0,
num_frames=gen_num_frames,
win_size=cfg.WIN_SIZE,
)
audio_win = torch.tensor(audio_win_array).to(device)
audio = audio_win.unsqueeze(0)
# the second parameter is "" because of bad interface design...
style_clip_raw, style_pad_mask_raw = get_video_style_clip(
style_clip_path, "", style_max_len=256, start_idx=0
)
style_clip = style_clip_raw.unsqueeze(0).to(device)
style_pad_mask = (
style_pad_mask_raw.unsqueeze(0).to(device)
if style_pad_mask_raw is not None
else None
)
gen_exp_stack = diff_net.sample(
audio,
style_clip,
style_pad_mask,
output_dim=cfg.DATASET.FACE3D_DIM,
use_cf_guidance=cfg.CF_GUIDANCE.INFERENCE,
cfg_scale=cfg.CF_GUIDANCE.SCALE,
sample_method=sample_method,
ddim_num_step=ddim_num_step,
)
gen_exp = gen_exp_stack[0].cpu().numpy()
pose_ext = pose_path[-3:]
pose = None
pose = get_pose_params(pose_path)
# (L, 9)
selected_pose = None
if len(pose) >= len(gen_exp):
selected_pose = pose[: len(gen_exp)]
else:
selected_pose = pose[-1].unsqueeze(0).repeat(len(gen_exp), 1)
selected_pose[: len(pose)] = pose
gen_exp_pose = np.concatenate((gen_exp, selected_pose), axis=1)
np.save(output_path, gen_exp_pose)
return output_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="inference for demo")
parser.add_argument("--wav_path", type=str, default="", help="path for wav")
parser.add_argument("--image_path", type=str, default="", help="path for image")
parser.add_argument("--disable_img_crop", dest="img_crop", action="store_false")
parser.set_defaults(img_crop=True)
parser.add_argument(
"--style_clip_path", type=str, default="", help="path for style_clip_mat"
)
parser.add_argument("--pose_path", type=str, default="", help="path for pose")
parser.add_argument(
"--max_gen_len",
type=int,
default=1000,
help="The maximum length (seconds) limitation for generating videos",
)
parser.add_argument(
"--cfg_scale",
type=float,
default=1.0,
help="The scale of classifier-free guidance",
)
parser.add_argument(
"--output_name",
type=str,
default="test",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
)
args = parser.parse_args()
if args.device == "cuda" and not torch.cuda.is_available():
print("CUDA is not available, set --device=cpu to use CPU.")
exit(1)
device = torch.device(args.device)
cfg = get_cfg_defaults()
cfg.CF_GUIDANCE.SCALE = args.cfg_scale
cfg.freeze()
tmp_dir = f"tmp/{args.output_name}"
os.makedirs(tmp_dir, exist_ok=True)
# get audio in 16000Hz
wav_16k_path = os.path.join(tmp_dir, f"{args.output_name}_16K.wav")
command = f"ffmpeg -y -i {args.wav_path} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {wav_16k_path}"
subprocess.run(command.split())
# get wav2vec feat from audio
wav2vec_processor = Wav2Vec2Processor.from_pretrained(
"jonatasgrosman/wav2vec2-large-xlsr-53-english"
)
wav2vec_model = (
Wav2Vec2Model.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
.eval()
.to(device)
)
speech_array, sampling_rate = torchaudio.load(wav_16k_path)
audio_data = speech_array.squeeze().numpy()
inputs = wav2vec_processor(
audio_data, sampling_rate=16_000, return_tensors="pt", padding=True
)
with torch.no_grad():
audio_embedding = wav2vec_model(
inputs.input_values.to(device), return_dict=False
)[0]
audio_feat_path = os.path.join(tmp_dir, f"{args.output_name}_wav2vec.npy")
np.save(audio_feat_path, audio_embedding[0].cpu().numpy())
# get src image
src_img_path = os.path.join(tmp_dir, "src_img.png")
if args.img_crop:
crop_src_image(args.image_path, src_img_path, 0.4)
else:
shutil.copy(args.image_path, src_img_path)
with torch.no_grad():
# get diff model and load checkpoint
diff_net = get_diff_net(cfg, device).to(device)
# generate face motion
face_motion_path = os.path.join(tmp_dir, f"{args.output_name}_facemotion.npy")
inference_one_video(
cfg,
audio_feat_path,
args.style_clip_path,
args.pose_path,
face_motion_path,
diff_net,
device,
max_audio_len=args.max_gen_len,
)
# get renderer | renderer = get_netG("checkpoints/renderer.pt", device) | 8 | 2023-12-28 05:39:31+00:00 | 12k |
jiawei-ren/dreamgaussian4d | diffusers/src/diffusers/schedulers/scheduling_sde_ve.py | [
{
"identifier": "ConfigMixin",
"path": "diffusers/src/diffusers/configuration_utils.py",
"snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~Conf... | import math
import torch
from dataclasses import dataclass
from typing import Optional, Tuple, Union
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput | 9,419 | # Copyright 2023 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
@dataclass
class SdeVeOutput(BaseOutput):
"""
Output class for the scheduler's `step` function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Mean averaged `prev_sample` over previous timesteps.
"""
prev_sample: torch.FloatTensor
prev_sample_mean: torch.FloatTensor
| # Copyright 2023 Google Brain and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
@dataclass
class SdeVeOutput(BaseOutput):
"""
Output class for the scheduler's `step` function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
prev_sample_mean (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Mean averaged `prev_sample` over previous timesteps.
"""
prev_sample: torch.FloatTensor
prev_sample_mean: torch.FloatTensor
| class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin): | 4 | 2023-12-28 08:17:40+00:00 | 12k |
FoundationVision/UniRef | projects/UniRef/uniref/data/datasets/ytvis.py | [
{
"identifier": "Boxes",
"path": "detectron2/structures/boxes.py",
"snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (sup... | import contextlib
import io
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
import detectron2.data.datasets # noqa # add pre-defined metadata
import sys
import copy
from fvcore.common.file_io import PathManager
from fvcore.common.timer import Timer
from detectron2.structures import Boxes, BoxMode, PolygonMasks
from detectron2.data import DatasetCatalog, MetadataCatalog
from pycocotools.ytvos import YTVOS
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
from PIL import Image | 10,338 | logger.info("Loaded {} videos in YTVIS format from {}".format(len(vids_anns), json_file))
dataset_dicts = []
ann_keys = ["iscrowd", "category_id", "id"] + (extra_annotation_keys or [])
num_instances_without_valid_segmentation = 0
for (vid_dict, anno_dict_list) in vids_anns:
record = {}
record["file_names"] = [os.path.join(image_root, vid_dict["file_names"][i]) for i in range(vid_dict["length"])]
record["height"] = vid_dict["height"]
record["width"] = vid_dict["width"]
record["length"] = vid_dict["length"]
if "expressions" in vid_dict:
# for ref-youtube-vos and ref-davis
record["expressions"] = vid_dict["expressions"]
# for evaluation
if "exp_id" in vid_dict:
record["exp_id"] = vid_dict["exp_id"]
if "video" in vid_dict:
record["video"] = vid_dict["video"]
video_id = record["video_id"] = vid_dict["id"]
# store the dataset name
if "expressions" in vid_dict:
record["task"] = "rvos"
if dataset_name.startswith("video-refcoco"):
record["dataset_name"] = "video-refcoco"
elif dataset_name.startswith("refytvos"):
record["dataset_name"] = "refytvos"
elif dataset_name.startswith("refdavis"):
record["dataset_name"] = dataset_name # refdavis-val-0,1,2,3
else:
record["task"] = "vos"
if dataset_name.startswith("video-coco"):
record["dataset_name"] = "video-coco"
elif dataset_name.startswith("ytbvos18"):
record["dataset_name"] = "ytbvos18"
elif dataset_name.startswith("ytbvos19"):
record["dataset_name"] = "ytbvos19"
elif dataset_name.startswith("davis17"):
record["dataset_name"] = "davis17"
elif dataset_name.startswith("ovis"):
record["dataset_name"] = "ovis"
elif dataset_name.startswith("vos-lvos"):
record["dataset_name"] = "vos-lvos"
elif dataset_name.startswith("mose"):
record["dataset_name"] = "mose"
video_objs = []
for frame_idx in range(record["length"]):
frame_objs = []
for anno in anno_dict_list:
assert anno["video_id"] == video_id
obj = {key: anno[key] for key in ann_keys if key in anno}
_bboxes = anno.get("bboxes", None)
_segm = anno.get("segmentations", None)
if not (_bboxes and _segm and _bboxes[frame_idx] and _segm[frame_idx]):
continue
if "ori_id" in anno:
# for VOS inference
obj["ori_id"] = anno["ori_id"]
bbox = _bboxes[frame_idx]
segm = _segm[frame_idx]
obj["bbox"] = bbox
obj["bbox_mode"] = BoxMode.XYWH_ABS
if isinstance(segm, dict):
if isinstance(segm["counts"], list):
# convert to compressed RLE
segm = mask_util.frPyObjects(segm, *segm["size"])
elif segm:
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
num_instances_without_valid_segmentation += 1
continue # ignore this instance
obj["segmentation"] = segm
if id_map:
obj["category_id"] = id_map[obj["category_id"]]
frame_objs.append(obj)
video_objs.append(frame_objs)
record["annotations"] = video_objs
dataset_dicts.append(record)
if num_instances_without_valid_segmentation > 0:
logger.warning(
"Filtered out {} instances without valid segmentation. ".format(
num_instances_without_valid_segmentation
)
+ "There might be issues in your dataset generation process. "
"A valid polygon should be a list[float] with even length >= 6."
)
return dataset_dicts
def register_ytvis_instances(name, metadata, json_file, image_root):
"""
Register a dataset in YTVIS's json annotation format for
instance tracking.
Args:
name (str): the name that identifies a dataset, e.g. "ytvis_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains functions to parse YTVIS dataset of
COCO-format annotations into dicts in "Detectron2 format".
"""
logger = logging.getLogger(__name__)
__all__ = ["load_ytvis_json", "register_ytvis_instances"]
YTVIS_CATEGORIES_2019 = [
{"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
{"color": [0, 82, 0], "isthing": 1, "id": 2, "name": "giant_panda"},
{"color": [119, 11, 32], "isthing": 1, "id": 3, "name": "lizard"},
{"color": [165, 42, 42], "isthing": 1, "id": 4, "name": "parrot"},
{"color": [134, 134, 103], "isthing": 1, "id": 5, "name": "skateboard"},
{"color": [0, 0, 142], "isthing": 1, "id": 6, "name": "sedan"},
{"color": [255, 109, 65], "isthing": 1, "id": 7, "name": "ape"},
{"color": [0, 226, 252], "isthing": 1, "id": 8, "name": "dog"},
{"color": [5, 121, 0], "isthing": 1, "id": 9, "name": "snake"},
{"color": [0, 60, 100], "isthing": 1, "id": 10, "name": "monkey"},
{"color": [250, 170, 30], "isthing": 1, "id": 11, "name": "hand"},
{"color": [100, 170, 30], "isthing": 1, "id": 12, "name": "rabbit"},
{"color": [179, 0, 194], "isthing": 1, "id": 13, "name": "duck"},
{"color": [255, 77, 255], "isthing": 1, "id": 14, "name": "cat"},
{"color": [120, 166, 157], "isthing": 1, "id": 15, "name": "cow"},
{"color": [73, 77, 174], "isthing": 1, "id": 16, "name": "fish"},
{"color": [0, 80, 100], "isthing": 1, "id": 17, "name": "train"},
{"color": [182, 182, 255], "isthing": 1, "id": 18, "name": "horse"},
{"color": [0, 143, 149], "isthing": 1, "id": 19, "name": "turtle"},
{"color": [174, 57, 255], "isthing": 1, "id": 20, "name": "bear"},
{"color": [0, 0, 230], "isthing": 1, "id": 21, "name": "motorbike"},
{"color": [72, 0, 118], "isthing": 1, "id": 22, "name": "giraffe"},
{"color": [255, 179, 240], "isthing": 1, "id": 23, "name": "leopard"},
{"color": [0, 125, 92], "isthing": 1, "id": 24, "name": "fox"},
{"color": [209, 0, 151], "isthing": 1, "id": 25, "name": "deer"},
{"color": [188, 208, 182], "isthing": 1, "id": 26, "name": "owl"},
{"color": [145, 148, 174], "isthing": 1, "id": 27, "name": "surfboard"},
{"color": [106, 0, 228], "isthing": 1, "id": 28, "name": "airplane"},
{"color": [0, 0, 70], "isthing": 1, "id": 29, "name": "truck"},
{"color": [199, 100, 0], "isthing": 1, "id": 30, "name": "zebra"},
{"color": [166, 196, 102], "isthing": 1, "id": 31, "name": "tiger"},
{"color": [110, 76, 0], "isthing": 1, "id": 32, "name": "elephant"},
{"color": [133, 129, 255], "isthing": 1, "id": 33, "name": "snowboard"},
{"color": [0, 0, 192], "isthing": 1, "id": 34, "name": "boat"},
{"color": [183, 130, 88], "isthing": 1, "id": 35, "name": "shark"},
{"color": [130, 114, 135], "isthing": 1, "id": 36, "name": "mouse"},
{"color": [107, 142, 35], "isthing": 1, "id": 37, "name": "frog"},
{"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "eagle"},
{"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "earless_seal"},
{"color": [255, 208, 186], "isthing": 1, "id": 40, "name": "tennis_racket"},
]
YTVIS_CATEGORIES_2021 = [
{"color": [106, 0, 228], "isthing": 1, "id": 1, "name": "airplane"},
{"color": [174, 57, 255], "isthing": 1, "id": 2, "name": "bear"},
{"color": [255, 109, 65], "isthing": 1, "id": 3, "name": "bird"},
{"color": [0, 0, 192], "isthing": 1, "id": 4, "name": "boat"},
{"color": [0, 0, 142], "isthing": 1, "id": 5, "name": "car"},
{"color": [255, 77, 255], "isthing": 1, "id": 6, "name": "cat"},
{"color": [120, 166, 157], "isthing": 1, "id": 7, "name": "cow"},
{"color": [209, 0, 151], "isthing": 1, "id": 8, "name": "deer"},
{"color": [0, 226, 252], "isthing": 1, "id": 9, "name": "dog"},
{"color": [179, 0, 194], "isthing": 1, "id": 10, "name": "duck"},
{"color": [174, 255, 243], "isthing": 1, "id": 11, "name": "earless_seal"},
{"color": [110, 76, 0], "isthing": 1, "id": 12, "name": "elephant"},
{"color": [73, 77, 174], "isthing": 1, "id": 13, "name": "fish"},
{"color": [250, 170, 30], "isthing": 1, "id": 14, "name": "flying_disc"},
{"color": [0, 125, 92], "isthing": 1, "id": 15, "name": "fox"},
{"color": [107, 142, 35], "isthing": 1, "id": 16, "name": "frog"},
{"color": [0, 82, 0], "isthing": 1, "id": 17, "name": "giant_panda"},
{"color": [72, 0, 118], "isthing": 1, "id": 18, "name": "giraffe"},
{"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
{"color": [255, 179, 240], "isthing": 1, "id": 20, "name": "leopard"},
{"color": [119, 11, 32], "isthing": 1, "id": 21, "name": "lizard"},
{"color": [0, 60, 100], "isthing": 1, "id": 22, "name": "monkey"},
{"color": [0, 0, 230], "isthing": 1, "id": 23, "name": "motorbike"},
{"color": [130, 114, 135], "isthing": 1, "id": 24, "name": "mouse"},
{"color": [165, 42, 42], "isthing": 1, "id": 25, "name": "parrot"},
{"color": [220, 20, 60], "isthing": 1, "id": 26, "name": "person"},
{"color": [100, 170, 30], "isthing": 1, "id": 27, "name": "rabbit"},
{"color": [183, 130, 88], "isthing": 1, "id": 28, "name": "shark"},
{"color": [134, 134, 103], "isthing": 1, "id": 29, "name": "skateboard"},
{"color": [5, 121, 0], "isthing": 1, "id": 30, "name": "snake"},
{"color": [133, 129, 255], "isthing": 1, "id": 31, "name": "snowboard"},
{"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "squirrel"},
{"color": [145, 148, 174], "isthing": 1, "id": 33, "name": "surfboard"},
{"color": [255, 208, 186], "isthing": 1, "id": 34, "name": "tennis_racket"},
{"color": [166, 196, 102], "isthing": 1, "id": 35, "name": "tiger"},
{"color": [0, 80, 100], "isthing": 1, "id": 36, "name": "train"},
{"color": [0, 0, 70], "isthing": 1, "id": 37, "name": "truck"},
{"color": [0, 143, 149], "isthing": 1, "id": 38, "name": "turtle"},
{"color": [0, 228, 0], "isthing": 1, "id": 39, "name": "whale"},
{"color": [199, 100, 0], "isthing": 1, "id": 40, "name": "zebra"},
]
def _get_ytvis_2019_instances_meta():
thing_ids = [k["id"] for k in YTVIS_CATEGORIES_2019 if k["isthing"] == 1]
thing_colors = [k["color"] for k in YTVIS_CATEGORIES_2019 if k["isthing"] == 1]
assert len(thing_ids) == 40, len(thing_ids)
# Mapping from the incontiguous YTVIS category id to an id in [0, 39]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in YTVIS_CATEGORIES_2019 if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
def _get_ytvis_2021_instances_meta():
thing_ids = [k["id"] for k in YTVIS_CATEGORIES_2021 if k["isthing"] == 1]
thing_colors = [k["color"] for k in YTVIS_CATEGORIES_2021 if k["isthing"] == 1]
assert len(thing_ids) == 40, len(thing_ids)
# Mapping from the incontiguous YTVIS category id to an id in [0, 39]
thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
thing_classes = [k["name"] for k in YTVIS_CATEGORIES_2021 if k["isthing"] == 1]
ret = {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes,
"thing_colors": thing_colors,
}
return ret
def load_ytvis_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
    """Load a json file in YTVIS annotation format into Detectron2-style dicts.

    Args:
        json_file: path to the YTVIS-format json annotation file.
        image_root: directory that contains the per-video frame images.
        dataset_name: name of a dataset registered in MetadataCatalog; when
            given, its metadata (thing_classes, category id mapping) is filled
            in and per-record "task"/"dataset_name" fields are attached.
        extra_annotation_keys: extra per-annotation keys to copy verbatim
            from the json into each object dict.

    Returns:
        list[dict]: one record per video; each record carries per-frame
        "annotations" (a list of object dicts for every frame).
    """
    timer = Timer()
    json_file = PathManager.get_local_path(json_file)
    # The YTVOS API prints while loading; silence its stdout.
    with contextlib.redirect_stdout(io.StringIO()):
        ytvis_api = YTVOS(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
    id_map = None
    if dataset_name is not None:
        meta = MetadataCatalog.get(dataset_name)
        cat_ids = sorted(ytvis_api.getCatIds())
        cats = ytvis_api.loadCats(cat_ids)
        # The categories in a custom json file may not be sorted.
        thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
        meta.thing_classes = thing_classes
        # In COCO, certain category ids are artificially removed,
        # and by convention they are always ignored.
        # We deal with COCO's id issue and translate
        # the category ids to contiguous ids in [0, 80).
        # It works by looking at the "categories" field in the json, therefore
        # if users' own json also have incontiguous ids, we'll
        # apply this mapping as well but print a warning.
        if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
            if "coco" not in dataset_name:
                logger.warning(
                    """
Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
"""
                )
        id_map = {v: i for i, v in enumerate(cat_ids)}
        meta.thing_dataset_id_to_contiguous_id = id_map
    # sort indices for reproducible results
    vid_ids = sorted(ytvis_api.vids.keys())
    # vids is a list of dicts, each looks something like:
    # {'license': 1,
    # 'flickr_url': ' ',
    # 'file_names': ['ff25f55852/00000.jpg', 'ff25f55852/00005.jpg', ..., 'ff25f55852/00175.jpg'],
    # 'height': 720,
    # 'width': 1280,
    # 'length': 36,
    # 'date_captured': '2019-04-11 00:55:41.903902',
    # 'id': 2232}
    vids = ytvis_api.loadVids(vid_ids)
    anns = [ytvis_api.vidToAnns[vid_id] for vid_id in vid_ids]
    total_num_valid_anns = sum([len(x) for x in anns])
    total_num_anns = len(ytvis_api.anns)
    if total_num_valid_anns < total_num_anns:
        logger.warning(
            f"{json_file} contains {total_num_anns} annotations, but only "
            f"{total_num_valid_anns} of them match to images in the file."
        )
    vids_anns = list(zip(vids, anns))
    logger.info("Loaded {} videos in YTVIS format from {}".format(len(vids_anns), json_file))
    dataset_dicts = []
    # Per-object keys copied verbatim from the json annotation.
    ann_keys = ["iscrowd", "category_id", "id"] + (extra_annotation_keys or [])
    num_instances_without_valid_segmentation = 0
    for (vid_dict, anno_dict_list) in vids_anns:
        record = {}
        record["file_names"] = [os.path.join(image_root, vid_dict["file_names"][i]) for i in range(vid_dict["length"])]
        record["height"] = vid_dict["height"]
        record["width"] = vid_dict["width"]
        record["length"] = vid_dict["length"]
        if "expressions" in vid_dict:
            # for ref-youtube-vos and ref-davis
            record["expressions"] = vid_dict["expressions"]
            # for evaluation
            if "exp_id" in vid_dict:
                record["exp_id"] = vid_dict["exp_id"]
            if "video" in vid_dict:
                record["video"] = vid_dict["video"]
        video_id = record["video_id"] = vid_dict["id"]
        # store the dataset name
        # NOTE(review): the branches below dereference dataset_name via
        # .startswith() even though the signature allows dataset_name=None —
        # confirm all callers pass a name, or guard before this point.
        if "expressions" in vid_dict:
            record["task"] = "rvos"
            if dataset_name.startswith("video-refcoco"):
                record["dataset_name"] = "video-refcoco"
            elif dataset_name.startswith("refytvos"):
                record["dataset_name"] = "refytvos"
            elif dataset_name.startswith("refdavis"):
                record["dataset_name"] = dataset_name # refdavis-val-0,1,2,3
        else:
            record["task"] = "vos"
            if dataset_name.startswith("video-coco"):
                record["dataset_name"] = "video-coco"
            elif dataset_name.startswith("ytbvos18"):
                record["dataset_name"] = "ytbvos18"
            elif dataset_name.startswith("ytbvos19"):
                record["dataset_name"] = "ytbvos19"
            elif dataset_name.startswith("davis17"):
                record["dataset_name"] = "davis17"
            elif dataset_name.startswith("ovis"):
                record["dataset_name"] = "ovis"
            elif dataset_name.startswith("vos-lvos"):
                record["dataset_name"] = "vos-lvos"
            elif dataset_name.startswith("mose"):
                record["dataset_name"] = "mose"
        video_objs = []
        for frame_idx in range(record["length"]):
            frame_objs = []
            for anno in anno_dict_list:
                assert anno["video_id"] == video_id
                obj = {key: anno[key] for key in ann_keys if key in anno}
                _bboxes = anno.get("bboxes", None)
                _segm = anno.get("segmentations", None)
                # Skip objects that are absent in this frame (no box or no mask).
                if not (_bboxes and _segm and _bboxes[frame_idx] and _segm[frame_idx]):
                    continue
                if "ori_id" in anno:
                    # for VOS inference
                    obj["ori_id"] = anno["ori_id"]
                bbox = _bboxes[frame_idx]
                segm = _segm[frame_idx]
                obj["bbox"] = bbox
                obj["bbox_mode"] = BoxMode.XYWH_ABS
                if isinstance(segm, dict):
                    if isinstance(segm["counts"], list):
                        # convert to compressed RLE
                        segm = mask_util.frPyObjects(segm, *segm["size"])
                elif segm:
                    # filter out invalid polygons (< 3 points)
                    segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
                    if len(segm) == 0:
                        num_instances_without_valid_segmentation += 1
                        continue # ignore this instance
                obj["segmentation"] = segm
                if id_map:
                    obj["category_id"] = id_map[obj["category_id"]]
                frame_objs.append(obj)
            video_objs.append(frame_objs)
        record["annotations"] = video_objs
        dataset_dicts.append(record)
    if num_instances_without_valid_segmentation > 0:
        logger.warning(
            "Filtered out {} instances without valid segmentation. ".format(
                num_instances_without_valid_segmentation
            )
            + "There might be issues in your dataset generation process. "
            "A valid polygon should be a list[float] with even length >= 6."
        )
    return dataset_dicts
def register_ytvis_instances(name, metadata, json_file, image_root):
"""
Register a dataset in YTVIS's json annotation format for
instance tracking.
Args:
name (str): the name that identifies a dataset, e.g. "ytvis_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
json_file (str): path to the json instance annotation file.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(json_file, (str, os.PathLike)), json_file
assert isinstance(image_root, (str, os.PathLike)), image_root
# 1. register a function which returns dicts | DatasetCatalog.register(name, lambda: load_ytvis_json(json_file, image_root, name)) | 3 | 2023-12-22 13:31:33+00:00 | 12k |
xhuangcv/humannorm | threestudio/models/renderers/gan_volume_renderer.py | [
{
"identifier": "BaseBackground",
"path": "threestudio/models/background/base.py",
"snippet": "class BaseBackground(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n def configure(self):\n pass\n\n def forward(self, dirs: Float[Tensor, \... | from dataclasses import dataclass
from threestudio.models.background.base import BaseBackground
from threestudio.models.geometry.base import BaseImplicitGeometry
from threestudio.models.materials.base import BaseMaterial
from threestudio.models.renderers.base import VolumeRenderer
from threestudio.utils.GAN.discriminator import NLayerDiscriminator, weights_init
from threestudio.utils.GAN.distribution import DiagonalGaussianDistribution
from threestudio.utils.GAN.mobilenet import MobileNetV3 as GlobalEncoder
from threestudio.utils.GAN.vae import Decoder as Generator
from threestudio.utils.GAN.vae import Encoder as LocalEncoder
from threestudio.utils.typing import *
import torch
import torch.nn.functional as F
import threestudio | 7,243 |
@threestudio.register("gan-volume-renderer")
class GANVolumeRenderer(VolumeRenderer):
@dataclass
class Config(VolumeRenderer.Config):
base_renderer_type: str = ""
base_renderer: Optional[VolumeRenderer.Config] = None
cfg: Config
def configure(
self,
geometry: BaseImplicitGeometry,
material: BaseMaterial,
background: BaseBackground,
) -> None:
self.base_renderer = threestudio.find(self.cfg.base_renderer_type)(
self.cfg.base_renderer,
geometry=geometry,
material=material,
background=background,
)
self.ch_mult = [1, 2, 4]
self.generator = Generator(
ch=64,
out_ch=3,
ch_mult=self.ch_mult,
num_res_blocks=1,
attn_resolutions=[],
dropout=0.0,
resamp_with_conv=True,
in_channels=7,
resolution=512,
z_channels=4,
)
self.local_encoder = LocalEncoder(
ch=32,
out_ch=3,
ch_mult=self.ch_mult,
num_res_blocks=1,
attn_resolutions=[],
dropout=0.0,
resamp_with_conv=True,
in_channels=3,
resolution=512,
z_channels=4,
)
self.global_encoder = GlobalEncoder(n_class=64)
|
@threestudio.register("gan-volume-renderer")
class GANVolumeRenderer(VolumeRenderer):
@dataclass
class Config(VolumeRenderer.Config):
base_renderer_type: str = ""
base_renderer: Optional[VolumeRenderer.Config] = None
cfg: Config
def configure(
self,
geometry: BaseImplicitGeometry,
material: BaseMaterial,
background: BaseBackground,
) -> None:
self.base_renderer = threestudio.find(self.cfg.base_renderer_type)(
self.cfg.base_renderer,
geometry=geometry,
material=material,
background=background,
)
self.ch_mult = [1, 2, 4]
self.generator = Generator(
ch=64,
out_ch=3,
ch_mult=self.ch_mult,
num_res_blocks=1,
attn_resolutions=[],
dropout=0.0,
resamp_with_conv=True,
in_channels=7,
resolution=512,
z_channels=4,
)
self.local_encoder = LocalEncoder(
ch=32,
out_ch=3,
ch_mult=self.ch_mult,
num_res_blocks=1,
attn_resolutions=[],
dropout=0.0,
resamp_with_conv=True,
in_channels=3,
resolution=512,
z_channels=4,
)
self.global_encoder = GlobalEncoder(n_class=64) | self.discriminator = NLayerDiscriminator( | 4 | 2023-12-23 12:37:48+00:00 | 12k |
jesenzhang/ComfyUI_StreamDiffusion | streamdiffusion/wrapper.py | [
{
"identifier": "StreamDiffusion",
"path": "streamdiffusion/pipeline.py",
"snippet": "class StreamDiffusion:\n def __init__(\n self,\n pipe: StableDiffusionPipeline,\n t_index_list: List[int],\n torch_dtype: torch.dtype = torch.float16,\n width: int = 512,\n ... | import gc
import os
import traceback
import numpy as np
import torch
from pathlib import Path
from typing import List, Literal, Optional, Union, Dict
from diffusers import AutoencoderTiny, StableDiffusionPipeline
from PIL import Image
from .pipeline import StreamDiffusion
from .image_utils import postprocess_image
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from polygraphy import cuda
from streamdiffusion.acceleration.tensorrt import (
TorchVAEEncoder,
compile_unet,
compile_vae_decoder,
compile_vae_encoder,
)
from streamdiffusion.acceleration.tensorrt.engine import (
AutoencoderKLEngine,
UNet2DConditionModelEngine,
)
from streamdiffusion.acceleration.tensorrt.models import (
VAE,
UNet,
VAEEncoder,
)
from streamdiffusion.acceleration.sfast import (
accelerate_with_stable_fast,
)
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
) | 7,470 | width: int = 512,
height: int = 512,
warmup: int = 10,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
do_add_noise: bool = True,
device_ids: Optional[List[int]] = None,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
enable_similar_image_filter: bool = False,
similar_image_filter_threshold: float = 0.98,
similar_image_filter_max_skip_frame: int = 10,
use_denoising_batch: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
use_safety_checker: bool = False,
):
"""
Initializes the StreamDiffusionWrapper.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
mode : Literal["img2img", "txt2img"], optional
txt2img or img2img, by default "img2img".
output_type : Literal["pil", "pt", "np", "latent"], optional
The output type of image, by default "pil".
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
If None, the default LCM-LoRA
("latent-consistency/lcm-lora-sdv1-5") will be used.
vae_id : Optional[str], optional
The vae_id to load, by default None.
If None, the default TinyVAE
("madebyollin/taesd") will be used.
device : Literal["cpu", "cuda"], optional
The device to use for inference, by default "cuda".
dtype : torch.dtype, optional
The dtype for inference, by default torch.float16.
frame_buffer_size : int, optional
The frame buffer size for denoising batch, by default 1.
width : int, optional
The width of the image, by default 512.
height : int, optional
The height of the image, by default 512.
warmup : int, optional
The number of warmup steps to perform, by default 10.
acceleration : Literal["none", "xformers", "tensorrt"], optional
The acceleration method, by default "tensorrt".
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
device_ids : Optional[List[int]], optional
The device ids to use for DataParallel, by default None.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
enable_similar_image_filter : bool, optional
Whether to enable similar image filter or not,
by default False.
similar_image_filter_threshold : float, optional
The threshold for similar image filter, by default 0.98.
similar_image_filter_max_skip_frame : int, optional
The max skip frame for similar image filter, by default 10.
use_denoising_batch : bool, optional
Whether to use denoising batch or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
seed : int, optional
The seed, by default 2.
use_safety_checker : bool, optional
Whether to use safety checker or not, by default False.
"""
self.sd_turbo = "turbo" in model_id_or_path
if mode == "txt2img":
if cfg_type != "none":
raise ValueError(
f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
)
if use_denoising_batch and frame_buffer_size > 1:
if not self.sd_turbo:
raise ValueError(
"txt2img mode cannot use denoising batch with frame_buffer_size > 1."
)
if mode == "img2img":
if not use_denoising_batch:
raise NotImplementedError(
"img2img mode must use denoising batch for now."
)
self.device = device
self.dtype = dtype
self.width = width
self.height = height
self.mode = mode
self.output_type = output_type
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
self.t_index_list =t_index_list
self.cfg_type =cfg_type
self.use_denoising_batch = use_denoising_batch
self.use_safety_checker = use_safety_checker
self.do_add_noise =do_add_noise
self.seed=seed
|
torch.set_grad_enabled(False)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
class StreamDiffusionWrapper:
def __init__(
self,
model_id_or_path: str,
t_index_list: List[int],
lora_dict: Optional[Dict[str, float]] = None,
mode: Literal["img2img", "txt2img"] = "img2img",
output_type: Literal["pil", "pt", "np", "latent"] = "pil",
lcm_lora_id: Optional[str] = None,
vae_id: Optional[str] = None,
device: Literal["cpu", "cuda"] = "cuda",
dtype: torch.dtype = torch.float16,
frame_buffer_size: int = 1,
width: int = 512,
height: int = 512,
warmup: int = 10,
acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
do_add_noise: bool = True,
device_ids: Optional[List[int]] = None,
use_lcm_lora: bool = True,
use_tiny_vae: bool = True,
enable_similar_image_filter: bool = False,
similar_image_filter_threshold: float = 0.98,
similar_image_filter_max_skip_frame: int = 10,
use_denoising_batch: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
use_safety_checker: bool = False,
):
"""
Initializes the StreamDiffusionWrapper.
Parameters
----------
model_id_or_path : str
The model id or path to load.
t_index_list : List[int]
The t_index_list to use for inference.
lora_dict : Optional[Dict[str, float]], optional
The lora_dict to load, by default None.
Keys are the LoRA names and values are the LoRA scales.
Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
mode : Literal["img2img", "txt2img"], optional
txt2img or img2img, by default "img2img".
output_type : Literal["pil", "pt", "np", "latent"], optional
The output type of image, by default "pil".
lcm_lora_id : Optional[str], optional
The lcm_lora_id to load, by default None.
If None, the default LCM-LoRA
("latent-consistency/lcm-lora-sdv1-5") will be used.
vae_id : Optional[str], optional
The vae_id to load, by default None.
If None, the default TinyVAE
("madebyollin/taesd") will be used.
device : Literal["cpu", "cuda"], optional
The device to use for inference, by default "cuda".
dtype : torch.dtype, optional
The dtype for inference, by default torch.float16.
frame_buffer_size : int, optional
The frame buffer size for denoising batch, by default 1.
width : int, optional
The width of the image, by default 512.
height : int, optional
The height of the image, by default 512.
warmup : int, optional
The number of warmup steps to perform, by default 10.
acceleration : Literal["none", "xformers", "tensorrt"], optional
The acceleration method, by default "tensorrt".
do_add_noise : bool, optional
Whether to add noise for following denoising steps or not,
by default True.
device_ids : Optional[List[int]], optional
The device ids to use for DataParallel, by default None.
use_lcm_lora : bool, optional
Whether to use LCM-LoRA or not, by default True.
use_tiny_vae : bool, optional
Whether to use TinyVAE or not, by default True.
enable_similar_image_filter : bool, optional
Whether to enable similar image filter or not,
by default False.
similar_image_filter_threshold : float, optional
The threshold for similar image filter, by default 0.98.
similar_image_filter_max_skip_frame : int, optional
The max skip frame for similar image filter, by default 10.
use_denoising_batch : bool, optional
Whether to use denoising batch or not, by default True.
cfg_type : Literal["none", "full", "self", "initialize"],
optional
The cfg_type for img2img mode, by default "self".
You cannot use anything other than "none" for txt2img mode.
seed : int, optional
The seed, by default 2.
use_safety_checker : bool, optional
Whether to use safety checker or not, by default False.
"""
self.sd_turbo = "turbo" in model_id_or_path
if mode == "txt2img":
if cfg_type != "none":
raise ValueError(
f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
)
if use_denoising_batch and frame_buffer_size > 1:
if not self.sd_turbo:
raise ValueError(
"txt2img mode cannot use denoising batch with frame_buffer_size > 1."
)
if mode == "img2img":
if not use_denoising_batch:
raise NotImplementedError(
"img2img mode must use denoising batch for now."
)
self.device = device
self.dtype = dtype
self.width = width
self.height = height
self.mode = mode
self.output_type = output_type
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
self.t_index_list =t_index_list
self.cfg_type =cfg_type
self.use_denoising_batch = use_denoising_batch
self.use_safety_checker = use_safety_checker
self.do_add_noise =do_add_noise
self.seed=seed
| self.stream: StreamDiffusion = self._load_model( | 0 | 2023-12-29 09:00:03+00:00 | 12k |
neobundy/MLX-Stable-Diffusion-WebUI | stable_diffusion/model_io.py | [
{
"identifier": "CLIPTextModel",
"path": "stable_diffusion/clip.py",
"snippet": "class CLIPTextModel(nn.Module):\n \"\"\"Implements the text encoder transformer from CLIP.\"\"\"\n\n def __init__(self, config: CLIPTextModelConfig):\n super().__init__()\n\n self.token_embedding = nn.Em... | from typing import Optional
from functools import partial
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from safetensors import safe_open as safetensor_open
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
from .models import _DEFAULT_MODEL, _MODELS
from .config import DiffuserModelPathConfig
from tqdm import tqdm
import json
import mlx.core as mx
import numpy as np | 7,581 | def _flatten(params):
return [(k, v) for p in params for (k, v) in p]
# The weights of the model can be loaded as 16-bit floating point numbers, which is a form of quantization known as half-precision floating point.
# This can reduce the memory requirements of the model by half compared to 32-bit floating point numbers, at the cost of reduced numerical precision.
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
    """Load weights from a safetensors file into *model*.

    Every tensor is read as numpy, optionally cast to float16, passed through
    *mapper* (which renames/reshapes it for this codebase), and the resulting
    flat list of (name, array) pairs is applied via ``model.update``.
    """
    dtype = np.float16 if float16 else np.float32
    _debug_print(f"Loading weights from {weight_file}")
    with safetensor_open(weight_file, framework="numpy") as f:
        keys = list(f.keys())
        # A single mapper call may return several pairs (e.g. a split weight),
        # so the per-key results are flattened before updating the model.
        weights = _flatten([mapper(k, f.get_tensor(k).astype(dtype)) for k in tqdm(keys, desc=f"Loading weights from {weight_file}...")])
    model.update(tree_unflatten(weights))
def _check_key(key: str, part: str):
    """Raise a ValueError tagged with *part* when *key* is not a known model id."""
    if key in _MODELS:
        return
    raise ValueError(
        f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
    )
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
    """Load the stable diffusion UNet from Hugging Face Hub.

    Args:
        key: Hugging Face repo id; must be one of the models listed in
            ``_MODELS`` (validated by ``_check_key``).
        float16: load the weights in half precision to halve memory use,
            at the cost of numerical precision.

    Returns:
        A ``UNetModel`` with weights loaded from the repo's safetensors file.
    """
    _check_key(key, "load_unet")
    # Download the config and create the model
    unet_config = _MODELS[key]["unet_config"]
    with open(hf_hub_download(key, unet_config)) as f:
        config = json.load(f)
    n_blocks = len(config["block_out_channels"])
    model = UNetModel(
        UNetConfig(
            in_channels=config["in_channels"],
            out_channels=config["out_channels"],
            block_out_channels=config["block_out_channels"],
            layers_per_block=[config["layers_per_block"]] * n_blocks,
            # "attention_head_dim" may be a single int (same for every block)
            # or an explicit per-block list; normalize to a list either way.
            num_attention_heads=[config["attention_head_dim"]] * n_blocks
            if isinstance(config["attention_head_dim"], int)
            else config["attention_head_dim"],
            cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
            norm_num_groups=config["norm_num_groups"],
        )
    )
    # Download the weights and map them into the model
    unet_weights = _MODELS[key]["unet"]
    weight_file = hf_hub_download(key, unet_weights)
    _load_safetensor_weights(map_unet_weights, model, weight_file, float16)
    return model
def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
    """Load the stable diffusion text encoder from Hugging Face Hub.

    Args:
        key: Hugging Face repo id; must be one of the models in ``_MODELS``.
        float16: load the weights in half precision to halve memory use.

    Returns:
        A ``CLIPTextModel`` with weights loaded from the repo.
    """
    _check_key(key, "load_text_encoder")
    # Download the config and create the model
    text_encoder_config = _MODELS[key]["text_encoder_config"]
    with open(hf_hub_download(key, text_encoder_config)) as f:
        config = json.load(f)
    model = CLIPTextModel(
        CLIPTextModelConfig(
            num_layers=config["num_hidden_layers"],
            model_dims=config["hidden_size"],
            num_heads=config["num_attention_heads"],
            max_length=config["max_position_embeddings"],
            vocab_size=config["vocab_size"],
        )
    )
    # Download the weights and map them into the model
    text_encoder_weights = _MODELS[key]["text_encoder"]
    weight_file = hf_hub_download(key, text_encoder_weights)
    _load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
    return model
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
    """Load the stable diffusion autoencoder from Hugging Face Hub.

    Args:
        key: Hugging Face repo id; must be one of the models in ``_MODELS``.
        float16: load the weights in half precision to halve memory use.

    Returns:
        An ``Autoencoder`` with weights loaded from the repo.
    """
    _check_key(key, "load_autoencoder")
    # Download the config and create the model
    vae_config = _MODELS[key]["vae_config"]
    with open(hf_hub_download(key, vae_config)) as f:
        config = json.load(f)
    model = Autoencoder(
        AutoencoderConfig(
            in_channels=config["in_channels"],
            out_channels=config["out_channels"],
            # NOTE(review): encoder output channels are doubled — presumably
            # mean + log-variance of the latent distribution; confirm against
            # the Autoencoder implementation.
            latent_channels_out=2 * config["latent_channels"],
            latent_channels_in=config["latent_channels"],
            block_out_channels=config["block_out_channels"],
            layers_per_block=config["layers_per_block"],
            norm_num_groups=config["norm_num_groups"],
        )
    )
    # Download the weights and map them into the model
    vae_weights = _MODELS[key]["vae"]
    weight_file = hf_hub_download(key, vae_weights)
    _load_safetensor_weights(map_vae_weights, model, weight_file, float16)
    return model
def load_diffusion_config(key: str = _DEFAULT_MODEL):
"""Load the stable diffusion config from Hugging Face Hub."""
_check_key(key, "load_diffusion_config")
diffusion_config = _MODELS[key]["diffusion_config"]
with open(hf_hub_download(key, diffusion_config)) as f:
config = json.load(f)
| # Copyright © 2023 Apple Inc.
logfile = 'log.txt'
_DEBUG = False
def _debug_print(*args, **kwargs):
if _DEBUG:
# Convert the arguments to a string
message = ' '.join(map(str, args))
# Print the message to the console
print(message, **kwargs)
# Open the log file in append mode and write the message
with open(logfile, 'a') as f:
f.write(message + '\n')
def _from_numpy(x):
    """Convert a numpy array to an mlx array, forcing a contiguous memory
    layout first."""
    contiguous = np.ascontiguousarray(x)
    return mx.array(contiguous)
# The `map_*_weights` functions are used to adjust the weights of a model when loading it from a file.
# The weights of the model in the file might be in a different format than the weights of the model in the current codebase.
# When you load a pre-trained model, the weights are stored in a dictionary where the keys are the names of the parameters in the model.
# If the architecture of your model is different from the architecture of the model that the weights were trained on, you might need to adjust the keys and/or the weights to match your model's architecture.
# This is what the `map_*_weights` functions are doing. They are adjusting the keys and the weights to match the architecture of the models in the current codebase.
def map_unet_weights(key, value):
    """Rename a UNet weight key from the diffusers checkpoint layout to this
    codebase's layout, adjusting the value's shape where a conv weight maps
    to a linear layer. Returns a list of (key, mlx array) pairs (usually one;
    two when a fused feed-forward weight is split).
    """
    # Map up/downsampling
    if "downsamplers" in key:
        key = key.replace("downsamplers.0.conv", "downsample")
        _debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}")
    if "upsamplers" in key:
        key = key.replace("upsamplers.0.conv", "upsample")
        _debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}")
    # Map the mid block
    if "mid_block.resnets.0" in key:
        key = key.replace("mid_block.resnets.0", "mid_blocks.0")
        _debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}")
    if "mid_block.attentions.0" in key:
        key = key.replace("mid_block.attentions.0", "mid_blocks.1")
        _debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}")
    if "mid_block.resnets.1" in key:
        key = key.replace("mid_block.resnets.1", "mid_blocks.2")
        _debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}")
    # Map attention layers
    if "to_k" in key:
        key = key.replace("to_k", "key_proj")
        _debug_print(f"Replaced 'to_k' with 'key_proj' in {key}")
    if "to_out.0" in key:
        key = key.replace("to_out.0", "out_proj")
        _debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}")
    if "to_q" in key:
        key = key.replace("to_q", "query_proj")
        _debug_print(f"Replaced 'to_q' with 'query_proj' in {key}")
    if "to_v" in key:
        key = key.replace("to_v", "value_proj")
        _debug_print(f"Replaced 'to_v' with 'value_proj' in {key}")
    # Map transformer ffn
    if "ff.net.2" in key:
        key = key.replace("ff.net.2", "linear3")
        _debug_print(f"Replaced 'ff.net.2' with 'linear3' in {key}")
    if "ff.net.0" in key:
        # The checkpoint stores the first feed-forward projection as a single
        # fused weight; split it in half into two linear layers here.
        k1 = key.replace("ff.net.0.proj", "linear1")
        k2 = key.replace("ff.net.0.proj", "linear2")
        v1, v2 = np.split(value, 2)
        _debug_print(f"Replaced 'ff.net.0.proj' with 'linear1' and 'linear2' in {key}")
        return [(k1, _from_numpy(v1)), (k2, _from_numpy(v2))]
    # The weights of this 1x1 convolutional layer would be a 4-dimensional tensor
    # with shape [out_channels, in_channels, 1, 1].
    # The squeeze() function is used to remove the dimensions of size 1 from this tensor,
    # converting it to a 2-dimensional tensor with shape [out_channels, in_channels].
    # This is because the corresponding layer in the current model might be a linear layer
    # rather than a convolutional layer, and the weights for a linear layer are expected to be a 2-dimensional tensor.
    if "conv_shortcut.weight" in key:
        value = value.squeeze()
        _debug_print(f"Squeezed 'conv_shortcut.weight' in {key}")
    # Transform the weights from 1x1 convs to linear
    if len(value.shape) == 4 and ("proj_in" in key or "proj_out" in key):
        value = value.squeeze()
        _debug_print(f"Squeezed 'proj_in' or 'proj_out' in {key}")
    # Remaining 4-D conv weights are transposed from NCHW to NHWC ordering.
    if len(value.shape) == 4:
        value = value.transpose(0, 2, 3, 1)
        _debug_print(f"Transposed dimensions in {key}")
    return [(key, _from_numpy(value))]
def map_clip_text_encoder_weights(key, value):
    """Rename a CLIP text-encoder weight key from the Hugging Face checkpoint
    layout to the layout used by this codebase's CLIPTextModel, returning the
    adjusted (key, mlx array) pair as a one-element list."""
    # Strip the wrapper prefixes used by the Hugging Face checkpoint, in order.
    for prefix in ("text_model.", "embeddings.", "encoder."):
        if key.startswith(prefix):
            key = key[len(prefix):]
            _debug_print(f"Removed '{prefix}' prefix from {key}")
    # Rename attention and feed-forward submodules to their local names.
    renames = (
        ("self_attn.", "attention."),
        ("q_proj.", "query_proj."),
        ("k_proj.", "key_proj."),
        ("v_proj.", "value_proj."),
        ("mlp.fc1", "linear1"),
        ("mlp.fc2", "linear2"),
    )
    for old, new in renames:
        if old in key:
            key = key.replace(old, new)
            _debug_print(f"Replaced '{old}' with '{new}' in {key}")
    return [(key, _from_numpy(value))]
def map_vae_weights(key, value):
    """Rename a VAE weight key from the diffusers checkpoint layout to this
    codebase's Autoencoder layout, reshaping values where a conv weight maps
    to a linear layer. Returns a one-element list of (key, mlx array).
    """
    # Map up/downsampling
    if "downsamplers" in key:
        key = key.replace("downsamplers.0.conv", "downsample")
        _debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}")
    if "upsamplers" in key:
        key = key.replace("upsamplers.0.conv", "upsample")
        _debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}")
    # Map attention layers
    if "to_k" in key:
        key = key.replace("to_k", "key_proj")
        _debug_print(f"Replaced 'to_k' with 'key_proj' in {key}")
    if "to_out.0" in key:
        key = key.replace("to_out.0", "out_proj")
        _debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}")
    if "to_q" in key:
        key = key.replace("to_q", "query_proj")
        _debug_print(f"Replaced 'to_q' with 'query_proj' in {key}")
    if "to_v" in key:
        key = key.replace("to_v", "value_proj")
        _debug_print(f"Replaced 'to_v' with 'value_proj' in {key}")
    # Map the mid block
    if "mid_block.resnets.0" in key:
        key = key.replace("mid_block.resnets.0", "mid_blocks.0")
        _debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}")
    if "mid_block.attentions.0" in key:
        key = key.replace("mid_block.attentions.0", "mid_blocks.1")
        _debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}")
    if "mid_block.resnets.1" in key:
        key = key.replace("mid_block.resnets.1", "mid_blocks.2")
        _debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}")
    # Map the quant/post_quant layers
    # NOTE(review): the squeeze suggests quant_conv is a 1x1 conv mapped to a
    # linear projection ("quant_proj") — confirm against the Autoencoder.
    if "quant_conv" in key:
        key = key.replace("quant_conv", "quant_proj")
        value = value.squeeze()
        _debug_print(f"Replaced 'quant_conv' with 'quant_proj' and squeezed value in {key}")
    # Map the conv_shortcut to linear
    if "conv_shortcut.weight" in key:
        value = value.squeeze()
        _debug_print(f"Squeezed 'conv_shortcut.weight' in {key}")
    # Rearrange the dimensions to [B, H, W, C] - Autoencoder expects: B, H, W, C = x.shape
    if len(value.shape) == 4:
        value = value.transpose(0, 2, 3, 1)
        _debug_print(f"Transposed dimensions in {key}")
    return [(key, _from_numpy(value))]
def _flatten(params):
return [(k, v) for p in params for (k, v) in p]
# The weights of the model can be loaded as 16-bit floating point numbers, which is a form of quantization known as half-precision floating point.
# This can reduce the memory requirements of the model by half compared to 32-bit floating point numbers, at the cost of reduced numerical precision.
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
    """Read a safetensors file, run every tensor through *mapper* to adapt it
    to this codebase's layout, and apply the resulting weights to *model*."""
    target_dtype = np.float16 if float16 else np.float32
    _debug_print(f"Loading weights from {weight_file}")
    with safetensor_open(weight_file, framework="numpy") as f:
        mapped = [
            mapper(name, f.get_tensor(name).astype(target_dtype))
            for name in tqdm(list(f.keys()), desc=f"Loading weights from {weight_file}...")
        ]
    model.update(tree_unflatten(_flatten(mapped)))
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion UNet from Hugging Face Hub."""
_check_key(key, "load_unet")
# Download the config and create the model
unet_config = _MODELS[key]["unet_config"]
with open(hf_hub_download(key, unet_config)) as f:
config = json.load(f)
n_blocks = len(config["block_out_channels"])
model = UNetModel(
UNetConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=[config["layers_per_block"]] * n_blocks,
num_attention_heads=[config["attention_head_dim"]] * n_blocks
if isinstance(config["attention_head_dim"], int)
else config["attention_head_dim"],
cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
norm_num_groups=config["norm_num_groups"],
)
)
# Download the weights and map them into the model
unet_weights = _MODELS[key]["unet"]
weight_file = hf_hub_download(key, unet_weights)
_load_safetensor_weights(map_unet_weights, model, weight_file, float16)
return model
def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion text encoder from Hugging Face Hub."""
_check_key(key, "load_text_encoder")
# Download the config and create the model
text_encoder_config = _MODELS[key]["text_encoder_config"]
with open(hf_hub_download(key, text_encoder_config)) as f:
config = json.load(f)
model = CLIPTextModel(
CLIPTextModelConfig(
num_layers=config["num_hidden_layers"],
model_dims=config["hidden_size"],
num_heads=config["num_attention_heads"],
max_length=config["max_position_embeddings"],
vocab_size=config["vocab_size"],
)
)
# Download the weights and map them into the model
text_encoder_weights = _MODELS[key]["text_encoder"]
weight_file = hf_hub_download(key, text_encoder_weights)
_load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
return model
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion autoencoder from Hugging Face Hub."""
_check_key(key, "load_autoencoder")
# Download the config and create the model
vae_config = _MODELS[key]["vae_config"]
with open(hf_hub_download(key, vae_config)) as f:
config = json.load(f)
model = Autoencoder(
AutoencoderConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
latent_channels_out=2 * config["latent_channels"],
latent_channels_in=config["latent_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=config["layers_per_block"],
norm_num_groups=config["norm_num_groups"],
)
)
# Download the weights and map them into the model
vae_weights = _MODELS[key]["vae"]
weight_file = hf_hub_download(key, vae_weights)
_load_safetensor_weights(map_vae_weights, model, weight_file, float16)
return model
def load_diffusion_config(key: str = _DEFAULT_MODEL):
"""Load the stable diffusion config from Hugging Face Hub."""
_check_key(key, "load_diffusion_config")
diffusion_config = _MODELS[key]["diffusion_config"]
with open(hf_hub_download(key, diffusion_config)) as f:
config = json.load(f)
| return DiffusionConfig( | 3 | 2023-12-25 05:49:34+00:00 | 12k |
Con6924/SPM | train_spm_xl.py | [
{
"identifier": "SPMNetwork",
"path": "src/models/spm.py",
"snippet": "class SPMNetwork(nn.Module):\n UNET_TARGET_REPLACE_MODULE_TRANSFORMER = [\n \"Transformer2DModel\",\n ]\n UNET_TARGET_REPLACE_MODULE_CONV = [\n \"ResnetBlock2D\",\n \"Downsample2D\",\n \"Upsample2... | import argparse
import gc
import torch
import src.engine.train_util as train_util
import wandb
from pathlib import Path
from tqdm import tqdm
from src.models.spm import (
SPMNetwork,
SPMLayer,
)
from src.engine.sampling import sample_xl
from src.models import model_util
from src.evaluation import eval_util
from src.configs import config as config_pkg
from src.configs import prompt as prompt_pkg
from src.configs.config import RootConfig
from src.configs.prompt import PromptEmbedsCache, PromptEmbedsPair, PromptSettings, PromptEmbedsXL | 7,349 | text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.target.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.target.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
# ------------------------- latent anchoring part -----------------------------
if prompt_pair.action == "erase_with_la":
# noise sampling
anchors_text, anchors_pool = sample_xl(prompt_pair, tokenizers=tokenizers, text_encoders=text_encoders)
# get latents
repeat = prompt_pair.sampling_batch_size // prompt_pair.batch_size
# TODO: target or positive?
with network:
anchor_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents.repeat(repeat, 1, 1, 1),
text_embeddings=anchors_text,
add_text_embeddings=anchors_pool,
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
with torch.no_grad():
anchor_latents_ori = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents.repeat(repeat, 1, 1, 1),
text_embeddings=anchors_text,
add_text_embeddings=anchors_pool,
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
anchor_latents_ori.requires_grad_ = False
else:
anchor_latents = None
anchor_latents_ori = None
# ----------------------------------------------------------------
positive_latents.requires_grad = False
neutral_latents.requires_grad = False
loss = prompt_pair.loss(
target_latents=target_latents,
positive_latents=positive_latents,
neutral_latents=neutral_latents,
anchor_latents=anchor_latents,
anchor_latents_ori=anchor_latents_ori,
)
loss["loss"].backward()
if config.train.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(
trainable_params, config.train.max_grad_norm, norm_type=2
)
optimizer.step()
lr_scheduler.step()
pbar.set_description(f"Loss*1k: {loss['loss'].item()*1000:.4f}")
# logging
if config.logging.use_wandb:
log_dict = {"iteration": i}
loss = {k: v.detach().cpu().item() for k, v in loss.items()}
log_dict.update(loss)
lrs = lr_scheduler.get_last_lr()
if len(lrs) == 1:
log_dict["lr"] = float(lrs[0])
else:
log_dict["lr/textencoder"] = float(lrs[0])
log_dict["lr/unet"] = float(lrs[-1])
if config.train.optimizer_type.lower().startswith("dadapt"):
log_dict["lr/d*lr"] = (
optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"]
)
# generate sample images
if config.logging.interval > 0 and (
i % config.logging.interval == 0 or i == config.train.iterations - 1
):
print("Generating samples...")
with network:
samples = train_util.text2img(
pipe,
prompts=config.logging.prompts,
negative_prompt=config.logging.negative_prompt,
width=config.logging.width,
height=config.logging.height,
num_inference_steps=config.logging.num_inference_steps,
guidance_scale=config.logging.guidance_scale,
generate_num=config.logging.generate_num,
seed=config.logging.seed,
)
for text, img in samples:
log_dict[text] = wandb.Image(img)
# evaluate on the generated images
print("Evaluating CLIPScore and CLIPAccuracy...")
with network:
| # ref:
# - https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L566
# - https://huggingface.co/spaces/baulab/Erasing-Concepts-In-Diffusion/blob/main/train.py
# - https://github.com/p1atdev/LECO/blob/main/train_lora_xl.py
DEVICE_CUDA = torch.device("cuda:0")
NUM_IMAGES_PER_PROMPT = 1
def flush():
torch.cuda.empty_cache()
gc.collect()
def train(
config: RootConfig,
prompts: list[PromptSettings],
):
metadata = {
"prompts": ",".join([prompt.json() for prompt in prompts]),
"config": config.json(),
}
model_metadata = {
"prompts": ",".join([prompt.target for prompt in prompts]),
"rank": str(config.network.rank),
"alpha": str(config.network.alpha),
}
save_path = Path(config.save.path)
if config.logging.verbose:
print(metadata)
weight_dtype = config_pkg.parse_precision(config.train.precision)
save_weight_dtype = config_pkg.parse_precision(config.train.precision)
if config.logging.use_wandb:
wandb.init(project=f"SPM",
config=metadata,
name=config.logging.run_name,
settings=wandb.Settings(symlink=False))
(
tokenizers,
text_encoders,
unet,
noise_scheduler,
pipe
) = model_util.load_models_xl(
config.pretrained_model.name_or_path,
scheduler_name=config.train.noise_scheduler,
)
for text_encoder in text_encoders:
text_encoder.to(DEVICE_CUDA, dtype=weight_dtype)
text_encoder.requires_grad_(False)
text_encoder.eval()
unet.to(DEVICE_CUDA, dtype=weight_dtype)
unet.enable_xformers_memory_efficient_attention()
unet.requires_grad_(False)
unet.eval()
network = SPMNetwork(
unet,
rank=config.network.rank,
multiplier=1.0,
alpha=config.network.alpha,
module=SPMLayer,
).to(DEVICE_CUDA, dtype=weight_dtype)
trainable_params = network.prepare_optimizer_params(
config.train.text_encoder_lr, config.train.unet_lr, config.train.lr
)
optimizer_name, optimizer_args, optimizer = train_util.get_optimizer(
config, trainable_params
)
lr_scheduler = train_util.get_scheduler_fix(config, optimizer)
criteria = torch.nn.MSELoss()
print("Prompts")
for settings in prompts:
print(settings)
cache = PromptEmbedsCache()
prompt_pairs: list[PromptEmbedsPair] = []
with torch.no_grad():
for settings in prompts:
for prompt in [
settings.target,
settings.positive,
settings.neutral,
settings.unconditional,
]:
if cache[prompt] == None:
cache[prompt] = PromptEmbedsXL(
train_util.encode_prompts_xl(
tokenizers,
text_encoders,
[prompt],
num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
)
)
prompt_pair = PromptEmbedsPair(
criteria,
cache[settings.target],
cache[settings.positive],
cache[settings.unconditional],
cache[settings.neutral],
settings,
)
assert prompt_pair.sampling_batch_size % prompt_pair.batch_size == 0
prompt_pairs.append(prompt_pair)
flush()
pbar = tqdm(range(config.train.iterations))
loss = None
for i in pbar:
with torch.no_grad():
noise_scheduler.set_timesteps(
config.train.max_denoising_steps, device=DEVICE_CUDA
)
optimizer.zero_grad()
prompt_pair: PromptEmbedsPair = prompt_pairs[
torch.randint(0, len(prompt_pairs), (1,)).item()
]
timesteps_to = torch.randint(
1, config.train.max_denoising_steps, (1,)
).item()
height, width = (
prompt_pair.resolution,
prompt_pair.resolution,
)
if prompt_pair.dynamic_resolution:
height, width = train_util.get_random_resolution_in_bucket(
prompt_pair.resolution
)
if config.logging.verbose:
print("guidance_scale:", prompt_pair.guidance_scale)
print("resolution:", prompt_pair.resolution)
print("dynamic_resolution:", prompt_pair.dynamic_resolution)
if prompt_pair.dynamic_resolution:
print("bucketed resolution:", (height, width))
print("batch_size:", prompt_pair.batch_size)
print("dynamic_crops:", prompt_pair.dynamic_crops)
latents = train_util.get_initial_latents(
noise_scheduler, prompt_pair.batch_size, height, width, 1
).to(DEVICE_CUDA, dtype=weight_dtype)
add_time_ids = train_util.get_add_time_ids(
height,
width,
dynamic_crops=prompt_pair.dynamic_crops,
dtype=weight_dtype,
).to(DEVICE_CUDA, dtype=weight_dtype)
with network:
denoised_latents = train_util.diffusion_xl(
unet,
noise_scheduler,
latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.target.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.target.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
start_timesteps=0,
total_timesteps=timesteps_to,
guidance_scale=3,
)
noise_scheduler.set_timesteps(1000)
current_timestep = noise_scheduler.timesteps[
int(timesteps_to * 1000 / config.train.max_denoising_steps)
]
positive_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.positive.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.positive.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
neutral_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.neutral.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.neutral.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
with network:
target_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents,
text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.text_embeds,
prompt_pair.target.text_embeds,
prompt_pair.batch_size,
),
add_text_embeddings=train_util.concat_embeddings(
prompt_pair.unconditional.pooled_embeds,
prompt_pair.target.pooled_embeds,
prompt_pair.batch_size,
),
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
# ------------------------- latent anchoring part -----------------------------
if prompt_pair.action == "erase_with_la":
# noise sampling
anchors_text, anchors_pool = sample_xl(prompt_pair, tokenizers=tokenizers, text_encoders=text_encoders)
# get latents
repeat = prompt_pair.sampling_batch_size // prompt_pair.batch_size
# TODO: target or positive?
with network:
anchor_latents = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents.repeat(repeat, 1, 1, 1),
text_embeddings=anchors_text,
add_text_embeddings=anchors_pool,
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
with torch.no_grad():
anchor_latents_ori = train_util.predict_noise_xl(
unet,
noise_scheduler,
current_timestep,
denoised_latents.repeat(repeat, 1, 1, 1),
text_embeddings=anchors_text,
add_text_embeddings=anchors_pool,
add_time_ids=train_util.concat_embeddings(
add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
),
guidance_scale=1,
).to("cpu", dtype=torch.float32)
anchor_latents_ori.requires_grad_ = False
else:
anchor_latents = None
anchor_latents_ori = None
# ----------------------------------------------------------------
positive_latents.requires_grad = False
neutral_latents.requires_grad = False
loss = prompt_pair.loss(
target_latents=target_latents,
positive_latents=positive_latents,
neutral_latents=neutral_latents,
anchor_latents=anchor_latents,
anchor_latents_ori=anchor_latents_ori,
)
loss["loss"].backward()
if config.train.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(
trainable_params, config.train.max_grad_norm, norm_type=2
)
optimizer.step()
lr_scheduler.step()
pbar.set_description(f"Loss*1k: {loss['loss'].item()*1000:.4f}")
# logging
if config.logging.use_wandb:
log_dict = {"iteration": i}
loss = {k: v.detach().cpu().item() for k, v in loss.items()}
log_dict.update(loss)
lrs = lr_scheduler.get_last_lr()
if len(lrs) == 1:
log_dict["lr"] = float(lrs[0])
else:
log_dict["lr/textencoder"] = float(lrs[0])
log_dict["lr/unet"] = float(lrs[-1])
if config.train.optimizer_type.lower().startswith("dadapt"):
log_dict["lr/d*lr"] = (
optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"]
)
# generate sample images
if config.logging.interval > 0 and (
i % config.logging.interval == 0 or i == config.train.iterations - 1
):
print("Generating samples...")
with network:
samples = train_util.text2img(
pipe,
prompts=config.logging.prompts,
negative_prompt=config.logging.negative_prompt,
width=config.logging.width,
height=config.logging.height,
num_inference_steps=config.logging.num_inference_steps,
guidance_scale=config.logging.guidance_scale,
generate_num=config.logging.generate_num,
seed=config.logging.seed,
)
for text, img in samples:
log_dict[text] = wandb.Image(img)
# evaluate on the generated images
print("Evaluating CLIPScore and CLIPAccuracy...")
with network: | clip_scores, clip_accs = eval_util.clip_eval(pipe, config) | 4 | 2023-12-26 03:19:16+00:00 | 12k |
dakpinaroglu/Frame2seq | frame2seq/model/Frame2seq.py | [
{
"identifier": "Rigid",
"path": "frame2seq/utils/rigid_utils.py",
"snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like ... | import torch.nn as nn
import pytorch_lightning as pl
from torch.utils.checkpoint import checkpoint
from frame2seq.utils.rigid_utils import Rigid
from frame2seq.openfold.model.primitives import LayerNorm
from frame2seq.openfold.model.structure_module import InvariantPointAttention, StructureModuleTransition
from frame2seq.model.edge_update import EdgeTransition
from frame2seq.utils.featurize import make_s_init, make_z_init | 10,317 |
class frame2seq(pl.LightningModule):
def __init__(self, config):
super(frame2seq, self).__init__()
self.save_hyperparameters()
config = self.hparams.config
self.config = config
ipa_depth = config['ipa_depth']
ipa_dim = config['ipa_dim']
ipa_heads = config['ipa_heads']
ipa_pairwise_repr_dim = config['ipa_pairwise_repr_dim']
self.st_mod_tsit_factor = config['st_mod_tsit_factor']
self.sequence_dim = config['sequence_dim']
self.single_dim = config['single_dim']
self.torsion_bin_width = 8
self.torsion_bins = 360 // self.torsion_bin_width
self.relpos_k = 32
self.dist_bin_width = 0.5
self.dist_bins = 24
self.pair_dim = 16 * self.dist_bins + 2 * self.relpos_k + 1
self.sequence_to_single = nn.Linear(6 + self.single_dim,
self.single_dim)
self.edge_to_pair = nn.Linear(self.pair_dim, ipa_pairwise_repr_dim)
self.single_to_sequence = nn.Linear(self.single_dim, self.sequence_dim)
self.layers = nn.ModuleList([])
for i in range(ipa_depth):
|
class frame2seq(pl.LightningModule):
def __init__(self, config):
super(frame2seq, self).__init__()
self.save_hyperparameters()
config = self.hparams.config
self.config = config
ipa_depth = config['ipa_depth']
ipa_dim = config['ipa_dim']
ipa_heads = config['ipa_heads']
ipa_pairwise_repr_dim = config['ipa_pairwise_repr_dim']
self.st_mod_tsit_factor = config['st_mod_tsit_factor']
self.sequence_dim = config['sequence_dim']
self.single_dim = config['single_dim']
self.torsion_bin_width = 8
self.torsion_bins = 360 // self.torsion_bin_width
self.relpos_k = 32
self.dist_bin_width = 0.5
self.dist_bins = 24
self.pair_dim = 16 * self.dist_bins + 2 * self.relpos_k + 1
self.sequence_to_single = nn.Linear(6 + self.single_dim,
self.single_dim)
self.edge_to_pair = nn.Linear(self.pair_dim, ipa_pairwise_repr_dim)
self.single_to_sequence = nn.Linear(self.single_dim, self.sequence_dim)
self.layers = nn.ModuleList([])
for i in range(ipa_depth):
| ipa = InvariantPointAttention( | 2 | 2023-12-25 09:29:36+00:00 | 12k |
wwxu21/CUT | finetune_unlikelihood.py | [
{
"identifier": "LlamaForCausalLM",
"path": "modeling_llama_unlikelihood.py",
"snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config, threshold):\n super().__init__(config)\n self.model = LlamaModel(config)\... | import os
import sys
import json
import fire
import torch
import transformers
import numpy as np
import random
from typing import List
from torch.utils.data import DataLoader
from datasets import load_dataset, concatenate_datasets, Dataset
from transformers import TrainerCallback, TrainingArguments, TrainerState, TrainerControl
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from peft import (
LoraConfig,
prepare_model_for_int8_training,
set_peft_model_state_dict,
MODEL_TYPE_TO_PEFT_MODEL_MAPPING,
PeftModel,
)
from peft.utils import _prepare_prompt_learning_config
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy
from transformers import LlamaTokenizer, LlamaConfig
from modeling_llama_unlikelihood import LlamaForCausalLM, PeftModelForCausalLM
from prompter import Prompter
from typing import Optional, Union, Any
from dataclasses import dataclass | 7,617 | set_peft_model_state_dict(model, adapters_weights)
return control
def get_peft_model(model, peft_config, adapter_name: str = "default"):
"""
Returns a Peft model object from a model and a config.
Args:
model ([`transformers.PreTrainedModel`]): Model to be wrapped.
peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model.
"""
model_config = getattr(model, "config", {"model_type": "custom"})
if hasattr(model_config, "to_dict"):
model_config = model_config.to_dict()
peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
return PeftModel(model, peft_config, adapter_name=adapter_name)
if peft_config.is_prompt_learning:
peft_config = _prepare_prompt_learning_config(peft_config, model_config)
return PeftModelForCausalLM(model, peft_config, adapter_name=adapter_name)
def train(
# model/data params
base_model: str = "",
data_path: str = "",
output_dir: str = "",
# training hyperparams
batch_size: int = 128,
micro_batch_size: int = 8,
num_epochs: int = 1,
learning_rate: float = 3e-4,
cutoff_len: int = 4096,
val_set_size: int = 0,
lr_scheduler: str = "cosine",
warmup_steps: int = 100,
# lora hyperparams
lora_r: int = 16,
lora_alpha: int = 16,
lora_dropout: float = 0.05,
# from peft docs: ["q_proj", "k_proj", "v_proj", "o_proj", "fc_in", "fc_out", "wte", "gate_proj", "down_proj", "up_proj"]
lora_target_modules: List[str] = ["gate_proj", "down_proj", "up_proj"],
# llm hyperparams
train_on_inputs: bool = False, # if False, masks out inputs in loss
add_eos_token: bool = False,
group_by_length: bool = False, # faster, but produces an odd training loss curve
# wandb params
wandb_project: str = "",
wandb_run_name: str = "",
wandb_watch: str = "", # options: false | gradients | all
wandb_log_model: str = "", # options: false | true
resume_from_checkpoint: str = None, # either training checkpoint or final adapter
prompt_template_name: str = "alpaca",
weight_unlike: float = 0.1,
threshold: float = 1.1,
downsample: float = -1,
debug: bool = False,
):
if int(os.environ.get("LOCAL_RANK", 0)) == 0:
print(
f"Params using prompt template {prompt_template_name}\n"
f"the unlikelihood weight for the incorrect token in the incorrect response: {weight_unlike}\n"
f"the threshold to determine the unlikelihood token: {threshold}\n"
f"downssample rate for Hindsight-P: {downsample}\n"
f"base_model: {base_model}\n"
f"data_path: {data_path}\n"
f"output_dir: {output_dir}\n"
f"batch_size: {batch_size}\n"
f"micro_batch_size: {micro_batch_size}\n"
f"num_epochs: {num_epochs}\n"
f"learning_rate: {learning_rate}\n"
f"cutoff_len: {cutoff_len}\n"
f"val_set_size: {val_set_size}\n"
f"lr_scheduler: {lr_scheduler}\n"
f"warmup_steps: {warmup_steps}\n"
f"lora_r: {lora_r}\n"
f"lora_alpha: {lora_alpha}\n"
f"lora_dropout: {lora_dropout}\n"
f"lora_target_modules: {lora_target_modules}\n"
f"train_on_inputs: {train_on_inputs}\n"
f"add_eos_token: {add_eos_token}\n"
f"group_by_length: {group_by_length}\n"
f"wandb_project: {wandb_project}\n"
f"wandb_run_name: {wandb_run_name}\n"
f"wandb_watch: {wandb_watch}\n"
f"wandb_log_model: {wandb_log_model}\n"
f"resume_from_checkpoint: {resume_from_checkpoint or False}\n"
)
assert (
base_model
), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
gradient_accumulation_steps = batch_size // micro_batch_size
prompter = Prompter(prompt_template_name)
if not debug:
device_map = "auto"
else:
device_map = "cpu"
world_size = int(os.environ.get("WORLD_SIZE", 1))
local_rank = int(os.environ.get("LOCAL_RANK", 0))
ddp = world_size != 1
if ddp:
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
gradient_accumulation_steps = gradient_accumulation_steps // world_size
print("gradient_accumulation_steps: ", gradient_accumulation_steps)
# Check if parameter passed or if set within environ
use_wandb = len(wandb_project) > 0 or (
"WANDB_PROJECT" in os.environ and len(os.environ["WANDB_PROJECT"]) > 0
)
use_wandb =False
# Only overwrite environ if wandb param passed
if len(wandb_project) > 0:
os.environ["WANDB_PROJECT"] = wandb_project
if len(wandb_watch) > 0:
os.environ["WANDB_WATCH"] = wandb_watch
if len(wandb_log_model) > 0:
os.environ["WANDB_LOG_MODEL"] = wandb_log_model
if not debug:
| seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
@dataclass
class MyDataCollator:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
The tokenizer used for encoding the data.
model ([`PreTrainedModel`]):
The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
prepare the *decoder_input_ids*
This is useful when using *label_smoothing* to avoid calculating loss twice.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (`int`, *optional*, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
return_tensors (`str`):
The type of Tensor to return. Allowable values are "np", "pt" and "tf".
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[Any] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
return_tensors: str = "pt"
def __call__(self, features, return_tensors=None):
if return_tensors is None:
return_tensors = self.return_tensors
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
labels_neg = [feature["labels_neg"] for feature in features] if "labels_neg" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
if labels_neg is not None:
max_label_length_neg = max(len(l) for l in labels_neg)
max_label_length = max(max_label_length, max_label_length_neg)
if self.pad_to_multiple_of is not None:
max_label_length = (
(max_label_length + self.pad_to_multiple_of - 1)
// self.pad_to_multiple_of
* self.pad_to_multiple_of
)
# self.tokenizer.padding_side = "left"
padding_side = self.tokenizer.padding_side
for feature in features:
feature['weight_like'] = [feature['weight_like']]
feature['weight_unlike'] = [feature['weight_unlike']]
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
remainder_length = max_label_length - len(feature["labels_neg"])
remainder_label = [self.label_pad_token_id] * remainder_length
remainder_ids = [self.tokenizer.pad_token_id] * remainder_length
remainder_mask = [0] * remainder_length
if isinstance(feature["labels"], list):
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
feature["labels_neg"] = (
feature["labels_neg"] + remainder_label if padding_side == "right" else remainder_label + feature["labels_neg"]
)
feature["input_ids_neg"] = (
feature["input_ids_neg"] + remainder_ids if padding_side == "right" else remainder_ids + feature["input_ids_neg"]
)
feature["attention_mask_neg"] = (
feature["attention_mask_neg"] + remainder_mask if padding_side == "right" else remainder_mask + feature["attention_mask_neg"]
)
elif padding_side == "right":
feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
feature["labels_neg"] = np.concatenate([feature["labels_neg"], remainder_label]).astype(np.int64)
feature["input_ids_neg"] = np.concatenate([feature["input_ids_neg"], remainder_ids]).astype(np.int64)
feature["attention_mask_neg"] = np.concatenate([feature["attention_mask_neg"], remainder_mask]).astype(np.int64)
else:
feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
feature["labels_neg"] = np.concatenate([remainder_label, feature["labels_neg"]]).astype(np.int64)
feature["input_ids_neg"] = np.concatenate([remainder_ids, feature["input_ids_neg"]]).astype(np.int64)
feature["attention_mask_neg"] = np.concatenate([remainder_mask, feature["attention_mask_neg"]]).astype(np.int64)
features = self.tokenizer.pad(
features,
padding=self.padding,
max_length=max_label_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=return_tensors,
)
# prepare decoder_input_ids
if (
labels is not None
and self.model is not None
and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
features["decoder_input_ids"] = decoder_input_ids
return features
class SavePeftModelCallback(TrainerCallback):
def on_save(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
kwargs["model"].save_pretrained(checkpoint_folder)
pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin")
torch.save({}, pytorch_model_path)
return control
class LoadBestPeftModelCallback(TrainerCallback):
def on_train_end(
self,
args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
print(f"Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).")
best_model_path = os.path.join(state.best_model_checkpoint, "adapter_model.bin")
adapters_weights = torch.load(best_model_path)
model = kwargs["model"]
set_peft_model_state_dict(model, adapters_weights)
return control
def get_peft_model(model, peft_config, adapter_name: str = "default"):
"""
Returns a Peft model object from a model and a config.
Args:
model ([`transformers.PreTrainedModel`]): Model to be wrapped.
peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model.
"""
model_config = getattr(model, "config", {"model_type": "custom"})
if hasattr(model_config, "to_dict"):
model_config = model_config.to_dict()
peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
return PeftModel(model, peft_config, adapter_name=adapter_name)
if peft_config.is_prompt_learning:
peft_config = _prepare_prompt_learning_config(peft_config, model_config)
return PeftModelForCausalLM(model, peft_config, adapter_name=adapter_name)
def train(
# model/data params
base_model: str = "",
data_path: str = "",
output_dir: str = "",
# training hyperparams
batch_size: int = 128,
micro_batch_size: int = 8,
num_epochs: int = 1,
learning_rate: float = 3e-4,
cutoff_len: int = 4096,
val_set_size: int = 0,
lr_scheduler: str = "cosine",
warmup_steps: int = 100,
# lora hyperparams
lora_r: int = 16,
lora_alpha: int = 16,
lora_dropout: float = 0.05,
# from peft docs: ["q_proj", "k_proj", "v_proj", "o_proj", "fc_in", "fc_out", "wte", "gate_proj", "down_proj", "up_proj"]
lora_target_modules: List[str] = ["gate_proj", "down_proj", "up_proj"],
# llm hyperparams
train_on_inputs: bool = False, # if False, masks out inputs in loss
add_eos_token: bool = False,
group_by_length: bool = False, # faster, but produces an odd training loss curve
# wandb params
wandb_project: str = "",
wandb_run_name: str = "",
wandb_watch: str = "", # options: false | gradients | all
wandb_log_model: str = "", # options: false | true
resume_from_checkpoint: str = None, # either training checkpoint or final adapter
prompt_template_name: str = "alpaca",
weight_unlike: float = 0.1,
threshold: float = 1.1,
downsample: float = -1,
debug: bool = False,
):
if int(os.environ.get("LOCAL_RANK", 0)) == 0:
print(
f"Params using prompt template {prompt_template_name}\n"
f"the unlikelihood weight for the incorrect token in the incorrect response: {weight_unlike}\n"
f"the threshold to determine the unlikelihood token: {threshold}\n"
f"downssample rate for Hindsight-P: {downsample}\n"
f"base_model: {base_model}\n"
f"data_path: {data_path}\n"
f"output_dir: {output_dir}\n"
f"batch_size: {batch_size}\n"
f"micro_batch_size: {micro_batch_size}\n"
f"num_epochs: {num_epochs}\n"
f"learning_rate: {learning_rate}\n"
f"cutoff_len: {cutoff_len}\n"
f"val_set_size: {val_set_size}\n"
f"lr_scheduler: {lr_scheduler}\n"
f"warmup_steps: {warmup_steps}\n"
f"lora_r: {lora_r}\n"
f"lora_alpha: {lora_alpha}\n"
f"lora_dropout: {lora_dropout}\n"
f"lora_target_modules: {lora_target_modules}\n"
f"train_on_inputs: {train_on_inputs}\n"
f"add_eos_token: {add_eos_token}\n"
f"group_by_length: {group_by_length}\n"
f"wandb_project: {wandb_project}\n"
f"wandb_run_name: {wandb_run_name}\n"
f"wandb_watch: {wandb_watch}\n"
f"wandb_log_model: {wandb_log_model}\n"
f"resume_from_checkpoint: {resume_from_checkpoint or False}\n"
)
assert (
base_model
), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
gradient_accumulation_steps = batch_size // micro_batch_size
prompter = Prompter(prompt_template_name)
if not debug:
device_map = "auto"
else:
device_map = "cpu"
world_size = int(os.environ.get("WORLD_SIZE", 1))
local_rank = int(os.environ.get("LOCAL_RANK", 0))
ddp = world_size != 1
if ddp:
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
gradient_accumulation_steps = gradient_accumulation_steps // world_size
print("gradient_accumulation_steps: ", gradient_accumulation_steps)
# Check if parameter passed or if set within environ
use_wandb = len(wandb_project) > 0 or (
"WANDB_PROJECT" in os.environ and len(os.environ["WANDB_PROJECT"]) > 0
)
use_wandb =False
# Only overwrite environ if wandb param passed
if len(wandb_project) > 0:
os.environ["WANDB_PROJECT"] = wandb_project
if len(wandb_watch) > 0:
os.environ["WANDB_WATCH"] = wandb_watch
if len(wandb_log_model) > 0:
os.environ["WANDB_LOG_MODEL"] = wandb_log_model
if not debug: | model = LlamaForCausalLM.from_pretrained( | 0 | 2023-12-22 07:32:19+00:00 | 12k |
usail-hkust/LLMTSCS | utils/oneline.py | [
{
"identifier": "DIC_AGENTS",
"path": "utils/config.py",
"snippet": "DIC_AGENTS = {\n \"Random\": RandomAgent,\n \"Fixedtime\": FixedtimeAgent,\n \"MaxPressure\": MaxPressureAgent,\n \"EfficientMaxPressure\": EfficientMaxPressureAgent,\n \"AdvancedMaxPressure\": AdvancedMaxPressureAgent,\... | from .config import DIC_AGENTS
from .my_utils import merge, get_state, get_state_detail, eight_phase_list, dump_json
from copy import deepcopy
from .cityflow_env import CityFlowEnv
from .pipeline import path_check, copy_cityflow_file, copy_conf_file
from tqdm import tqdm
import os
import time
import numpy as np
import wandb
import threading | 7,445 | path_check(self.dic_path)
copy_conf_file(self.dic_path, self.dic_agent_conf, self.dic_traffic_env_conf)
copy_cityflow_file(self.dic_path, self.dic_traffic_env_conf)
self.env = CityFlowEnv(
path_to_log=self.dic_path["PATH_TO_WORK_DIRECTORY"],
path_to_work_directory=self.dic_path["PATH_TO_WORK_DIRECTORY"],
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path
)
self.env.reset()
agent_name = self.dic_traffic_env_conf["MODEL_NAME"]
for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
if "ChatGPT" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
GPT_version=self.dic_agent_conf["GPT_VERSION"],
intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
inter_name=self.env.list_intersection[i].inter_name,
phase_num=len(self.env.list_intersection[i].list_phases),
log_dir=self.dic_agent_conf["LOG_DIR"],
dataset=f"{self.roadnet}-{self.trafficflow}"
)
elif "open_llm" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
ex_api=self.dic_agent_conf["WITH_EXTERNAL_API"],
model=agent_name.split("-")[1],
intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
inter_name=self.env.list_intersection[i].inter_name,
phase_num=len(self.env.list_intersection[i].list_phases),
log_dir=self.dic_agent_conf["LOG_DIR"],
dataset=f"{self.roadnet}-{self.trafficflow}"
)
else:
agent = DIC_AGENTS[agent_name](
dic_agent_conf=self.dic_agent_conf,
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path,
cnt_round=0,
intersection_id=str(i)
)
self.agents.append(agent)
def train(self, round):
print("================ start train ================")
total_run_cnt = self.dic_traffic_env_conf["RUN_COUNTS"]
# initialize output streams
file_name_memory = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "memories.txt")
done = False
state = self.env.reset()
total_reward = 0.0
queue_length_episode = []
waiting_time_episode = []
step_num = 0
print("end reset")
current_time = self.env.get_current_time() # in seconds
all_config = merge(merge(self.dic_agent_conf, self.dic_path), self.dic_traffic_env_conf)
logger = wandb.init(
project=self.dic_traffic_env_conf['PROJECT_NAME'],
group=f"{self.dic_traffic_env_conf['MODEL_NAME']}-{self.roadnet}-{self.trafficflow}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases",
name=f"round_{round}",
config=all_config,
)
start_time = time.time()
state_action_log = [[] for _ in range(len(state))]
while not done and current_time < total_run_cnt:
action_list = []
threads = []
for i in range(len(state)):
# log statistic state
intersection = self.env.intersection_dict[self.env.list_intersection[i].inter_name]
roads = deepcopy(intersection["roads"])
statistic_state, statistic_state_incoming, mean_speed = get_state_detail(roads, self.env)
state_action_log[i].append({"state": statistic_state, "state_incoming": statistic_state_incoming, "approaching_speed": mean_speed})
one_state = state[i]
count = step_num
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"] or "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
thread = threading.Thread(target=self.agents[i].choose_action, args=(self.env,))
threads.append(thread)
else:
action = self.agents[i].choose_action(count, one_state)
action_list.append(action)
# multi-thread
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"]:
for thread in threads:
thread.start()
for thread in tqdm(threads):
thread.join()
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
# multi-thread
if "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
started_thread_id = []
thread_num = self.dic_traffic_env_conf["LLM_API_THREAD_NUM"] if not self.dic_agent_conf["WITH_EXTERNAL_API"] else 2
for i, thread in enumerate(tqdm(threads)):
thread.start()
started_thread_id.append(i)
if (i + 1) % thread_num == 0:
for t_id in started_thread_id:
threads[t_id].join()
started_thread_id = []
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
next_state, reward, done, _ = self.env.step(action_list)
# log action
for i in range(len(state)):
|
class OneLine:
def __init__(self, dic_agent_conf, dic_traffic_env_conf, dic_path, roadnet, trafficflow):
self.dic_agent_conf = dic_agent_conf
self.dic_traffic_env_conf = dic_traffic_env_conf
self.dic_path = dic_path
self.agents = []
self.env = None
self.roadnet = roadnet
self.trafficflow = trafficflow
self.models = []
self.initialize()
def initialize(self):
path_check(self.dic_path)
copy_conf_file(self.dic_path, self.dic_agent_conf, self.dic_traffic_env_conf)
copy_cityflow_file(self.dic_path, self.dic_traffic_env_conf)
self.env = CityFlowEnv(
path_to_log=self.dic_path["PATH_TO_WORK_DIRECTORY"],
path_to_work_directory=self.dic_path["PATH_TO_WORK_DIRECTORY"],
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path
)
self.env.reset()
agent_name = self.dic_traffic_env_conf["MODEL_NAME"]
for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
if "ChatGPT" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
GPT_version=self.dic_agent_conf["GPT_VERSION"],
intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
inter_name=self.env.list_intersection[i].inter_name,
phase_num=len(self.env.list_intersection[i].list_phases),
log_dir=self.dic_agent_conf["LOG_DIR"],
dataset=f"{self.roadnet}-{self.trafficflow}"
)
elif "open_llm" in agent_name:
agent = DIC_AGENTS[agent_name.split("-")[0]](
ex_api=self.dic_agent_conf["WITH_EXTERNAL_API"],
model=agent_name.split("-")[1],
intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
inter_name=self.env.list_intersection[i].inter_name,
phase_num=len(self.env.list_intersection[i].list_phases),
log_dir=self.dic_agent_conf["LOG_DIR"],
dataset=f"{self.roadnet}-{self.trafficflow}"
)
else:
agent = DIC_AGENTS[agent_name](
dic_agent_conf=self.dic_agent_conf,
dic_traffic_env_conf=self.dic_traffic_env_conf,
dic_path=self.dic_path,
cnt_round=0,
intersection_id=str(i)
)
self.agents.append(agent)
def train(self, round):
print("================ start train ================")
total_run_cnt = self.dic_traffic_env_conf["RUN_COUNTS"]
# initialize output streams
file_name_memory = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "memories.txt")
done = False
state = self.env.reset()
total_reward = 0.0
queue_length_episode = []
waiting_time_episode = []
step_num = 0
print("end reset")
current_time = self.env.get_current_time() # in seconds
all_config = merge(merge(self.dic_agent_conf, self.dic_path), self.dic_traffic_env_conf)
logger = wandb.init(
project=self.dic_traffic_env_conf['PROJECT_NAME'],
group=f"{self.dic_traffic_env_conf['MODEL_NAME']}-{self.roadnet}-{self.trafficflow}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases",
name=f"round_{round}",
config=all_config,
)
start_time = time.time()
state_action_log = [[] for _ in range(len(state))]
while not done and current_time < total_run_cnt:
action_list = []
threads = []
for i in range(len(state)):
# log statistic state
intersection = self.env.intersection_dict[self.env.list_intersection[i].inter_name]
roads = deepcopy(intersection["roads"])
statistic_state, statistic_state_incoming, mean_speed = get_state_detail(roads, self.env)
state_action_log[i].append({"state": statistic_state, "state_incoming": statistic_state_incoming, "approaching_speed": mean_speed})
one_state = state[i]
count = step_num
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"] or "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
thread = threading.Thread(target=self.agents[i].choose_action, args=(self.env,))
threads.append(thread)
else:
action = self.agents[i].choose_action(count, one_state)
action_list.append(action)
# multi-thread
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"]:
for thread in threads:
thread.start()
for thread in tqdm(threads):
thread.join()
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
# multi-thread
if "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
started_thread_id = []
thread_num = self.dic_traffic_env_conf["LLM_API_THREAD_NUM"] if not self.dic_agent_conf["WITH_EXTERNAL_API"] else 2
for i, thread in enumerate(tqdm(threads)):
thread.start()
started_thread_id.append(i)
if (i + 1) % thread_num == 0:
for t_id in started_thread_id:
threads[t_id].join()
started_thread_id = []
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
next_state, reward, done, _ = self.env.step(action_list)
# log action
for i in range(len(state)): | state_action_log[i][-1]["action"] = eight_phase_list[action_list[i]] | 1 | 2023-12-26 08:31:47+00:00 | 12k |
KyanChen/TTP | mmdet/models/dense_heads/dino_head.py | [
{
"identifier": "MODELS",
"path": "mmdet/registry.py",
"snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])"
},
{
"identifier": "SampleList",
"path": "mmdet/structures/det_data_sample.py",
"snippet": "class DetDataSample(BaseDataElement):\n def pr... | from typing import Dict, List, Tuple
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps,
bbox_xyxy_to_cxcywh)
from mmdet.utils import InstanceList, OptInstanceList, reduce_mean
from ..losses import QualityFocalLoss
from ..utils import multi_apply
from .deformable_detr_head import DeformableDETRHead
import torch | 10,501 | # between DINO and DeformableDETR.
# loss of proposal generated from encode feature map.
if enc_cls_scores is not None:
# NOTE The enc_loss calculation of the DINO is
# different from that of Deformable DETR.
enc_loss_cls, enc_losses_bbox, enc_losses_iou = \
self.loss_by_feat_single(
enc_cls_scores, enc_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas)
loss_dict['enc_loss_cls'] = enc_loss_cls
loss_dict['enc_loss_bbox'] = enc_losses_bbox
loss_dict['enc_loss_iou'] = enc_losses_iou
if all_layers_denoising_cls_scores is not None:
# calculate denoising loss from all decoder layers
dn_losses_cls, dn_losses_bbox, dn_losses_iou = self.loss_dn(
all_layers_denoising_cls_scores,
all_layers_denoising_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
dn_meta=dn_meta)
# collate denoising loss
loss_dict['dn_loss_cls'] = dn_losses_cls[-1]
loss_dict['dn_loss_bbox'] = dn_losses_bbox[-1]
loss_dict['dn_loss_iou'] = dn_losses_iou[-1]
for num_dec_layer, (loss_cls_i, loss_bbox_i, loss_iou_i) in \
enumerate(zip(dn_losses_cls[:-1], dn_losses_bbox[:-1],
dn_losses_iou[:-1])):
loss_dict[f'd{num_dec_layer}.dn_loss_cls'] = loss_cls_i
loss_dict[f'd{num_dec_layer}.dn_loss_bbox'] = loss_bbox_i
loss_dict[f'd{num_dec_layer}.dn_loss_iou'] = loss_iou_i
return loss_dict
def loss_dn(self, all_layers_denoising_cls_scores: Tensor,
all_layers_denoising_bbox_preds: Tensor,
batch_gt_instances: InstanceList, batch_img_metas: List[dict],
dn_meta: Dict[str, int]) -> Tuple[List[Tensor]]:
"""Calculate denoising loss.
Args:
all_layers_denoising_cls_scores (Tensor): Classification scores of
all decoder layers in denoising part, has shape (
num_decoder_layers, bs, num_denoising_queries,
cls_out_channels).
all_layers_denoising_bbox_preds (Tensor): Regression outputs of all
decoder layers in denoising part. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and has shape
(num_decoder_layers, bs, num_denoising_queries, 4).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
'num_denoising_groups'. It will be used for split outputs of
denoising and matching parts and loss calculation.
Returns:
Tuple[List[Tensor]]: The loss_dn_cls, loss_dn_bbox, and loss_dn_iou
of each decoder layers.
"""
return multi_apply(
self._loss_dn_single,
all_layers_denoising_cls_scores,
all_layers_denoising_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
dn_meta=dn_meta)
def _loss_dn_single(self, dn_cls_scores: Tensor, dn_bbox_preds: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
dn_meta: Dict[str, int]) -> Tuple[Tensor]:
"""Denoising loss for outputs from a single decoder layer.
Args:
dn_cls_scores (Tensor): Classification scores of a single decoder
layer in denoising part, has shape (bs, num_denoising_queries,
cls_out_channels).
dn_bbox_preds (Tensor): Regression outputs of a single decoder
layer in denoising part. Each is a 4D-tensor with normalized
coordinate format (cx, cy, w, h) and has shape
(bs, num_denoising_queries, 4).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
'num_denoising_groups'. It will be used for split outputs of
denoising and matching parts and loss calculation.
Returns:
Tuple[Tensor]: A tuple including `loss_cls`, `loss_box` and
`loss_iou`.
"""
cls_reg_targets = self.get_dn_targets(batch_gt_instances,
batch_img_metas, dn_meta)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
labels = torch.cat(labels_list, 0)
label_weights = torch.cat(label_weights_list, 0)
bbox_targets = torch.cat(bbox_targets_list, 0)
bbox_weights = torch.cat(bbox_weights_list, 0)
# classification loss
cls_scores = dn_cls_scores.reshape(-1, self.cls_out_channels)
# construct weighted avg_factor to match with the official DETR repo
cls_avg_factor = \
num_total_pos * 1.0 + num_total_neg * self.bg_cls_weight
if self.sync_cls_avg_factor:
cls_avg_factor = reduce_mean(
cls_scores.new_tensor([cls_avg_factor]))
cls_avg_factor = max(cls_avg_factor, 1)
if len(cls_scores) > 0:
| # Copyright (c) OpenMMLab. All rights reserved.
@MODELS.register_module()
class DINOHead(DeformableDETRHead):
r"""Head of the DINO: DETR with Improved DeNoising Anchor Boxes
for End-to-End Object Detection
Code is modified from the `official github repo
<https://github.com/IDEA-Research/DINO>`_.
More details can be found in the `paper
<https://arxiv.org/abs/2203.03605>`_ .
"""
def loss(self, hidden_states: Tensor, references: List[Tensor],
enc_outputs_class: Tensor, enc_outputs_coord: Tensor,
batch_data_samples: SampleList, dn_meta: Dict[str, int]) -> dict:
"""Perform forward propagation and loss calculation of the detection
head on the queries of the upstream network.
Args:
hidden_states (Tensor): Hidden states output from each decoder
layer, has shape (num_decoder_layers, bs, num_queries_total,
dim), where `num_queries_total` is the sum of
`num_denoising_queries` and `num_matching_queries` when
`self.training` is `True`, else `num_matching_queries`.
references (list[Tensor]): List of the reference from the decoder.
The first reference is the `init_reference` (initial) and the
other num_decoder_layers(6) references are `inter_references`
(intermediate). The `init_reference` has shape (bs,
num_queries_total, 4) and each `inter_reference` has shape
(bs, num_queries, 4) with the last dimension arranged as
(cx, cy, w, h).
enc_outputs_class (Tensor): The score of each point on encode
feature map, has shape (bs, num_feat_points, cls_out_channels).
enc_outputs_coord (Tensor): The proposal generate from the
encode feature map, has shape (bs, num_feat_points, 4) with the
last dimension arranged as (cx, cy, w, h).
batch_data_samples (list[:obj:`DetDataSample`]): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
'num_denoising_groups'. It will be used for split outputs of
denoising and matching parts and loss calculation.
Returns:
dict: A dictionary of loss components.
"""
batch_gt_instances = []
batch_img_metas = []
for data_sample in batch_data_samples:
batch_img_metas.append(data_sample.metainfo)
batch_gt_instances.append(data_sample.gt_instances)
outs = self(hidden_states, references)
loss_inputs = outs + (enc_outputs_class, enc_outputs_coord,
batch_gt_instances, batch_img_metas, dn_meta)
losses = self.loss_by_feat(*loss_inputs)
return losses
def loss_by_feat(
self,
all_layers_cls_scores: Tensor,
all_layers_bbox_preds: Tensor,
enc_cls_scores: Tensor,
enc_bbox_preds: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
dn_meta: Dict[str, int],
batch_gt_instances_ignore: OptInstanceList = None
) -> Dict[str, Tensor]:
"""Loss function.
Args:
all_layers_cls_scores (Tensor): Classification scores of all
decoder layers, has shape (num_decoder_layers, bs,
num_queries_total, cls_out_channels), where
`num_queries_total` is the sum of `num_denoising_queries`
and `num_matching_queries`.
all_layers_bbox_preds (Tensor): Regression outputs of all decoder
layers. Each is a 4D-tensor with normalized coordinate format
(cx, cy, w, h) and has shape (num_decoder_layers, bs,
num_queries_total, 4).
enc_cls_scores (Tensor): The score of each point on encode
feature map, has shape (bs, num_feat_points, cls_out_channels).
enc_bbox_preds (Tensor): The proposal generate from the encode
feature map, has shape (bs, num_feat_points, 4) with the last
dimension arranged as (cx, cy, w, h).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
'num_denoising_groups'. It will be used for split outputs of
denoising and matching parts and loss calculation.
batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# extract denoising and matching part of outputs
(all_layers_matching_cls_scores, all_layers_matching_bbox_preds,
all_layers_denoising_cls_scores, all_layers_denoising_bbox_preds) = \
self.split_outputs(
all_layers_cls_scores, all_layers_bbox_preds, dn_meta)
loss_dict = super(DeformableDETRHead, self).loss_by_feat(
all_layers_matching_cls_scores, all_layers_matching_bbox_preds,
batch_gt_instances, batch_img_metas, batch_gt_instances_ignore)
# NOTE DETRHead.loss_by_feat but not DeformableDETRHead.loss_by_feat
# is called, because the encoder loss calculations are different
# between DINO and DeformableDETR.
# loss of proposal generated from encode feature map.
if enc_cls_scores is not None:
# NOTE The enc_loss calculation of the DINO is
# different from that of Deformable DETR.
enc_loss_cls, enc_losses_bbox, enc_losses_iou = \
self.loss_by_feat_single(
enc_cls_scores, enc_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas)
loss_dict['enc_loss_cls'] = enc_loss_cls
loss_dict['enc_loss_bbox'] = enc_losses_bbox
loss_dict['enc_loss_iou'] = enc_losses_iou
if all_layers_denoising_cls_scores is not None:
# calculate denoising loss from all decoder layers
dn_losses_cls, dn_losses_bbox, dn_losses_iou = self.loss_dn(
all_layers_denoising_cls_scores,
all_layers_denoising_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
dn_meta=dn_meta)
# collate denoising loss
loss_dict['dn_loss_cls'] = dn_losses_cls[-1]
loss_dict['dn_loss_bbox'] = dn_losses_bbox[-1]
loss_dict['dn_loss_iou'] = dn_losses_iou[-1]
for num_dec_layer, (loss_cls_i, loss_bbox_i, loss_iou_i) in \
enumerate(zip(dn_losses_cls[:-1], dn_losses_bbox[:-1],
dn_losses_iou[:-1])):
loss_dict[f'd{num_dec_layer}.dn_loss_cls'] = loss_cls_i
loss_dict[f'd{num_dec_layer}.dn_loss_bbox'] = loss_bbox_i
loss_dict[f'd{num_dec_layer}.dn_loss_iou'] = loss_iou_i
return loss_dict
def loss_dn(self, all_layers_denoising_cls_scores: Tensor,
all_layers_denoising_bbox_preds: Tensor,
batch_gt_instances: InstanceList, batch_img_metas: List[dict],
dn_meta: Dict[str, int]) -> Tuple[List[Tensor]]:
"""Calculate denoising loss.
Args:
all_layers_denoising_cls_scores (Tensor): Classification scores of
all decoder layers in denoising part, has shape (
num_decoder_layers, bs, num_denoising_queries,
cls_out_channels).
all_layers_denoising_bbox_preds (Tensor): Regression outputs of all
decoder layers in denoising part. Each is a 4D-tensor with
normalized coordinate format (cx, cy, w, h) and has shape
(num_decoder_layers, bs, num_denoising_queries, 4).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
'num_denoising_groups'. It will be used for split outputs of
denoising and matching parts and loss calculation.
Returns:
Tuple[List[Tensor]]: The loss_dn_cls, loss_dn_bbox, and loss_dn_iou
of each decoder layers.
"""
return multi_apply(
self._loss_dn_single,
all_layers_denoising_cls_scores,
all_layers_denoising_bbox_preds,
batch_gt_instances=batch_gt_instances,
batch_img_metas=batch_img_metas,
dn_meta=dn_meta)
def _loss_dn_single(self, dn_cls_scores: Tensor, dn_bbox_preds: Tensor,
batch_gt_instances: InstanceList,
batch_img_metas: List[dict],
dn_meta: Dict[str, int]) -> Tuple[Tensor]:
"""Denoising loss for outputs from a single decoder layer.
Args:
dn_cls_scores (Tensor): Classification scores of a single decoder
layer in denoising part, has shape (bs, num_denoising_queries,
cls_out_channels).
dn_bbox_preds (Tensor): Regression outputs of a single decoder
layer in denoising part. Each is a 4D-tensor with normalized
coordinate format (cx, cy, w, h) and has shape
(bs, num_denoising_queries, 4).
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes`` and ``labels``
attributes.
batch_img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
dn_meta (Dict[str, int]): The dictionary saves information about
group collation, including 'num_denoising_queries' and
'num_denoising_groups'. It will be used for split outputs of
denoising and matching parts and loss calculation.
Returns:
Tuple[Tensor]: A tuple including `loss_cls`, `loss_box` and
`loss_iou`.
"""
cls_reg_targets = self.get_dn_targets(batch_gt_instances,
batch_img_metas, dn_meta)
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
labels = torch.cat(labels_list, 0)
label_weights = torch.cat(label_weights_list, 0)
bbox_targets = torch.cat(bbox_targets_list, 0)
bbox_weights = torch.cat(bbox_weights_list, 0)
# classification loss
cls_scores = dn_cls_scores.reshape(-1, self.cls_out_channels)
# construct weighted avg_factor to match with the official DETR repo
cls_avg_factor = \
num_total_pos * 1.0 + num_total_neg * self.bg_cls_weight
if self.sync_cls_avg_factor:
cls_avg_factor = reduce_mean(
cls_scores.new_tensor([cls_avg_factor]))
cls_avg_factor = max(cls_avg_factor, 1)
if len(cls_scores) > 0: | if isinstance(self.loss_cls, QualityFocalLoss): | 7 | 2023-12-23 08:36:47+00:00 | 12k |
SkierProjects/MultiLabelImageClassificationPytorch | src/utils/training/train_model.py | [
{
"identifier": "LoggerFactory",
"path": "src/utils/logging/loggerfactory.py",
"snippet": "class LoggerFactory:\n DEFAULT_LOG_LEVEL = logging.INFO\n LOG_FILE_MAX_BYTES = 10 * 1024 * 1024 # 10 MB\n LOG_FILE_BACKUP_COUNT = 5 # Keep 5 backup files\n LONG_LOG_FORMAT = \"%(asctime)s - %(name)s ... | from config import config
from src.utils.logging.loggerfactory import LoggerFactory
from src.utils.training.modeltrainer import ModelTrainer
from src.utils.evaluation.modelevaluator import ModelEvaluator
from src.utils.evaluation.test_model import evaluate_model
import torch
import utils.dataset.datasetutils as datasetutils | 8,344 | logger = LoggerFactory.get_logger(f"logger.{__name__}")
def train_model(config=config):
"""
Train a model based on the provided configuration.
Parameters:
config: Configuration module with necessary attributes.
"""
# Initialize the computation device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get train, validation, and test dataset loaders
train_loader, valid_loader, test_loader = datasetutils.get_train_valid_test_loaders(config=config)
# Initialize the model trainer
| logger = LoggerFactory.get_logger(f"logger.{__name__}")
def train_model(config=config):
"""
Train a model based on the provided configuration.
Parameters:
config: Configuration module with necessary attributes.
"""
# Initialize the computation device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get train, validation, and test dataset loaders
train_loader, valid_loader, test_loader = datasetutils.get_train_valid_test_loaders(config=config)
# Initialize the model trainer | with ModelTrainer(device, train_loader, valid_loader, test_loader, config=config) as modelTrainer, ModelEvaluator.from_trainer(modelTrainer) as modelEvaluator: | 1 | 2023-12-25 18:45:52+00:00 | 12k |
jpivarski/ragged | src/ragged/_spec_utility_functions.py | [
{
"identifier": "_box",
"path": "src/ragged/_spec_array_object.py",
"snippet": "def _box(\n cls: type[array],\n output: ak.Array | np.number | SupportsDLPack,\n *,\n dtype: None | Dtype = None,\n) -> array:\n if isinstance(output, ak.Array):\n impl = output\n shape, dtype_ob... | import numpy as np
from ._spec_array_object import _box, _unbox, array
from ._spec_statistical_functions import _regularize_axis | 10,729 | # BSD 3-Clause License; see https://github.com/scikit-hep/ragged/blob/main/LICENSE
"""
https://data-apis.org/array-api/latest/API_specification/utility_functions.html
"""
from __future__ import annotations
def all( # pylint: disable=W0622
x: array, /, *, axis: None | int | tuple[int, ...] = None, keepdims: bool = False
) -> array:
"""
Tests whether all input array elements evaluate to `True` along a specified
axis.
Args:
x: Input array.
axis: Axis or axes along which to perform a logical AND reduction. By
default, a logical AND reduction is performed over the entire
array. If a tuple of integers, logical AND reductions are performed
over multiple axes. A valid `axis` must be an integer on the
interval `[-N, N)`, where `N` is the rank (number of dimensions) of
`x`. If an `axis` is specified as a negative integer, the function
must determine the axis along which to perform a reduction by
counting backward from the last dimension (where -1 refers to the
last dimension). If provided an invalid `axis`, the function raises
an exception.
keepdims: If `True`, the reduced axes (dimensions) are included in the
result as singleton dimensions, and, accordingly, the result is
broadcastable with the input array. Otherwise, if `False`, the
reduced axes (dimensions) are not included in the result.
Returns:
If a logical AND reduction was performed over the entire array, the
returned array is a zero-dimensional array containing the test result;
otherwise, the returned array is a non-zero-dimensional array
containing the test results. The returned array has data type
`np.bool_`.
https://data-apis.org/array-api/latest/API_specification/generated/array_api.all.html
"""
| # BSD 3-Clause License; see https://github.com/scikit-hep/ragged/blob/main/LICENSE
"""
https://data-apis.org/array-api/latest/API_specification/utility_functions.html
"""
from __future__ import annotations
def all( # pylint: disable=W0622
x: array, /, *, axis: None | int | tuple[int, ...] = None, keepdims: bool = False
) -> array:
"""
Tests whether all input array elements evaluate to `True` along a specified
axis.
Args:
x: Input array.
axis: Axis or axes along which to perform a logical AND reduction. By
default, a logical AND reduction is performed over the entire
array. If a tuple of integers, logical AND reductions are performed
over multiple axes. A valid `axis` must be an integer on the
interval `[-N, N)`, where `N` is the rank (number of dimensions) of
`x`. If an `axis` is specified as a negative integer, the function
must determine the axis along which to perform a reduction by
counting backward from the last dimension (where -1 refers to the
last dimension). If provided an invalid `axis`, the function raises
an exception.
keepdims: If `True`, the reduced axes (dimensions) are included in the
result as singleton dimensions, and, accordingly, the result is
broadcastable with the input array. Otherwise, if `False`, the
reduced axes (dimensions) are not included in the result.
Returns:
If a logical AND reduction was performed over the entire array, the
returned array is a zero-dimensional array containing the test result;
otherwise, the returned array is a non-zero-dimensional array
containing the test results. The returned array has data type
`np.bool_`.
https://data-apis.org/array-api/latest/API_specification/generated/array_api.all.html
"""
| axis = _regularize_axis(axis, x.ndim) | 3 | 2023-12-26 10:53:35+00:00 | 12k |
see2023/Bert-VITS2-ext | oldVersion/V111/text/chinese.py | [
{
"identifier": "punctuation",
"path": "oldVersion/V111/text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "oldVersion/V111/text/tone_sandhi.py",
"snippet": "class ToneSandhi:\n def __init__(self):\n self.must_neural_tone_words = {\n \"麻烦\",\n ... | import os
import re
import cn2an
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from .symbols import punctuation
from .tone_sandhi import ToneSandhi
from text import chinese_bert
from text.chinese_bert import get_bert_feature | 7,673 |
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
tone_modifier = ToneSandhi()
def replace_punctuation(text):
text = text.replace("嗯", "恩").replace("呣", "母")
pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
replaced_text = re.sub(
|
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
tone_modifier = ToneSandhi()
def replace_punctuation(text):
text = text.replace("嗯", "恩").replace("呣", "母")
pattern = re.compile("|".join(re.escape(p) for p in rep_map.keys()))
replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
replaced_text = re.sub( | r"[^\u4e00-\u9fa5" + "".join(punctuation) + r"]+", "", replaced_text | 0 | 2023-12-27 03:09:11+00:00 | 12k |
chinhsuanwu/ifusion-threestudio | extern/ldm_zero123/models/diffusion/ddpm.py | [
{
"identifier": "AutoencoderKL",
"path": "extern/ldm_zero123/models/autoencoder.py",
"snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i... | import itertools
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from functools import partial
from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm
from extern.ldm_zero123.models.autoencoder import (
AutoencoderKL,
IdentityFirstStage,
VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
extract_into_tensor,
make_beta_schedule,
noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
DiagonalGaussianDistribution,
normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
count_params,
default,
exists,
instantiate_from_config,
isimage,
ismap,
log_txt_as_img,
mean_flat,
) | 10,504 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(
self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.0,
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.0,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.0,
make_it_fit=False,
ucg_training=None,
):
super().__init__()
assert parameterization in [
"eps",
"x0",
], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(
f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode"
)
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(
self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.0,
v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.0,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.0,
make_it_fit=False,
ucg_training=None,
):
super().__init__()
assert parameterization in [
"eps",
"x0",
], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(
f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode"
)
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key) | count_params(self.model, verbose=True) | 11 | 2023-12-27 20:30:33+00:00 | 12k |
RaceCrewAI/gt-telem | gt_telem/turismo_client.py | [
{
"identifier": "PlayStationNotFoundError",
"path": "gt_telem/errors/playstation_errors.py",
"snippet": "class PlayStationNotFoundError(Exception):\n def __init__(self, message=\"Playstation not found on this network.\"):\n super().__init__(message)"
},
{
"identifier": "PlayStatonOnSta... | import asyncio
import copy
import logging
import socket
import threading
from collections import deque
from time import sleep
from gt_telem.errors.playstation_errors import (PlayStationNotFoundError,
PlayStatonOnStandbyError)
from gt_telem.models.helpers import SpanReader
from gt_telem.models.telemetry import Telemetry
from gt_telem.net.crypto import PDEncyption
from gt_telem.net.device_discover import get_ps_ip_type | 7,953 | loop.run_forever()
except KeyboardInterrupt:
loop.stop()
self._cancellation_token.set()
finally:
# Clean up any resources here if needed
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
async def run_async(self, cancellation_token: asyncio.Event=None) -> None:
"""
Asynchronously start the telemetry client and run the event loop.
Parameters:
- cancellation_token (asyncio.Event): Set token to shut down threads and return from run()
"""
self._cancellation_token = cancellation_token or asyncio.Event()
loop = asyncio.get_event_loop()
heartbeat_task = loop.create_task(self._send_heartbeat())
listen_task = loop.create_task(self._listen(loop))
# Run the tasks in the event loop
try:
await asyncio.gather(heartbeat_task, listen_task)
except KeyboardInterrupt:
self._cancellation_token.set()
loop.stop()
finally:
# Clean up any resources here if needed
await loop.shutdown_asyncgens()
async def _send_heartbeat(self) -> None:
"""
Send heartbeat messages at regular intervals to keep the telemetry stream alive.
"""
logging.info("Starting telemetry heartbeat.")
msg: bytes = b"A"
while not self._cancellation_token.is_set():
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.sendto(msg, (self.ip_addr, self.RECEIVE_PORT))
udp_socket.close()
await asyncio.sleep(10)
async def _listen(self, loop: asyncio.AbstractEventLoop) -> None:
"""
Listen for incoming telemetry data.
Parameters:
- loop: The asyncio event loop.
"""
logging.info(f"Listening for data on {self.ip_addr}:{self.BIND_PORT}")
class MyDatagramProtocol(asyncio.DatagramProtocol):
def __init__(self, client):
self.client = client
def datagram_received(self, data, addr):
self.client._handle_data(data)
udp_socket, _ = await loop.create_datagram_endpoint(
lambda: MyDatagramProtocol(self),
local_addr=("0.0.0.0", self.BIND_PORT)
)
await self._cancellation_token.wait()
udp_socket.close()
async def _process_telemetry_callbacks(self):
"""
Process telemetry callbacks.
"""
self._processing_callbacks = True
while True:
try:
# Wait for the next telemetry update callback
telemetry_value = await self._telem_callback_queue.get()
# Call the user-provided callback
for cb, args in self._telem_update_callbacks.items():
if args:
await cb(telemetry_value, *args)
else:
await cb(telemetry_value)
# Optionally introduce a delay here if needed
await asyncio.sleep(1 / 60) # 60 Hz update rate
except asyncio.CancelledError:
# The task is cancelled when the event loop is stopped
break
except Exception as e:
# Handle exceptions during callback processing
logging.error(f"Error processing telemetry {cb}: {e}")
self._processing_callbacks = False
def _handle_data(self, data: bytes) -> None:
"""
Handle incoming telemetry data.
Parameters:
- data: Raw telemetry data.
"""
try:
message: bytes = self._crypto.decrypt(data)
except Exception as e:
logging.debug(f"Failed to decrypt. Error: {e}. Wrong system?")
return
# First 4 bytes are header and indicate which system this is
try:
header: str = message[:4].decode("ascii")
except Exception as e:
logging.debug(f"Not sure what this is \n{message[:4]}. Error: {e}")
return
message: bytes = message[4:]
if not header in ["0S7G", "G6S0"]:
# bad data
logging.debug(f"Not sure what this is \n{header}")
return
if header == "0S7G":
|
class TurismoClient:
RECEIVE_PORT = 33339
BIND_PORT = 33340
def __init__(self, is_gt7: bool=True, ps_ip: str=None):
"""
Initialize TurismoClient.
Parameters:
- is_gt7 (bool): Flag indicating whether it's Gran Turismo 7. Default is True.
- ps_ip (str): PlayStation IP address. If None, it will be discovered.
"""
self._cancellation_token = None
ip, ps = get_ps_ip_type()
ip = ip or ps_ip
if not ip:
raise PlayStationNotFoundError()
if ps and "STANDBY" in ps:
raise PlayStatonOnStandbyError(ip)
logging.info(f"Using the {ps} at {ip}")
self.ip_addr: str = ip
if is_gt7:
self.RECEIVE_PORT += 400
self.BIND_PORT += 400
self._crypto: PDEncyption = PDEncyption(is_gt7)
self._telem_lock: threading.Lock = threading.Lock()
# Thread for when run w/o wait:
self._loop_thread = threading.Thread(target=self._run_forever_threaded)
self._telem: Telemetry = None
self._telem_update_callbacks = {}
self._telem_callback_queue = asyncio.LifoQueue(maxsize=1)
self._processing_callbacks = False
@property
def telemetry(self) -> Telemetry:
"""
Get a copy of the telemetry data.
Returns:
Telemetry: A copy of the telemetry data.
"""
if not self._telem:
return None
with self._telem_lock:
cpy: Telemetry = copy.deepcopy(self._telem)
return cpy
@telemetry.setter
def telemetry(self, value: Telemetry) -> None:
"""
Set the telemetry data and call any registered callbacks.
Parameters:
- value (Telemetry): Telemetry data to set.
"""
with self._telem_lock:
self._telem = value
try:
self._telem_callback_queue.put_nowait(value)
except asyncio.QueueFull:
self._telem_callback_queue.get_nowait()
self._telem_callback_queue.put_nowait(value)
if not self._processing_callbacks:
asyncio.create_task(self._process_telemetry_callbacks())
def register_callback(self, callback, args=None):
"""
Register an awaitable callback to be invoked when new telemetry is received.
The telemetry object is sent as the first parameter, and additional
args can be passed if specified.
Callbacks are executed off the main thread, potentially compromising
state integrity (e.g., using `self.` within your callback won't work).
To work around this limitation, declare your callback as a @staticmethod,
pass the class instance (self) as an argument, and receive the context of
the class in your parameters (after telemetry, which is the first).
.. code-block:: python
def __init__(self, tc: TurismoClient):
tc.add_callback(MyClass.parse_telem, [self])
@staticmethod
async def parse_telem(t: Telemetry, context: MyClass):
self = context
Additionally, note that the game sends telemetry at the same frequency as
the frame rate (~60/s). If your callback takes too long to process and exit,
subsequent callbacks will not be invoked until it returns.
"""
self._telem_update_callbacks[callback] = args
def deregister_callback(self, callback):
"""
Deregister a callback.
Parameters:
- callback: Callback to deregister.
"""
self._telem_update_callbacks.pop(callback)
def start(self):
self._loop_thread.start()
def stop(self):
self._cancellation_token.set()
self._loop_thread.join()
def _run_forever_threaded(self, cancellation_token: asyncio.Event=None) -> None:
"""
Start the telemetry client and return immediately. Must provide cancellation token.
Parameters:
- cancellation_token (asyncio.Event): Set token to shut down threads and return from run()
"""
asyncio.run(self.run_async(cancellation_token))
def run(self, cancellation_token: asyncio.Event=None) -> None:
"""
Start the telemetry client and run the event loop. Blocking.
Parameters:
- cancellation_token (asyncio.Event): Set token to shut down threads and return from run()
"""
self._cancellation_token = cancellation_token or asyncio.Event()
loop = asyncio.get_event_loop()
heartbeat_task = loop.create_task(self._send_heartbeat())
listen_task = loop.create_task(self._listen(loop))
# Run the tasks in the event loop
try:
loop.run_forever()
except KeyboardInterrupt:
loop.stop()
self._cancellation_token.set()
finally:
# Clean up any resources here if needed
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
async def run_async(self, cancellation_token: asyncio.Event=None) -> None:
"""
Asynchronously start the telemetry client and run the event loop.
Parameters:
- cancellation_token (asyncio.Event): Set token to shut down threads and return from run()
"""
self._cancellation_token = cancellation_token or asyncio.Event()
loop = asyncio.get_event_loop()
heartbeat_task = loop.create_task(self._send_heartbeat())
listen_task = loop.create_task(self._listen(loop))
# Run the tasks in the event loop
try:
await asyncio.gather(heartbeat_task, listen_task)
except KeyboardInterrupt:
self._cancellation_token.set()
loop.stop()
finally:
# Clean up any resources here if needed
await loop.shutdown_asyncgens()
async def _send_heartbeat(self) -> None:
"""
Send heartbeat messages at regular intervals to keep the telemetry stream alive.
"""
logging.info("Starting telemetry heartbeat.")
msg: bytes = b"A"
while not self._cancellation_token.is_set():
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.sendto(msg, (self.ip_addr, self.RECEIVE_PORT))
udp_socket.close()
await asyncio.sleep(10)
async def _listen(self, loop: asyncio.AbstractEventLoop) -> None:
"""
Listen for incoming telemetry data.
Parameters:
- loop: The asyncio event loop.
"""
logging.info(f"Listening for data on {self.ip_addr}:{self.BIND_PORT}")
class MyDatagramProtocol(asyncio.DatagramProtocol):
def __init__(self, client):
self.client = client
def datagram_received(self, data, addr):
self.client._handle_data(data)
udp_socket, _ = await loop.create_datagram_endpoint(
lambda: MyDatagramProtocol(self),
local_addr=("0.0.0.0", self.BIND_PORT)
)
await self._cancellation_token.wait()
udp_socket.close()
async def _process_telemetry_callbacks(self):
"""
Process telemetry callbacks.
"""
self._processing_callbacks = True
while True:
try:
# Wait for the next telemetry update callback
telemetry_value = await self._telem_callback_queue.get()
# Call the user-provided callback
for cb, args in self._telem_update_callbacks.items():
if args:
await cb(telemetry_value, *args)
else:
await cb(telemetry_value)
# Optionally introduce a delay here if needed
await asyncio.sleep(1 / 60) # 60 Hz update rate
except asyncio.CancelledError:
# The task is cancelled when the event loop is stopped
break
except Exception as e:
# Handle exceptions during callback processing
logging.error(f"Error processing telemetry {cb}: {e}")
self._processing_callbacks = False
def _handle_data(self, data: bytes) -> None:
"""
Handle incoming telemetry data.
Parameters:
- data: Raw telemetry data.
"""
try:
message: bytes = self._crypto.decrypt(data)
except Exception as e:
logging.debug(f"Failed to decrypt. Error: {e}. Wrong system?")
return
# First 4 bytes are header and indicate which system this is
try:
header: str = message[:4].decode("ascii")
except Exception as e:
logging.debug(f"Not sure what this is \n{message[:4]}. Error: {e}")
return
message: bytes = message[4:]
if not header in ["0S7G", "G6S0"]:
# bad data
logging.debug(f"Not sure what this is \n{header}")
return
if header == "0S7G": | sr: SpanReader = SpanReader(message, "little") | 2 | 2023-12-23 03:37:54+00:00 | 12k |
gardenifi/server | app/raspi/services.py | [
{
"identifier": "DayValueException",
"path": "app/raspi/exceptions.py",
"snippet": "class DayValueException(Exception):\n \"\"\"Specific exception definition.\"\"\"\n\n def __init__(self, argument_name):\n self.argument_name = argument_name\n super().__init__(f\"Day is not correct: {... | import json
from threading import Thread
from os import path, remove
from loguru import logger
from apscheduler.triggers.combining import OrTrigger
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from app.raspi.exceptions import DayValueException
from app.raspi.const import (
DAYS,
PROGRAM,
PROGRAM_EXT,
RPI_HW_ID,
ARCH,
MQTT_HOST,
MQTT_PORT,
MQTT_USER,
MQTT_PASS,
MAX_NUM_OF_BYTES_CHUNK,
MAX_NUM_OF_BUFFER_TO_ADD,
)
from app.raspi.helpers import Helpers | 7,326 | if adjusted_hour <= 0:
days_passed = -1
elif adjusted_hour >= 24:
days_passed = 1
else:
days_passed = 0
adjusted_hour = adjusted_hour % 24
return adjusted_hour, days_passed
def get_previous_day(self, current_day):
"""
Returns the name of the previous day based on the given current day.
Parameters:
- current_day (str): The name of the current day (e.g., 'mon').
Returns:
str: The name of the previous day.
"""
# Find the index of the current day
current_index = DAYS.index(current_day)
# Calculate the index of the previous day
previous_index = (current_index - 1) % len(DAYS)
# Get the name of the previous day
previous_day = DAYS[previous_index]
return previous_day
def get_next_day(self, current_day):
"""
Returns the name of the next day based on the given current day.
Parameters:
- current_day (str): The name of the current day (e.g., 'mon').
Returns:
str: The name of the next day.
"""
# Find the index of the current day
current_index = DAYS.index(current_day)
# Calculate the index of the next day
next_index = (current_index + 1) % len(DAYS)
# Get the name of the next day
next_day = DAYS[next_index]
return next_day
def get_start_day_hour(self, day, start_hour, tz_offset):
"""
Checks if the start day or hour should be adjusted based on the provided conditions.
Parameters:
- day (str): The name of the current day (e.g., 'Monday').
- start_hour (int): The original start hour (0 to 23).
- tz_offset (int): The timezone offset in hours (-12 to +14).
Returns:
tuple: A tuple containing the adjusted day and start hour based on the provided conditions.
"""
logger.info(f"Checking whether start_day should change: {day}")
# Convert start_hour to UTC (e.g. start_hour=0, tz_offset=2, start_hour=22)
start_hour, days_passed = self.convert_to_utc(start_hour, tz_offset)
if days_passed == 1:
day = self.get_next_day(day)
elif days_passed == -1:
day = self.get_previous_day(day)
logger.info(f"new start_day: {day}")
logger.info(f"new start_hour: {start_hour}")
return day, start_hour
def get_stop_datetime(self, day, start_hour, start_min, period):
"""
Calculate the stop time for a program cycle.
Parameters:
- day (str): The day of the week.
- start_hour (int): The starting hour.
- start_min (int): The starting minute.
- period (int): The duration of the cycle in minutes.
Returns:
tuple: A tuple containing the stop day, stop hour, and stop minute.
"""
logger.debug(f"Converting to correct day, start, stop: {day}, {start_hour}, {start_min}, {period}")
stop_day_index = DAYS.index(day)
logger.debug(f"stop_day_index {stop_day_index}")
stop_min = (start_min + period) % 60
logger.debug(f"stop_min {stop_min}")
if stop_min < start_min:
# should go to the next hour
stop_hour = (start_hour + 1) % 24
# should go to the next day
if stop_hour < start_hour:
stop_day_index = (stop_day_index + 1) % 7
else:
stop_hour = start_hour
logger.debug(f"stop_hour {stop_hour}")
stop_day = DAYS[stop_day_index]
logger.debug(f"stop_day: {stop_day}")
return stop_day, stop_hour, stop_min
def store_program_cycles(self, json_data, store=False) -> None:
"""
Store program cycles and schedule them using the scheduler.
Parameters:
- json_data (dict): JSON data containing program information.
- store (bool, optional): Whether to store the program information. Default is False.
Returns:
None
"""
try:
triggers_to_start = []
triggers_to_stop = []
for day in json_data["days"].split(","):
if day not in DAYS:
| """MIT License
Copyright (c) 2023, Marios Karagiannopoulos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
**Attribution Requirement:**
When using or distributing the software, an attribution to Marios Karagiannopoulos must be included.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# pylint: disable=too-many-locals
class Services:
"""
The `Services` class provides various methods for managing and controlling
services related to a Raspberry Pi device, such as turning on/off valves,
storing and deleting program cycles, loading program cycles, discovering
WiFi networks, and saving WiFi network configurations.
"""
def __init__(self):
"""Constructor"""
self._scheduler = BackgroundScheduler()
self._scheduler_started = False
@property
def scheduler_started(self):
"""getter"""
return self._scheduler_started
@scheduler_started.setter
def scheduler_started(self, value):
"""setter"""
self._scheduler_started = value
@property
def scheduler(self):
"""getter"""
return self._scheduler
@scheduler.setter
def scheduler(self, value):
"""setter"""
self._scheduler = value
def turn_on_from_program(self, valve):
"""
Turn on a valve based on the program.
Parameters:
- valve (int): The valve number.
Returns:
None
"""
return Helpers().toggle(2, "out" + str(valve))
def turn_off_from_program(self, valve):
"""
Turn off a valve based on the program.
Parameters:
- valve (int): The valve number.
Returns:
None
"""
return Helpers().toggle(0, "out" + str(valve))
def convert_to_utc(self, start_hour, tz_offset):
"""
Converts a given start hour in a specific time zone to Coordinated Universal Time (UTC).
Args:
start_hour (int): The starting hour in the local time zone.
tz_offset (int): The time zone offset in hours. Positive values for time zones ahead of UTC,
negative values for time zones behind UTC.
Returns:
Tuple[int, int]: A tuple containing the adjusted hour in UTC and the number of days passed.
The adjusted hour is in the range [0, 23], and the days_passed is -1, 0, or 1
indicating whether the adjusted hour falls before, within, or after the current day.
Example:
For a local start_hour of 10 and tz_offset of -5 (Eastern Standard Time),
convert_to_utc(10, -5) may return (5, 0), indicating that the adjusted UTC hour is 5 with no days passed.
Note:
The method assumes a 24-hour clock format.
"""
logger.info(f"Checking whether start_hour should change: {start_hour}, tz_offset: {tz_offset}")
# Calculate the adjusted hour
adjusted_hour = start_hour - tz_offset
if adjusted_hour <= 0:
days_passed = -1
elif adjusted_hour >= 24:
days_passed = 1
else:
days_passed = 0
adjusted_hour = adjusted_hour % 24
return adjusted_hour, days_passed
def get_previous_day(self, current_day):
"""
Returns the name of the previous day based on the given current day.
Parameters:
- current_day (str): The name of the current day (e.g., 'mon').
Returns:
str: The name of the previous day.
"""
# Find the index of the current day
current_index = DAYS.index(current_day)
# Calculate the index of the previous day
previous_index = (current_index - 1) % len(DAYS)
# Get the name of the previous day
previous_day = DAYS[previous_index]
return previous_day
def get_next_day(self, current_day):
"""
Returns the name of the next day based on the given current day.
Parameters:
- current_day (str): The name of the current day (e.g., 'mon').
Returns:
str: The name of the next day.
"""
# Find the index of the current day
current_index = DAYS.index(current_day)
# Calculate the index of the next day
next_index = (current_index + 1) % len(DAYS)
# Get the name of the next day
next_day = DAYS[next_index]
return next_day
def get_start_day_hour(self, day, start_hour, tz_offset):
"""
Checks if the start day or hour should be adjusted based on the provided conditions.
Parameters:
- day (str): The name of the current day (e.g., 'Monday').
- start_hour (int): The original start hour (0 to 23).
- tz_offset (int): The timezone offset in hours (-12 to +14).
Returns:
tuple: A tuple containing the adjusted day and start hour based on the provided conditions.
"""
logger.info(f"Checking whether start_day should change: {day}")
# Convert start_hour to UTC (e.g. start_hour=0, tz_offset=2, start_hour=22)
start_hour, days_passed = self.convert_to_utc(start_hour, tz_offset)
if days_passed == 1:
day = self.get_next_day(day)
elif days_passed == -1:
day = self.get_previous_day(day)
logger.info(f"new start_day: {day}")
logger.info(f"new start_hour: {start_hour}")
return day, start_hour
def get_stop_datetime(self, day, start_hour, start_min, period):
"""
Calculate the stop time for a program cycle.
Parameters:
- day (str): The day of the week.
- start_hour (int): The starting hour.
- start_min (int): The starting minute.
- period (int): The duration of the cycle in minutes.
Returns:
tuple: A tuple containing the stop day, stop hour, and stop minute.
"""
logger.debug(f"Converting to correct day, start, stop: {day}, {start_hour}, {start_min}, {period}")
stop_day_index = DAYS.index(day)
logger.debug(f"stop_day_index {stop_day_index}")
stop_min = (start_min + period) % 60
logger.debug(f"stop_min {stop_min}")
if stop_min < start_min:
# should go to the next hour
stop_hour = (start_hour + 1) % 24
# should go to the next day
if stop_hour < start_hour:
stop_day_index = (stop_day_index + 1) % 7
else:
stop_hour = start_hour
logger.debug(f"stop_hour {stop_hour}")
stop_day = DAYS[stop_day_index]
logger.debug(f"stop_day: {stop_day}")
return stop_day, stop_hour, stop_min
def store_program_cycles(self, json_data, store=False) -> None:
"""
Store program cycles and schedule them using the scheduler.
Parameters:
- json_data (dict): JSON data containing program information.
- store (bool, optional): Whether to store the program information. Default is False.
Returns:
None
"""
try:
triggers_to_start = []
triggers_to_stop = []
for day in json_data["days"].split(","):
if day not in DAYS: | raise DayValueException(f"{day} is not correct! Accepted values: {DAYS}") | 0 | 2023-12-22 08:06:09+00:00 | 12k |
bclavie/RAGatouille | ragatouille/RAGTrainer.py | [
{
"identifier": "LateInteractionModel",
"path": "ragatouille/models/base.py",
"snippet": "class LateInteractionModel(ABC):\n @abstractmethod\n def __init__(\n self,\n pretrained_model_name_or_path: Union[str, Path],\n n_gpu,\n ):\n ...\n\n @abstractmethod\n def... | from pathlib import Path
from typing import Union, Literal, Optional
from colbert.infra import ColBERTConfig
from ragatouille.models import LateInteractionModel, ColBERT
from ragatouille.negative_miners import HardNegativeMiner, SimpleMiner
from ragatouille.utils import seeded_shuffle
from ragatouille.data import TrainingDataProcessor | 7,472 |
class RAGTrainer:
"""Main trainer to fine-tune/train ColBERT models with a few lines."""
model: Union[LateInteractionModel, None] = None
negative_miner: Union[HardNegativeMiner, None] = None
collection: list[str] = []
queries: Union[list[str], None] = None
raw_data: Union[list[tuple], list[list], None] = None
training_triplets: list[list[int]] = list()
def __init__(
self,
model_name: str,
pretrained_model_name: str,
language_code: str = "en",
n_usable_gpus: int = -1,
):
"""
Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from.
Parameters:
model_name: str - Name of the model to train. This will be used to name the checkpoints and the index.
pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name.
language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index.
n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available.
Returns:
self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised.
"""
self.model_name = model_name
self.pretrained_model_name = pretrained_model_name
self.language_code = language_code
self.model = ColBERT(
pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus
)
def add_documents(self, documents: list[str]):
self.collection += documents
seeded_shuffle(self.collection)
def export_training_data(self, path: Union[str, Path]):
"""
Manually export the training data processed by prepare_training_data to a given path.
Parameters:
path: Union[str, Path] - Path to the directory where the data will be exported."""
self.data_processor.export_training_data(path)
def prepare_training_data(
self,
raw_data: Union[list[tuple], list[list]],
all_documents: Optional[list[str]] = None,
data_out_path: Union[str, Path] = "./data/",
num_new_negatives: int = 10,
hard_negative_minimum_rank: int = 10,
mine_hard_negatives: bool = True,
hard_negative_model_size: str = "small",
pairs_with_labels: bool = False,
positive_label: Union[int, str] = 1,
negative_label: Union[int, str] = 0,
) -> str:
"""
Fully pre-process input-data in various raw formats into ColBERT-ready files and triplets.
Will accept a variety of formats, such as unannotated pairs, annotated pairs, triplets of strings and triplets of list of strings.
Will process into a ColBERT-ready format and export to data_out_path.
Will generate hard negatives if mine_hard_negatives is True.
num_new_negatives decides how many negatives will be generated. if mine_hard_negatives is False and num_new_negatives is > 0, these negatives will be randomly sampled.
Parameters:
raw_data: Union[list[tuple], list[list]] - List of pairs, annotated pairs, or triplets of strings.
all_documents: Optional[list[str]] - A corpus of documents to be used for sampling negatives.
data_out_path: Union[str, Path] - Path to the directory where the data will be exported (can be a tmp directory).
num_new_negatives: int - Number of new negatives to generate for each query.
mine_hard_negatives: bool - Whether to use hard negatives mining or not.
hard_negative_model_size: str - Size of the model to use for hard negatives mining.
pairs_with_labels: bool - Whether the raw_data is a list of pairs with labels or not.
positive_label: Union[int, str] - Label to use for positive pairs.
negative_label: Union[int, str] - Label to use for negative pairs.
Returns:
data_out_path: Union[str, Path] - Path to the directory where the data has been exported.
"""
if all_documents is not None:
self.collection += all_documents
self.data_dir = Path(data_out_path)
if len(raw_data[0]) == 2:
data_type = "pairs"
if pairs_with_labels:
data_type = "labeled_pairs"
elif len(raw_data[0]) == 3:
data_type = "triplets"
else:
raise ValueError("Raw data must be a list of pairs or triplets of strings.")
self.collection += [x[1] for x in raw_data]
if data_type == "triplets":
self.collection += [x[2] for x in raw_data]
self.queries = set([x[0] for x in raw_data])
self.collection = list(set(self.collection))
seeded_shuffle(self.collection)
if mine_hard_negatives:
self.negative_miner = SimpleMiner(
language_code=self.language_code,
model_size=hard_negative_model_size,
)
self.negative_miner.build_index(self.collection)
|
class RAGTrainer:
"""Main trainer to fine-tune/train ColBERT models with a few lines."""
model: Union[LateInteractionModel, None] = None
negative_miner: Union[HardNegativeMiner, None] = None
collection: list[str] = []
queries: Union[list[str], None] = None
raw_data: Union[list[tuple], list[list], None] = None
training_triplets: list[list[int]] = list()
def __init__(
self,
model_name: str,
pretrained_model_name: str,
language_code: str = "en",
n_usable_gpus: int = -1,
):
"""
Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from.
Parameters:
model_name: str - Name of the model to train. This will be used to name the checkpoints and the index.
pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name.
language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index.
n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available.
Returns:
self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised.
"""
self.model_name = model_name
self.pretrained_model_name = pretrained_model_name
self.language_code = language_code
self.model = ColBERT(
pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus
)
def add_documents(self, documents: list[str]):
self.collection += documents
seeded_shuffle(self.collection)
def export_training_data(self, path: Union[str, Path]):
"""
Manually export the training data processed by prepare_training_data to a given path.
Parameters:
path: Union[str, Path] - Path to the directory where the data will be exported."""
self.data_processor.export_training_data(path)
def prepare_training_data(
self,
raw_data: Union[list[tuple], list[list]],
all_documents: Optional[list[str]] = None,
data_out_path: Union[str, Path] = "./data/",
num_new_negatives: int = 10,
hard_negative_minimum_rank: int = 10,
mine_hard_negatives: bool = True,
hard_negative_model_size: str = "small",
pairs_with_labels: bool = False,
positive_label: Union[int, str] = 1,
negative_label: Union[int, str] = 0,
) -> str:
"""
Fully pre-process input-data in various raw formats into ColBERT-ready files and triplets.
Will accept a variety of formats, such as unannotated pairs, annotated pairs, triplets of strings and triplets of list of strings.
Will process into a ColBERT-ready format and export to data_out_path.
Will generate hard negatives if mine_hard_negatives is True.
num_new_negatives decides how many negatives will be generated. if mine_hard_negatives is False and num_new_negatives is > 0, these negatives will be randomly sampled.
Parameters:
raw_data: Union[list[tuple], list[list]] - List of pairs, annotated pairs, or triplets of strings.
all_documents: Optional[list[str]] - A corpus of documents to be used for sampling negatives.
data_out_path: Union[str, Path] - Path to the directory where the data will be exported (can be a tmp directory).
num_new_negatives: int - Number of new negatives to generate for each query.
mine_hard_negatives: bool - Whether to use hard negatives mining or not.
hard_negative_model_size: str - Size of the model to use for hard negatives mining.
pairs_with_labels: bool - Whether the raw_data is a list of pairs with labels or not.
positive_label: Union[int, str] - Label to use for positive pairs.
negative_label: Union[int, str] - Label to use for negative pairs.
Returns:
data_out_path: Union[str, Path] - Path to the directory where the data has been exported.
"""
if all_documents is not None:
self.collection += all_documents
self.data_dir = Path(data_out_path)
if len(raw_data[0]) == 2:
data_type = "pairs"
if pairs_with_labels:
data_type = "labeled_pairs"
elif len(raw_data[0]) == 3:
data_type = "triplets"
else:
raise ValueError("Raw data must be a list of pairs or triplets of strings.")
self.collection += [x[1] for x in raw_data]
if data_type == "triplets":
self.collection += [x[2] for x in raw_data]
self.queries = set([x[0] for x in raw_data])
self.collection = list(set(self.collection))
seeded_shuffle(self.collection)
if mine_hard_negatives:
self.negative_miner = SimpleMiner(
language_code=self.language_code,
model_size=hard_negative_model_size,
)
self.negative_miner.build_index(self.collection)
| self.data_processor = TrainingDataProcessor( | 5 | 2023-12-29 16:26:42+00:00 | 12k |
shibing624/chatgpt-webui | main.py | [
{
"identifier": "http_proxy",
"path": "src/config.py",
"snippet": "def retrieve_openai_api(api_key=None):\ndef retrieve_proxy(proxy=None):\ndef update_doc_config(two_column_pdf):"
},
{
"identifier": "get_model",
"path": "src/models.py",
"snippet": "def get_model(\n model_name,\n ... | import gradio as gr
from loguru import logger
from src.config import (
http_proxy,
hide_history_when_not_logged_in,
chat_name_method_index,
my_api_key, multi_api_key, server_name,
server_port, share, config_file, api_host,
authflag,
dockerflag,
show_api_billing,
latex_delimiters_set,
user_avatar, bot_avatar,
update_doc_config,
)
from src.models import get_model
from src.overwrites import (
postprocess, postprocess_chat_messages,
reload_javascript, get_html,
)
from src.presets import (
MODELS,
HISTORY_NAME_METHODS,
small_and_beautiful_theme,
CONCURRENT_COUNT,
CHUANHU_TITLE,
HIDE_MY_KEY,
DEFAULT_MODEL,
REPLY_LANGUAGES,
INITIAL_SYSTEM_PROMPT,
ENABLE_STREAMING_OPTION,
CHUANHU_DESCRIPTION,
favicon_path,
API_HOST,
HISTORY_DIR,
assets_path,
)
from src.utils import (
delete_chat_history,
filter_history,
get_history_list,
auto_name_chat_history,
get_template_dropdown,
rename_chat_history,
init_history_list,
get_first_history_name,
setup_wizard,
auth_from_conf,
get_geoip,
get_template_names,
load_template,
get_history_names,
reset,
predict,
interrupt,
retry,
i18n,
dislike,
toggle_like_btn_visibility,
set_key,
set_single_turn,
hide_middle_chars,
set_system_prompt,
start_outputing,
set_token_upper_limit,
set_temperature,
set_user_identifier,
set_top_p,
delete_first_conversation,
delete_last_conversation,
set_n_choices,
set_logit_bias,
load_chat_history,
end_outputing,
set_max_tokens,
reset_default,
reset_textbox,
set_stop_sequence,
set_presence_penalty, set_frequency_penalty,
upload_chat_history,
export_markdown,
billing_info,
get_template_content,
like,
transfer_input,
handle_file_upload,
handle_summarize_index,
) | 7,923 | single_turn_checkbox.change(
set_single_turn, [current_model, single_turn_checkbox], None, show_progress=False)
model_select_dropdown.change(get_model,
[model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider,
top_p_slider, systemPromptTxt, user_name, current_model], [
current_model, status_display, chatbot, lora_select_dropdown, user_api_key,
keyTxt], show_progress=True, api_name="get_model")
model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [
like_dislike_area], show_progress=False)
lora_select_dropdown.change(get_model,
[model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider,
top_p_slider, systemPromptTxt, user_name, current_model],
[current_model, status_display, chatbot], show_progress=True)
# Template
systemPromptTxt.input(set_system_prompt, [
current_model, systemPromptTxt], None)
templateRefreshBtn.click(get_template_dropdown, None, [
templateFileSelectDropdown])
templateFileSelectDropdown.input(
load_template,
[templateFileSelectDropdown],
[promptTemplates, templateSelectDropdown],
show_progress=True,
)
templateSelectDropdown.change(
get_template_content,
[promptTemplates, templateSelectDropdown, systemPromptTxt],
[systemPromptTxt],
show_progress=True,
)
# S&L
renameHistoryBtn.click(
rename_chat_history,
[current_model, saveFileName, chatbot],
[historySelectList],
show_progress=True,
_js='(a,b,c,d)=>{return saveChatHistory(a,b,c,d);}'
)
exportMarkdownBtn.click(
export_markdown,
[current_model, saveFileName, chatbot],
[],
show_progress=True,
)
historyRefreshBtn.click(**refresh_history_args)
historyDeleteBtn.click(delete_chat_history, [current_model, historySelectList],
[status_display, historySelectList, chatbot],
_js='(a,b,c)=>{return showConfirmationDialog(a, b, c);}').then(
reset,
inputs=[current_model, retain_system_prompt_checkbox],
outputs=[chatbot, status_display, historySelectList, systemPromptTxt],
show_progress=True,
_js='(a,b)=>{return clearChatbot(a,b);}',
)
historySelectList.input(**load_history_from_file_args)
uploadFileBtn.upload(upload_chat_history, [current_model, uploadFileBtn], [
saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider, presence_penalty_slider,
frequency_penalty_slider, logit_bias_txt, user_identifier_txt]).then(**refresh_history_args)
historyDownloadBtn.click(None, [
user_name, historySelectList], None, _js='(a,b)=>{return downloadHistory(a,b,".json");}')
historyMarkdownDownloadBtn.click(None, [
user_name, historySelectList], None, _js='(a,b)=>{return downloadHistory(a,b,".md");}')
historySearchTextbox.input(
filter_history,
[user_name, historySearchTextbox],
[historySelectList]
)
# Advanced
temperature_slider.input(
set_temperature, [current_model, temperature_slider], None, show_progress=False)
top_p_slider.input(set_top_p, [current_model, top_p_slider], None, show_progress=False)
n_choices_slider.input(
set_n_choices, [current_model, n_choices_slider], None, show_progress=False)
stop_sequence_txt.input(
set_stop_sequence, [current_model, stop_sequence_txt], None, show_progress=False)
max_context_length_slider.input(
set_token_upper_limit, [current_model, max_context_length_slider], None, show_progress=False)
max_generation_slider.input(
set_max_tokens, [current_model, max_generation_slider], None, show_progress=False)
presence_penalty_slider.input(
set_presence_penalty, [current_model, presence_penalty_slider], None, show_progress=False)
frequency_penalty_slider.input(
set_frequency_penalty, [current_model, frequency_penalty_slider], None, show_progress=False)
logit_bias_txt.input(
set_logit_bias, [current_model, logit_bias_txt], None, show_progress=False)
user_identifier_txt.input(set_user_identifier, [
current_model, user_identifier_txt], None, show_progress=False)
default_btn.click(
reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True
)
# Invisible elements
changeSingleSessionBtn.click(
fn=lambda value: gr.Checkbox.update(value=value),
inputs=[single_turn_checkbox],
outputs=[single_turn_checkbox],
_js='(a)=>{return bgChangeSingleSession(a);}'
)
historySelectBtn.click( # This is an experimental feature... Not actually used.
fn=load_chat_history,
inputs=[current_model, historySelectList],
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
_js='(a,b)=>{return bgSelectHistory(a,b);}'
)
demo.title = CHUANHU_TITLE
if __name__ == "__main__":
reload_javascript()
setup_wizard()
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
allowed_paths=[HISTORY_DIR, assets_path],
server_name=server_name,
| # -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages
gr.Chatbot.postprocess = postprocess
with gr.Blocks(theme=small_and_beautiful_theme) as demo:
user_name = gr.Textbox("", visible=False)
promptTemplates = gr.State(load_template(get_template_names()[0], mode=2))
user_question = gr.State("")
assert type(my_api_key) == str
user_api_key = gr.State(my_api_key)
current_model = gr.State()
topic = gr.State(i18n("未命名对话历史记录"))
with gr.Row(elem_id="chuanhu-header"):
gr.HTML(get_html("header_title.html").format(
app_title=CHUANHU_TITLE), elem_id="app-title")
status_display = gr.Markdown(get_geoip, elem_id="status-display")
with gr.Row(elem_id="float-display"):
user_info = gr.Markdown(
value="getting user info...", elem_id="user-info")
with gr.Row(equal_height=True, elem_id="chuanhu-body"):
with gr.Column(elem_id="menu-area"):
with gr.Column(elem_id="chuanhu-history"):
with gr.Box():
with gr.Row(elem_id="chuanhu-history-header"):
with gr.Row(elem_id="chuanhu-history-search-row"):
with gr.Column(min_width=150, scale=2):
historySearchTextbox = gr.Textbox(show_label=False, container=False, placeholder=i18n(
"搜索(支持正则)..."), lines=1, elem_id="history-search-tb")
with gr.Column(min_width=52, scale=1, elem_id="gr-history-header-btns"):
uploadFileBtn = gr.UploadButton(
interactive=True, label="", file_types=[".json"], elem_id="gr-history-upload-btn")
historyRefreshBtn = gr.Button("", elem_id="gr-history-refresh-btn")
with gr.Row(elem_id="chuanhu-history-body"):
with gr.Column(scale=6, elem_id="history-select-wrap"):
historySelectList = gr.Radio(
label=i18n("从列表中加载对话"),
choices=get_history_names(),
value=get_first_history_name(),
# multiselect=False,
container=False,
elem_id="history-select-dropdown"
)
with gr.Row(visible=False):
with gr.Column(min_width=42, scale=1):
historyDeleteBtn = gr.Button(
"🗑️", elem_id="gr-history-delete-btn")
with gr.Column(min_width=42, scale=1):
historyDownloadBtn = gr.Button(
"⏬", elem_id="gr-history-download-btn")
with gr.Column(min_width=42, scale=1):
historyMarkdownDownloadBtn = gr.Button(
"⤵️", elem_id="gr-history-mardown-download-btn")
with gr.Row(visible=False):
with gr.Column(scale=6):
saveFileName = gr.Textbox(
show_label=True,
placeholder=i18n("设置文件名: 默认为.json,可选为.md"),
label=i18n("设置保存文件名"),
value=i18n("对话历史记录"),
elem_classes="no-container"
# container=False,
)
with gr.Column(scale=1):
renameHistoryBtn = gr.Button(
i18n("💾 保存对话"), elem_id="gr-history-save-btn")
exportMarkdownBtn = gr.Button(
i18n("📝 导出为 Markdown"), elem_id="gr-markdown-export-btn")
with gr.Column(elem_id="chuanhu-menu-footer"):
with gr.Row(elem_id="chuanhu-func-nav"):
gr.HTML(get_html("func_nav.html"))
# gr.HTML(get_html("footer.html").format(versions=versions_html()), elem_id="footer")
# gr.Markdown(CHUANHU_DESCRIPTION, elem_id="chuanhu-author")
with gr.Column(elem_id="chuanhu-area", scale=5):
with gr.Column(elem_id="chatbot-area"):
with gr.Row(elem_id="chatbot-header"):
model_select_dropdown = gr.Dropdown(
label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL],
interactive=True,
show_label=False, container=False, elem_id="model-select-dropdown"
)
lora_select_dropdown = gr.Dropdown(
label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True,
container=False, visible=False,
)
gr.HTML(get_html("chatbot_header_btn.html").format(
json_label=i18n("历史记录(JSON)"),
md_label=i18n("导出为 Markdown")
), elem_id="chatbot-header-btn-bar")
with gr.Row():
chatbot = gr.Chatbot(
label="ChatGPT",
elem_id="chuanhu-chatbot",
latex_delimiters=latex_delimiters_set,
sanitize_html=False,
# height=700,
show_label=False,
avatar_images=[user_avatar, bot_avatar],
show_share_button=False,
)
with gr.Row(elem_id="chatbot-footer"):
with gr.Box(elem_id="chatbot-input-box"):
with gr.Row(elem_id="chatbot-input-row"):
gr.HTML(get_html("chatbot_more.html").format(
single_turn_label=i18n("单轮对话"),
websearch_label=i18n("在线搜索"),
upload_file_label=i18n("上传文件"),
uploaded_files_label=i18n("知识库文件"),
uploaded_files_tip=i18n("在工具箱中管理知识库文件")
))
with gr.Row(elem_id="chatbot-input-tb-row"):
with gr.Column(min_width=225, scale=12):
user_input = gr.Textbox(
elem_id="user-input-tb",
show_label=False,
placeholder=i18n("在这里输入"),
elem_classes="no-container",
max_lines=5,
# container=False
)
with gr.Column(min_width=42, scale=1, elem_id="chatbot-ctrl-btns"):
submitBtn = gr.Button(
value="", variant="primary", elem_id="submit-btn")
cancelBtn = gr.Button(
value="", variant="secondary", visible=False, elem_id="cancel-btn")
# Note: Buttons below are set invisible in UI. But they are used in JS.
with gr.Row(elem_id="chatbot-buttons", visible=False):
with gr.Column(min_width=120, scale=1):
emptyBtn = gr.Button(
i18n("🧹 新的对话"), elem_id="empty-btn"
)
with gr.Column(min_width=120, scale=1):
retryBtn = gr.Button(
i18n("🔄 重新生成"), elem_id="gr-retry-btn")
with gr.Column(min_width=120, scale=1):
delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
with gr.Column(min_width=120, scale=1):
delLastBtn = gr.Button(
i18n("🗑️ 删除最新对话"), elem_id="gr-dellast-btn")
with gr.Row(visible=False) as like_dislike_area:
with gr.Column(min_width=20, scale=1):
likeBtn = gr.Button(
"👍", elem_id="gr-like-btn")
with gr.Column(min_width=20, scale=1):
dislikeBtn = gr.Button(
"👎", elem_id="gr-dislike-btn")
with gr.Column(elem_id="toolbox-area", scale=1):
# For CSS setting, there is an extra box. Don't remove it.
with gr.Box(elem_id="chuanhu-toolbox"):
with gr.Row():
gr.Markdown("## " + i18n("工具箱"))
gr.HTML(get_html("close_btn.html").format(
obj="toolbox"), elem_classes="close-btn")
with gr.Tabs(elem_id="chuanhu-toolbox-tabs"):
with gr.Tab(label=i18n("对话")):
with gr.Accordion(label=i18n("模型"), open=not HIDE_MY_KEY, visible=not HIDE_MY_KEY):
keyTxt = gr.Textbox(
show_label=True,
placeholder=f"Your API-key...",
value=hide_middle_chars(user_api_key.value),
type="password",
visible=not HIDE_MY_KEY,
label="API-Key",
)
if multi_api_key:
usageTxt = gr.Markdown(i18n(
"多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage-display",
elem_classes="insert-block", visible=show_api_billing)
else:
usageTxt = gr.Markdown(i18n(
"**发送消息** 或 **提交key** 以显示额度"), elem_id="usage-display",
elem_classes="insert-block", visible=show_api_billing)
gr.Markdown("---", elem_classes="hr-line", visible=not HIDE_MY_KEY)
with gr.Accordion(label="Prompt", open=True):
systemPromptTxt = gr.Textbox(
show_label=True,
placeholder=i18n("在这里输入System Prompt..."),
label="System prompt",
value=INITIAL_SYSTEM_PROMPT,
lines=8
)
retain_system_prompt_checkbox = gr.Checkbox(
label=i18n("新建对话保留Prompt"), value=False, visible=True,
elem_classes="switch-checkbox")
with gr.Accordion(label=i18n("加载Prompt模板"), open=False):
with gr.Column():
with gr.Row():
with gr.Column(scale=6):
templateFileSelectDropdown = gr.Dropdown(
label=i18n("选择Prompt模板集合文件"),
choices=get_template_names(),
multiselect=False,
value=get_template_names()[0],
container=False,
)
with gr.Column(scale=1):
templateRefreshBtn = gr.Button(
i18n("🔄 刷新"))
with gr.Row():
with gr.Column():
templateSelectDropdown = gr.Dropdown(
label=i18n("从Prompt模板中加载"),
choices=load_template(
get_template_names()[
0], mode=1
),
multiselect=False,
container=False,
)
gr.Markdown("---", elem_classes="hr-line")
with gr.Accordion(label=i18n("知识库"), open=True, elem_id="gr-kb-accordion", visible=True):
use_websearch_checkbox = gr.Checkbox(label=i18n(
"使用在线搜索"), value=False, elem_classes="switch-checkbox", elem_id="gr-websearch-cb",
visible=False)
index_files = gr.Files(label=i18n(
"上传"), type="file",
file_types=[".pdf", ".docx", ".pptx", ".epub", ".xlsx", ".txt", "text", "image"],
elem_id="upload-index-file")
two_column = gr.Checkbox(label=i18n(
"双栏pdf"), value=False)
summarize_btn = gr.Button(i18n("总结"), visible=False)
with gr.Tab(label=i18n("参数")):
gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️"),
elem_id="advanced-warning")
with gr.Accordion(i18n("参数"), open=True):
temperature_slider = gr.Slider(
minimum=-0,
maximum=2.0,
value=1.0,
step=0.1,
interactive=True,
label="temperature",
)
top_p_slider = gr.Slider(
minimum=-0,
maximum=1.0,
value=1.0,
step=0.05,
interactive=True,
label="top-p",
)
n_choices_slider = gr.Slider(
minimum=1,
maximum=10,
value=1,
step=1,
interactive=True,
label="n choices",
)
stop_sequence_txt = gr.Textbox(
show_label=True,
placeholder=i18n("停止符,用英文逗号隔开..."),
label="stop",
value="",
lines=1,
)
max_context_length_slider = gr.Slider(
minimum=1,
maximum=32768,
value=2000,
step=1,
interactive=True,
label="max context",
)
max_generation_slider = gr.Slider(
minimum=1,
maximum=32768,
value=1000,
step=1,
interactive=True,
label="max generations",
)
presence_penalty_slider = gr.Slider(
minimum=-2.0,
maximum=2.0,
value=0.0,
step=0.01,
interactive=True,
label="presence penalty",
)
frequency_penalty_slider = gr.Slider(
minimum=-2.0,
maximum=2.0,
value=0.0,
step=0.01,
interactive=True,
label="frequency penalty",
)
logit_bias_txt = gr.Textbox(
show_label=True,
placeholder=f"word:likelihood",
label="logit bias",
value="",
lines=1,
)
user_identifier_txt = gr.Textbox(
show_label=True,
placeholder=i18n("用于定位滥用行为"),
label=i18n("用户标识符"),
value=user_name.value,
lines=1,
)
with gr.Tab(label=i18n("关于")):
gr.Markdown("#### " + i18n("ChatGPT WebUI"))
gr.Markdown(CHUANHU_DESCRIPTION)
with gr.Row(elem_id="popup-wrapper"):
with gr.Box(elem_id="chuanhu-popup"):
with gr.Box(elem_id="chuanhu-setting"):
with gr.Row():
gr.Markdown("## " + i18n("设置"))
gr.HTML(get_html("close_btn.html").format(
obj="box"), elem_classes="close-btn")
with gr.Tabs(elem_id="chuanhu-setting-tabs"):
with gr.Tab(label=i18n("高级")):
gr.HTML(get_html("appearance_switcher.html").format(
label=i18n("切换亮暗色主题")), elem_classes="insert-block", visible=False)
use_streaming_checkbox = gr.Checkbox(
label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION,
elem_classes="switch-checkbox"
)
language_select_dropdown = gr.Dropdown(
label=i18n("选择回复语言(针对搜索&索引功能)"),
choices=REPLY_LANGUAGES,
multiselect=False,
value=REPLY_LANGUAGES[0],
visible=False,
)
name_chat_method = gr.Dropdown(
label=i18n("对话命名方式"),
choices=HISTORY_NAME_METHODS,
multiselect=False,
interactive=True,
value=HISTORY_NAME_METHODS[chat_name_method_index],
)
single_turn_checkbox = gr.Checkbox(label=i18n(
"单轮对话"), value=False, elem_classes="switch-checkbox", elem_id="gr-single-session-cb",
visible=False)
# checkUpdateBtn = gr.Button(i18n("🔄 检查更新..."), visible=check_update)
with gr.Tab(i18n("网络")):
gr.Markdown(
i18n("⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置"),
elem_id="netsetting-warning")
default_btn = gr.Button(i18n("🔙 恢复默认网络设置"))
# 网络代理
proxyTxt = gr.Textbox(
show_label=True,
placeholder=i18n("未设置代理..."),
label=i18n("代理地址"),
value=http_proxy,
lines=1,
interactive=False,
# container=False,
elem_classes="view-only-textbox no-container",
)
# changeProxyBtn = gr.Button(i18n("🔄 设置代理地址"))
# 优先展示自定义的api_host
apihostTxt = gr.Textbox(
show_label=True,
placeholder="api.openai.com",
label="OpenAI API-Host",
value=api_host or API_HOST,
lines=1,
interactive=False,
# container=False,
elem_classes="view-only-textbox no-container",
)
with gr.Tab(label=i18n("关于"), elem_id="about-tab"):
gr.Markdown("# " + i18n("ChatGPT WebUI"))
gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
with gr.Box(elem_id="web-config", visible=False):
gr.HTML(get_html('web_config.html').format(
enableCheckUpdate_config=False,
hideHistoryWhenNotLoggedIn_config=hide_history_when_not_logged_in,
forView_i18n=i18n("仅供查看"),
deleteConfirm_i18n_pref=i18n("你真的要删除 "),
deleteConfirm_i18n_suff=i18n(" 吗?"),
usingLatest_i18n=i18n("您使用的就是最新版!"),
updatingMsg_i18n=i18n("正在尝试更新..."),
updateSuccess_i18n=i18n("更新成功,请重启本程序"),
updateFailure_i18n=i18n(
"更新失败,请尝试[手动更新](https://github.com/shibing624/chatgpt-webui/"),
regenerate_i18n=i18n("重新生成"),
deleteRound_i18n=i18n("删除这轮问答"),
renameChat_i18n=i18n("重命名该对话"),
validFileName_i18n=i18n("请输入有效的文件名,不要包含以下特殊字符:"),
clearFileHistoryMsg_i18n=i18n("⚠️请先删除知识库中的历史文件,再尝试上传!"),
dropUploadMsg_i18n=i18n("释放文件以上传"),
))
with gr.Box(elem_id="fake-gradio-components", visible=False):
changeSingleSessionBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="change-single-session-btn")
historySelectBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="history-select-btn") # Not used
def create_greeting(request: gr.Request):
if hasattr(request, "username") and request.username:
logger.info(f"Get User Name: {request.username}")
user_info, user_name = gr.Markdown.update(
value=f"User: {request.username}"), request.username
else:
user_info, user_name = gr.Markdown.update(
value=f"", visible=False), ""
current_model = get_model(
model_name=MODELS[DEFAULT_MODEL], access_key=my_api_key, user_name=user_name)[0]
if not hide_history_when_not_logged_in or user_name:
loaded_stuff = current_model.auto_load()
else:
loaded_stuff = [gr.update(), gr.update(), gr.Chatbot.update(label=MODELS[DEFAULT_MODEL]),
current_model.single_turn, current_model.temperature, current_model.top_p,
current_model.n_choices, current_model.stop_sequence, current_model.token_upper_limit,
current_model.max_generation_token, current_model.presence_penalty,
current_model.frequency_penalty, current_model.logit_bias, current_model.user_identifier]
return user_info, user_name, current_model, toggle_like_btn_visibility(
DEFAULT_MODEL), *loaded_stuff, init_history_list(user_name)
demo.load(create_greeting, inputs=None, outputs=[
user_info, user_name, current_model, like_dislike_area, saveFileName, systemPromptTxt, chatbot,
single_turn_checkbox, temperature_slider, top_p_slider, n_choices_slider, stop_sequence_txt,
max_context_length_slider, max_generation_slider, presence_penalty_slider, frequency_penalty_slider,
logit_bias_txt, user_identifier_txt, historySelectList], api_name="load")
chatgpt_predict_args = dict(
fn=predict,
inputs=[
current_model,
user_question,
chatbot,
use_streaming_checkbox,
use_websearch_checkbox,
index_files,
language_select_dropdown,
],
outputs=[chatbot, status_display],
show_progress=True,
)
start_outputing_args = dict(
fn=start_outputing,
inputs=[],
outputs=[submitBtn, cancelBtn],
show_progress=True,
)
end_outputing_args = dict(
fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn]
)
reset_textbox_args = dict(
fn=reset_textbox, inputs=[], outputs=[user_input]
)
transfer_input_args = dict(
fn=transfer_input, inputs=[user_input], outputs=[
user_question, user_input, submitBtn, cancelBtn], show_progress=True
)
get_usage_args = dict(
fn=billing_info, inputs=[current_model], outputs=[
usageTxt], show_progress=False
)
load_history_from_file_args = dict(
fn=load_chat_history,
inputs=[current_model, historySelectList],
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
)
refresh_history_args = dict(
fn=get_history_list, inputs=[user_name], outputs=[historySelectList]
)
auto_name_chat_history_args = dict(
fn=auto_name_chat_history,
inputs=[current_model, name_chat_method, user_question, chatbot, single_turn_checkbox],
outputs=[historySelectList],
show_progress=False,
)
# Chatbot
cancelBtn.click(interrupt, [current_model], [])
user_input.submit(
**transfer_input_args).then(
**chatgpt_predict_args).then(
**end_outputing_args).then(
**auto_name_chat_history_args)
user_input.submit(**get_usage_args)
submitBtn.click(**transfer_input_args).then(
**chatgpt_predict_args, api_name="predict").then(
**end_outputing_args).then(
**auto_name_chat_history_args)
submitBtn.click(**get_usage_args)
index_files.upload(handle_file_upload, [current_model, index_files, chatbot, language_select_dropdown], [
index_files, chatbot, status_display])
summarize_btn.click(handle_summarize_index, [
current_model, index_files, chatbot, language_select_dropdown], [chatbot, status_display])
emptyBtn.click(
reset,
inputs=[current_model, retain_system_prompt_checkbox],
outputs=[chatbot, status_display, historySelectList, systemPromptTxt, single_turn_checkbox, temperature_slider,
top_p_slider, n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
show_progress=True,
_js='(a,b)=>{return clearChatbot(a,b);}',
)
retryBtn.click(**start_outputing_args).then(
retry,
[
current_model,
chatbot,
use_streaming_checkbox,
use_websearch_checkbox,
index_files,
language_select_dropdown,
],
[chatbot, status_display],
show_progress=True,
).then(**end_outputing_args)
retryBtn.click(**get_usage_args)
delFirstBtn.click(
delete_first_conversation,
[current_model],
[status_display],
)
delLastBtn.click(
delete_last_conversation,
[current_model, chatbot],
[chatbot, status_display],
show_progress=False
)
likeBtn.click(
like,
[current_model],
[status_display],
show_progress=False
)
dislikeBtn.click(
dislike,
[current_model],
[status_display],
show_progress=False
)
two_column.change(update_doc_config, [two_column], None)
# LLM Models
keyTxt.change(set_key, [current_model, keyTxt], [
user_api_key, status_display], api_name="set_key").then(**get_usage_args)
keyTxt.submit(**get_usage_args)
single_turn_checkbox.change(
set_single_turn, [current_model, single_turn_checkbox], None, show_progress=False)
model_select_dropdown.change(get_model,
[model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider,
top_p_slider, systemPromptTxt, user_name, current_model], [
current_model, status_display, chatbot, lora_select_dropdown, user_api_key,
keyTxt], show_progress=True, api_name="get_model")
model_select_dropdown.change(toggle_like_btn_visibility, [model_select_dropdown], [
like_dislike_area], show_progress=False)
lora_select_dropdown.change(get_model,
[model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider,
top_p_slider, systemPromptTxt, user_name, current_model],
[current_model, status_display, chatbot], show_progress=True)
# Template
systemPromptTxt.input(set_system_prompt, [
current_model, systemPromptTxt], None)
templateRefreshBtn.click(get_template_dropdown, None, [
templateFileSelectDropdown])
templateFileSelectDropdown.input(
load_template,
[templateFileSelectDropdown],
[promptTemplates, templateSelectDropdown],
show_progress=True,
)
templateSelectDropdown.change(
get_template_content,
[promptTemplates, templateSelectDropdown, systemPromptTxt],
[systemPromptTxt],
show_progress=True,
)
# S&L
renameHistoryBtn.click(
rename_chat_history,
[current_model, saveFileName, chatbot],
[historySelectList],
show_progress=True,
_js='(a,b,c,d)=>{return saveChatHistory(a,b,c,d);}'
)
exportMarkdownBtn.click(
export_markdown,
[current_model, saveFileName, chatbot],
[],
show_progress=True,
)
historyRefreshBtn.click(**refresh_history_args)
historyDeleteBtn.click(delete_chat_history, [current_model, historySelectList],
[status_display, historySelectList, chatbot],
_js='(a,b,c)=>{return showConfirmationDialog(a, b, c);}').then(
reset,
inputs=[current_model, retain_system_prompt_checkbox],
outputs=[chatbot, status_display, historySelectList, systemPromptTxt],
show_progress=True,
_js='(a,b)=>{return clearChatbot(a,b);}',
)
historySelectList.input(**load_history_from_file_args)
uploadFileBtn.upload(upload_chat_history, [current_model, uploadFileBtn], [
saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider, presence_penalty_slider,
frequency_penalty_slider, logit_bias_txt, user_identifier_txt]).then(**refresh_history_args)
historyDownloadBtn.click(None, [
user_name, historySelectList], None, _js='(a,b)=>{return downloadHistory(a,b,".json");}')
historyMarkdownDownloadBtn.click(None, [
user_name, historySelectList], None, _js='(a,b)=>{return downloadHistory(a,b,".md");}')
historySearchTextbox.input(
filter_history,
[user_name, historySearchTextbox],
[historySelectList]
)
# Advanced
temperature_slider.input(
set_temperature, [current_model, temperature_slider], None, show_progress=False)
top_p_slider.input(set_top_p, [current_model, top_p_slider], None, show_progress=False)
n_choices_slider.input(
set_n_choices, [current_model, n_choices_slider], None, show_progress=False)
stop_sequence_txt.input(
set_stop_sequence, [current_model, stop_sequence_txt], None, show_progress=False)
max_context_length_slider.input(
set_token_upper_limit, [current_model, max_context_length_slider], None, show_progress=False)
max_generation_slider.input(
set_max_tokens, [current_model, max_generation_slider], None, show_progress=False)
presence_penalty_slider.input(
set_presence_penalty, [current_model, presence_penalty_slider], None, show_progress=False)
frequency_penalty_slider.input(
set_frequency_penalty, [current_model, frequency_penalty_slider], None, show_progress=False)
logit_bias_txt.input(
set_logit_bias, [current_model, logit_bias_txt], None, show_progress=False)
user_identifier_txt.input(set_user_identifier, [
current_model, user_identifier_txt], None, show_progress=False)
default_btn.click(
reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True
)
# Invisible elements
changeSingleSessionBtn.click(
fn=lambda value: gr.Checkbox.update(value=value),
inputs=[single_turn_checkbox],
outputs=[single_turn_checkbox],
_js='(a)=>{return bgChangeSingleSession(a);}'
)
historySelectBtn.click( # This is an experimental feature... Not actually used.
fn=load_chat_history,
inputs=[current_model, historySelectList],
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
_js='(a,b)=>{return bgSelectHistory(a,b);}'
)
demo.title = CHUANHU_TITLE
if __name__ == "__main__":
reload_javascript()
setup_wizard()
demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
allowed_paths=[HISTORY_DIR, assets_path],
server_name=server_name, | server_port=server_port, | 0 | 2023-12-27 12:14:26+00:00 | 12k |
camenduru/DiffMorpher-hf | app.py | [
{
"identifier": "DiffMorpherPipeline",
"path": "morph_attn.py",
"snippet": "class DiffMorpherPipeline(StableDiffusionPipeline):\n\n def __init__(self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n une... | import os
import torch
import numpy as np
import cv2
import gradio as gr
from PIL import Image
from datetime import datetime
from morph_attn import DiffMorpherPipeline
from lora_utils import train_lora | 7,292 |
LENGTH=450
def train_lora_interface(
image,
prompt,
model_path,
output_path,
lora_steps,
lora_rank,
lora_lr,
num
):
os.makedirs(output_path, exist_ok=True)
train_lora(image, prompt, output_path, model_path,
lora_steps=lora_steps, lora_lr=lora_lr, lora_rank=lora_rank, weight_name=f"lora_{num}.ckpt", progress=gr.Progress())
return f"Train LoRA {'A' if num == 0 else 'B'} Done!"
def run_diffmorpher(
image_0,
image_1,
prompt_0,
prompt_1,
model_path,
lora_mode,
lamb,
use_adain,
use_reschedule,
num_frames,
fps,
load_lora_path_0,
load_lora_path_1,
output_path
):
run_id = datetime.now().strftime("%H%M") + "_" + datetime.now().strftime("%Y%m%d")
os.makedirs(output_path, exist_ok=True)
|
LENGTH=450
def train_lora_interface(
image,
prompt,
model_path,
output_path,
lora_steps,
lora_rank,
lora_lr,
num
):
os.makedirs(output_path, exist_ok=True)
train_lora(image, prompt, output_path, model_path,
lora_steps=lora_steps, lora_lr=lora_lr, lora_rank=lora_rank, weight_name=f"lora_{num}.ckpt", progress=gr.Progress())
return f"Train LoRA {'A' if num == 0 else 'B'} Done!"
def run_diffmorpher(
image_0,
image_1,
prompt_0,
prompt_1,
model_path,
lora_mode,
lamb,
use_adain,
use_reschedule,
num_frames,
fps,
load_lora_path_0,
load_lora_path_1,
output_path
):
run_id = datetime.now().strftime("%H%M") + "_" + datetime.now().strftime("%Y%m%d")
os.makedirs(output_path, exist_ok=True) | morpher_pipeline = DiffMorpherPipeline.from_pretrained(model_path, torch_dtype=torch.float32).to("cuda") | 0 | 2023-12-25 04:51:41+00:00 | 12k |
camenduru/AnyDoor-online-hf | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ... | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler | 9,748 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key) | count_params(self.model, verbose=True) | 6 | 2023-12-25 04:48:34+00:00 | 12k |
pangxincheng/TaskManager | task_manager/main.py | [
{
"identifier": "CoreManager",
"path": "task_manager/manager/core.py",
"snippet": "class CoreManager(mp.Process):\n\n def __init__(\n self,\n core_manager_addr: str,\n gpu_manager_addr: str=\"ipc://gpu_manager\",\n task_manager_addr: str=\"ipc://task_manager\",\n lo... | import os
import sys
import rich
import time
import argparse
import multiprocessing as mp
import task_manager.utils.common_utils as common_utils
from task_manager.manager.core import CoreManager
from task_manager.controller.cli_controller import CLIController | 8,187 |
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method("spawn")
else:
assert mp.get_start_method() == "spawn", "Only support spawn start method"
def parse_args():
identity_id = common_utils.md5(str(time.time()))
parser = argparse.ArgumentParser()
parser.add_argument("--log_dir", default="logs", help="Log dir")
parser.add_argument("--log_level", default="INFO", help="Log level")
parser.add_argument("--web_controller", action="store_true", help="Whether start web gui to watch GPU usage&Tasks")
parser.add_argument(
"--core_manager_addr",
type=str,
default=f"ipc:///tmp/core_manager-{identity_id}.sock",
help="Address to run Core manager on"
)
parser.add_argument(
"--gpu_manager_addr",
type=str,
default=f"ipc:///tmp/gpu_manager-{identity_id}.sock",
help="Address to run GPU manager on"
)
parser.add_argument(
"--task_manager_addr",
type=str,
default=f"ipc:///tmp/task_manager-{identity_id}.sock",
help="Address to run Task manager on"
)
args = parser.parse_args()
os.makedirs(args.log_dir, exist_ok=True)
sys.argv = sys.argv[:1]
return args
def start_core_manager(args):
core_manager = CoreManager(
core_manager_addr=args.core_manager_addr,
gpu_manager_addr=args.gpu_manager_addr,
task_manager_addr=args.task_manager_addr,
log_dir=args.log_dir,
log_level=args.log_level,
)
core_manager.start()
time.sleep(1)
return core_manager
def start_cli_controller(args):
|
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method("spawn")
else:
assert mp.get_start_method() == "spawn", "Only support spawn start method"
def parse_args():
identity_id = common_utils.md5(str(time.time()))
parser = argparse.ArgumentParser()
parser.add_argument("--log_dir", default="logs", help="Log dir")
parser.add_argument("--log_level", default="INFO", help="Log level")
parser.add_argument("--web_controller", action="store_true", help="Whether start web gui to watch GPU usage&Tasks")
parser.add_argument(
"--core_manager_addr",
type=str,
default=f"ipc:///tmp/core_manager-{identity_id}.sock",
help="Address to run Core manager on"
)
parser.add_argument(
"--gpu_manager_addr",
type=str,
default=f"ipc:///tmp/gpu_manager-{identity_id}.sock",
help="Address to run GPU manager on"
)
parser.add_argument(
"--task_manager_addr",
type=str,
default=f"ipc:///tmp/task_manager-{identity_id}.sock",
help="Address to run Task manager on"
)
args = parser.parse_args()
os.makedirs(args.log_dir, exist_ok=True)
sys.argv = sys.argv[:1]
return args
def start_core_manager(args):
core_manager = CoreManager(
core_manager_addr=args.core_manager_addr,
gpu_manager_addr=args.gpu_manager_addr,
task_manager_addr=args.task_manager_addr,
log_dir=args.log_dir,
log_level=args.log_level,
)
core_manager.start()
time.sleep(1)
return core_manager
def start_cli_controller(args): | cli_controller = CLIController( | 1 | 2023-12-30 11:47:06+00:00 | 12k |
Shaokang-Agent/S2L | marlgrid/envs/goalcycle.py | [
{
"identifier": "MultiGridEnv",
"path": "marlgrid/base.py",
"snippet": "class MultiGridEnv(gym.Env):\n def __init__(\n self,\n agents = [],\n grid_size=None,\n width=None,\n height=None,\n max_steps=100,\n reward_decay=True,\n seed=1337,\n ... | from ..base import MultiGridEnv, MultiGrid
from ..objects import * | 7,280 |
class ClutteredGoalCycleEnv(MultiGridEnv):
mission = "Cycle between yellow goal tiles."
metadata = {}
def __init__(self, *args, reward=1, penalty=0.0, n_clutter=None, clutter_density=None, n_bonus_tiles=3,
initial_reward=True, cycle_reset=False, reset_on_mistake=False, reward_decay=False, **kwargs):
if (n_clutter is None) == (clutter_density is None):
raise ValueError("Must provide n_clutter xor clutter_density in environment config.")
# Overwrite the default reward_decay for goal cycle environments.
super().__init__(*args, **{**kwargs, 'reward_decay': reward_decay})
if clutter_density is not None:
self.n_clutter = int(clutter_density * (self.width - 2) * (self.height - 2))
else:
self.n_clutter = n_clutter
self.reward = reward
self.penalty = penalty
self.initial_reward = initial_reward
self.n_bonus_tiles = n_bonus_tiles
self.reset_on_mistake = reset_on_mistake
self.bonus_tiles = []
self.bonus_tiles_pos = [[] for _ in range(self.n_bonus_tiles)]
self.wall_pos = [[] for _ in range(self.n_clutter)]
def _gen_grid(self, width, height):
|
class ClutteredGoalCycleEnv(MultiGridEnv):
mission = "Cycle between yellow goal tiles."
metadata = {}
def __init__(self, *args, reward=1, penalty=0.0, n_clutter=None, clutter_density=None, n_bonus_tiles=3,
initial_reward=True, cycle_reset=False, reset_on_mistake=False, reward_decay=False, **kwargs):
if (n_clutter is None) == (clutter_density is None):
raise ValueError("Must provide n_clutter xor clutter_density in environment config.")
# Overwrite the default reward_decay for goal cycle environments.
super().__init__(*args, **{**kwargs, 'reward_decay': reward_decay})
if clutter_density is not None:
self.n_clutter = int(clutter_density * (self.width - 2) * (self.height - 2))
else:
self.n_clutter = n_clutter
self.reward = reward
self.penalty = penalty
self.initial_reward = initial_reward
self.n_bonus_tiles = n_bonus_tiles
self.reset_on_mistake = reset_on_mistake
self.bonus_tiles = []
self.bonus_tiles_pos = [[] for _ in range(self.n_bonus_tiles)]
self.wall_pos = [[] for _ in range(self.n_clutter)]
def _gen_grid(self, width, height): | self.grid = MultiGrid((width, height)) | 1 | 2023-12-24 06:50:38+00:00 | 12k |
smonsays/modular-hyperteacher | metax/data/imitation.py | [
{
"identifier": "Environment",
"path": "metax/data/envs/base.py",
"snippet": "class Environment(abc.ABC):\n @abc.abstractproperty\n def num_actions(self) -> int:\n \"\"\" Number of possible actions.\"\"\"\n\n @abc.abstractproperty\n def observation_shape(self):\n \"\"\"The shap... | from functools import partial
from typing import Optional
from chex import PRNGKey
from metax.data.envs.base import Environment
from metax.data.envs.grid import CompositionalGrid
from metax.data.envs.preference import CompositionalPreference
from .base import Dataloader, MetaDataset, MultitaskDataset
import jax
import jax.numpy as jnp
import jax.tree_util as jtu | 10,258 | """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
| """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
| class ImitationMetaDataloader(Dataloader): | 3 | 2023-12-22 16:35:49+00:00 | 12k |
willfinnigan/RetroBioCat_2 | rbc2/mcts/mcts.py | [
{
"identifier": "MultiExpander",
"path": "rbc2/expansion/multi_expander.py",
"snippet": "class MultiExpander:\n\n def __init__(self,\n expanders: dict[str: Expander],\n network: Optional[Network] = None):\n\n if len(expanders) == 0:\n raise ValueError... | import time
from typing import Optional, List
from rbc2.expansion.multi_expander import MultiExpander
from rbc2.reaction_evaluation.feasability_filters import Filter, default_filter_repo
from rbc2.reaction_evaluation.starting_material_evaluator.starting_material_evaluator import \
DefaultSQLStartingMaterialEvaluator
from rbc2.utils.add_logger import add_logger
from rbc2.configs.logging_config import logging_config
from rbc2.configs.mcts_config import MCTS_Config
from rbc2.expansion.expander_repository import get_expanders
from rbc2.expansion.default_expander_interface import Expander
from rbc2.mcts.mcts_loop.backpropogate import backpropogate
from rbc2.mcts.mcts_loop.expansion.expand import Expansion
from rbc2.mcts.mcts_loop.rollout import rollout
from rbc2.mcts.mcts_loop.score_node import score_node
from rbc2.mcts.mcts_loop.selection import Selection
from rbc2.mcts.tree_node import create_root, MCTS_Node
from rbc2.reaction_network_entities.network import Network
from rbc2.reaction_network_entities.pathway import Pathway | 7,824 |
class MCTS():
def __init__(self,
target_smi: str,
expanders: dict[str: Expander],
filters: dict[str: Filter] = default_filter_repo,
starting_material_evaluator: Optional[DefaultSQLStartingMaterialEvaluator] = None,
network: Optional[Network] = None,
mcts_config: Optional[MCTS_Config] = None):
self.target_smi = target_smi
self.logger = add_logger('MCTS', level=logging_config.mcts)
# config
self.mcts_config = mcts_config
if self.mcts_config is None:
self.mcts_config = MCTS_Config()
# starting material evaluator
self.starting_material_evaluator = starting_material_evaluator
if self.starting_material_evaluator is None:
self.starting_material_evaluator = DefaultSQLStartingMaterialEvaluator()
# network - used to save expansions so they are only done once
self.network = network
if self.network is None:
self.network = Network()
# multi_expander made up of the individual expanders
self.multi_expander = MultiExpander(expanders, network=self.network)
# filters
self.filters = filters
# mcts steps
self.selection = Selection()
self.expansion = Expansion(self.multi_expander,
self.starting_material_evaluator,
self.mcts_config)
self.root: MCTS_Node = create_root(target_smi) # root node
self.solved = [] # the solved nodes, updated during the backpropagation step
self.search_complete = False # used to stop the search either on max iterations or max run time
# stats
self.iterations = 0
self.run_time = 0
self.positive_backpropagations = 0
def run(self, callback=None):
"""Runs the MCTS search"""
self.logger.debug(f'Running MCTS search for {self.target_smi}. Max time: {self.mcts_config.max_search_time} seconds. Max iterations: {self.mcts_config.max_iterations}')
t0 = time.time()
while self.search_complete is False:
self.do_a_loop()
self._check_run_time(t0)
if callback is not None and self.iterations % self.mcts_config.callback_iterations == 0:
callback(self)
def do_a_loop(self):
self.logger.debug(f'---- ITERATION {self.iterations} ----')
node = self.selection.select(self.root, self.mcts_config.exploration)
|
class MCTS():
def __init__(self,
target_smi: str,
expanders: dict[str: Expander],
filters: dict[str: Filter] = default_filter_repo,
starting_material_evaluator: Optional[DefaultSQLStartingMaterialEvaluator] = None,
network: Optional[Network] = None,
mcts_config: Optional[MCTS_Config] = None):
self.target_smi = target_smi
self.logger = add_logger('MCTS', level=logging_config.mcts)
# config
self.mcts_config = mcts_config
if self.mcts_config is None:
self.mcts_config = MCTS_Config()
# starting material evaluator
self.starting_material_evaluator = starting_material_evaluator
if self.starting_material_evaluator is None:
self.starting_material_evaluator = DefaultSQLStartingMaterialEvaluator()
# network - used to save expansions so they are only done once
self.network = network
if self.network is None:
self.network = Network()
# multi_expander made up of the individual expanders
self.multi_expander = MultiExpander(expanders, network=self.network)
# filters
self.filters = filters
# mcts steps
self.selection = Selection()
self.expansion = Expansion(self.multi_expander,
self.starting_material_evaluator,
self.mcts_config)
self.root: MCTS_Node = create_root(target_smi) # root node
self.solved = [] # the solved nodes, updated during the backpropagation step
self.search_complete = False # used to stop the search either on max iterations or max run time
# stats
self.iterations = 0
self.run_time = 0
self.positive_backpropagations = 0
def run(self, callback=None):
"""Runs the MCTS search"""
self.logger.debug(f'Running MCTS search for {self.target_smi}. Max time: {self.mcts_config.max_search_time} seconds. Max iterations: {self.mcts_config.max_iterations}')
t0 = time.time()
while self.search_complete is False:
self.do_a_loop()
self._check_run_time(t0)
if callback is not None and self.iterations % self.mcts_config.callback_iterations == 0:
callback(self)
def do_a_loop(self):
self.logger.debug(f'---- ITERATION {self.iterations} ----')
node = self.selection.select(self.root, self.mcts_config.exploration) | new_node = rollout(node, self.expansion, self.selection, self.network, self.filters, self.mcts_config) | 10 | 2023-12-30 11:33:41+00:00 | 12k |
DerwenAI/textgraphs | textgraphs/doc.py | [
{
"identifier": "PAGERANK_ALPHA",
"path": "textgraphs/defaults.py",
"snippet": "PAGERANK_ALPHA: float = 0.85"
},
{
"identifier": "Edge",
"path": "textgraphs/elem.py",
"snippet": "class Edge:\n \"\"\"\nA data class representing an edge between two nodes.\n \"\"\"\n src_node: int\... | import asyncio
import logging
import os
import sys
import typing
import networkx as nx # pylint: disable=E0401
import numpy as np # pylint: disable=E0401
import pandas as pd # pylint: disable=E0401
import pulp # pylint: disable=E0401
import spacy # pylint: disable=E0401
import transformers # pylint: disable=E0401
import urllib3 # pylint: disable=E0401
from icecream import ic # pylint: disable=E0401
from .defaults import PAGERANK_ALPHA
from .elem import Edge, Node, NodeEnum, RelEnum
from .graph import SimpleGraph
from .pipe import Pipeline, PipelineFactory
from .util import calc_quantile_bins, root_mean_square, stripe_column
from .vis import RenderPyVis
from tqdm.notebook import tqdm # pylint: disable=E0401,W0611
from tqdm import tqdm # pylint: disable=E0401 | 8,139 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0302
"""
Implementation of an LLM-augmented `textgraph` algorithm for
constructing a _lemma graph_ from raw, unstructured text source.
The results provide elements for semi-automated construction or
augmentation of a _knowledge graph_.
This class maintains the state of a graph. Updates get applied by
running methods on `Pipeline` objects, typically per paragraph.
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## fix the borked libraries
# workaround: determine whether this is loading into a Jupyter
# notebook, to allow for `tqdm` progress bars
if "ipykernel" in sys.modules:
else:
# override: HF `transformers` and `tokenizers` have noisy logging
transformers.logging.set_verbosity_error()
os.environ["TOKENIZERS_PARALLELISM"] = "0"
# override: `OpenNRE` uses `word2vec` which has noisy logging
logging.disable(logging.INFO)
# override: WikidMedia and others allow their SSL certs to expire
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
######################################################################
## class definitions
class TextGraphs (SimpleGraph):
"""
Construct a _lemma graph_ from the unstructured text source,
then extract ranked phrases using a `textgraph` algorithm.
"""
def __init__ (
self,
*,
factory: typing.Optional[ PipelineFactory ] = None,
) -> None:
"""
Constructor.
factory:
optional `PipelineFactory` used to configure components
"""
super().__init__()
# initialize the pipeline factory
if factory is not None:
self.factory = factory
else:
self.factory = PipelineFactory()
def create_pipeline (
self,
text_input: str,
) -> Pipeline:
"""
Use the pipeline factory to create a pipeline (e.g., `spaCy.Document`)
for each text input, which are typically paragraph-length.
text_input:
raw text to be parsed by this pipeline
returns:
a configured pipeline
"""
return self.factory.create_pipeline(
text_input,
)
def create_render (
self
) -> RenderPyVis:
"""
Create an object for rendering the graph in `PyVis` HTML+JavaScript.
returns:
a configured `RenderPyVis` object for generating graph visualizations
"""
return RenderPyVis(
self,
self.factory.kg,
)
def _extract_phrases ( # pylint: disable=R0913
self,
pipe: Pipeline,
sent_id: int,
sent: spacy.tokens.span.Span,
text_id: int,
para_id: int,
lemma_iter: typing.Iterator[ typing.Tuple[ str, int ]],
*,
debug: bool = False,
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0302
"""
Implementation of an LLM-augmented `textgraph` algorithm for
constructing a _lemma graph_ from raw, unstructured text source.
The results provide elements for semi-automated construction or
augmentation of a _knowledge graph_.
This class maintains the state of a graph. Updates get applied by
running methods on `Pipeline` objects, typically per paragraph.
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## fix the borked libraries
# workaround: determine whether this is loading into a Jupyter
# notebook, to allow for `tqdm` progress bars
if "ipykernel" in sys.modules:
else:
# override: HF `transformers` and `tokenizers` have noisy logging
transformers.logging.set_verbosity_error()
os.environ["TOKENIZERS_PARALLELISM"] = "0"
# override: `OpenNRE` uses `word2vec` which has noisy logging
logging.disable(logging.INFO)
# override: WikidMedia and others allow their SSL certs to expire
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
######################################################################
## class definitions
class TextGraphs (SimpleGraph):
"""
Construct a _lemma graph_ from the unstructured text source,
then extract ranked phrases using a `textgraph` algorithm.
"""
def __init__ (
self,
*,
factory: typing.Optional[ PipelineFactory ] = None,
) -> None:
"""
Constructor.
factory:
optional `PipelineFactory` used to configure components
"""
super().__init__()
# initialize the pipeline factory
if factory is not None:
self.factory = factory
else:
self.factory = PipelineFactory()
def create_pipeline (
self,
text_input: str,
) -> Pipeline:
"""
Use the pipeline factory to create a pipeline (e.g., `spaCy.Document`)
for each text input, which are typically paragraph-length.
text_input:
raw text to be parsed by this pipeline
returns:
a configured pipeline
"""
return self.factory.create_pipeline(
text_input,
)
def create_render (
self
) -> RenderPyVis:
"""
Create an object for rendering the graph in `PyVis` HTML+JavaScript.
returns:
a configured `RenderPyVis` object for generating graph visualizations
"""
return RenderPyVis(
self,
self.factory.kg,
)
def _extract_phrases ( # pylint: disable=R0913
self,
pipe: Pipeline,
sent_id: int,
sent: spacy.tokens.span.Span,
text_id: int,
para_id: int,
lemma_iter: typing.Iterator[ typing.Tuple[ str, int ]],
*,
debug: bool = False, | ) -> typing.Iterator[ Node ]: | 2 | 2023-12-25 11:42:53+00:00 | 12k |
pkariz/grin-explorer | backend/api/views.py | [
{
"identifier": "fetch_and_store_block",
"path": "backend/api/bootstrap.py",
"snippet": "def fetch_and_store_block(blockchain, block_height, prefetch=True):\n # initialize node api\n node_api = NodeV2API(blockchain.node)\n if block_height < 0:\n # no such block height\n raise Node... | from asgiref.sync import async_to_sync
from django.contrib.contenttypes.models import ContentType
from django.db.models.deletion import ProtectedError
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from dramatiq_abort import abort
from rest_framework import status
from rest_framework.exceptions import APIException
from rest_framework.exceptions import NotFound
from rest_framework.exceptions import ValidationError as DRFValidationError
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from slugify import slugify
from .bootstrap import fetch_and_store_block, update_blockchain_progress
from .exceptions import UpdateBlockchainProgressError
from .helpers import get_filter_backends, load_data_from_redis
from .filters import (
BlockFilter,
CustomBlockSearchFilter,
NodeFilter,
NodeGroupFilter,
)
from .mixins import CustomModelViewSet
from .models import Blockchain, Block, Reorg, Node, NodeGroup, DramatiqTask
from .serializers import (
BlockchainSerializer,
BlockchainExtendedSerializer,
BlockSerializer,
BlockDetailSerializer,
NodeSerializer,
NodeGroupSerializer,
DramatiqTaskSerializer,
)
from .tasks import bootstrap_blockchain, delete_blockchain
import channels
import logging
import pytz | 8,844 |
logger = logging.getLogger(__name__)
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))
class NodeGroupViewSet(CustomModelViewSet):
"""API endpoint for NodeGroup."""
queryset = NodeGroup.objects.all()
filterset_class = NodeGroupFilter
serializer_class = NodeGroupSerializer
lookup_field = 'slug'
permission_classes = [IsAuthenticated]
def create(self, request, *args, **kwargs):
slug = request.data.get('slug')
if not slug:
request.data['slug'] = slugify(request.data['name'], to_lower=True)
return super().create(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
try:
return super().destroy(request, *args, **kwargs)
except ProtectedError as e:
raise DRFValidationError(
detail='Node group is related to nodes, delete them first')
class NodeViewSet(CustomModelViewSet):
"""API endpoint for Node."""
queryset = Node.objects.all()
filterset_class = NodeFilter
serializer_class = NodeSerializer
# currently all node views require authentication
permission_classes = [IsAuthenticated]
lookup_field = 'slug'
def create(self, request, *args, **kwargs):
slug = request.data.get('slug')
if not slug:
request.data['slug'] = slugify(request.data['name'], to_lower=True)
request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
# NOTE: super().partial_update calls update(..., partial=True)
if not kwargs.get('partial'):
# we don't allow full updates - aka PUT
raise DRFPermissionDenied()
return super().update(request, *args, **kwargs)
def partial_update(self, request, slug=None):
request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk
return super().partial_update(request, slug=slug)
@action(detail=True, methods=['get'])
def reachable(self, request, slug=None):
node = self.get_object()
try:
res = node.is_reachable()
except Exception as e:
logger.exception('Unreachable node')
res = False
return Response(res, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
try:
return super().destroy(request, *args, **kwargs)
except ProtectedError as e:
raise DRFValidationError(
detail='Node is related to blockchains, delete them first')
class BlockchainViewSet(CustomModelViewSet):
"""API endpoint for Blockchain."""
|
logger = logging.getLogger(__name__)
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name='index.html'))
class NodeGroupViewSet(CustomModelViewSet):
"""API endpoint for NodeGroup."""
queryset = NodeGroup.objects.all()
filterset_class = NodeGroupFilter
serializer_class = NodeGroupSerializer
lookup_field = 'slug'
permission_classes = [IsAuthenticated]
def create(self, request, *args, **kwargs):
slug = request.data.get('slug')
if not slug:
request.data['slug'] = slugify(request.data['name'], to_lower=True)
return super().create(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
try:
return super().destroy(request, *args, **kwargs)
except ProtectedError as e:
raise DRFValidationError(
detail='Node group is related to nodes, delete them first')
class NodeViewSet(CustomModelViewSet):
"""API endpoint for Node."""
queryset = Node.objects.all()
filterset_class = NodeFilter
serializer_class = NodeSerializer
# currently all node views require authentication
permission_classes = [IsAuthenticated]
lookup_field = 'slug'
def create(self, request, *args, **kwargs):
slug = request.data.get('slug')
if not slug:
request.data['slug'] = slugify(request.data['name'], to_lower=True)
request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
# NOTE: super().partial_update calls update(..., partial=True)
if not kwargs.get('partial'):
# we don't allow full updates - aka PUT
raise DRFPermissionDenied()
return super().update(request, *args, **kwargs)
def partial_update(self, request, slug=None):
request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk
return super().partial_update(request, slug=slug)
@action(detail=True, methods=['get'])
def reachable(self, request, slug=None):
node = self.get_object()
try:
res = node.is_reachable()
except Exception as e:
logger.exception('Unreachable node')
res = False
return Response(res, status=status.HTTP_200_OK)
def destroy(self, request, *args, **kwargs):
try:
return super().destroy(request, *args, **kwargs)
except ProtectedError as e:
raise DRFValidationError(
detail='Node is related to blockchains, delete them first')
class BlockchainViewSet(CustomModelViewSet):
"""API endpoint for Blockchain.""" | queryset = Blockchain.objects.all() | 10 | 2023-12-24 22:15:11+00:00 | 12k |
datrocity/pond | pond/activity.py | [
{
"identifier": "Artifact",
"path": "pond/artifact/artifact.py",
"snippet": "class Artifact(ABC):\n \"\"\" Knows how to read and write one type of artifact.\n\n Concrete Artifact implementation should save the metadata with the data if possible,\n so that the artifact is self-contained even if,... | from typing import Any, Dict, Optional, Set, Type, Union
from pond.artifact import Artifact
from pond.artifact.artifact_registry import ArtifactRegistry, global_artifact_registry
from pond.conventions import DataType, WriteMode
from pond.metadata.metadata_source import MetadataSource
from pond.metadata.dict import DictMetadataSource
from pond.metadata.manifest import Manifest
from pond.storage.datastore import Datastore
from pond.version import Version
from pond.version_name import SimpleVersionName, VersionName
from pond.versioned_artifact import VersionedArtifact | 8,776 |
class Activity:
# TODO: can location have subpaths? e.g. `experiment1/test22`
def __init__(self,
source: str,
location: str,
datastore: Datastore,
author: str='NA',
|
class Activity:
# TODO: can location have subpaths? e.g. `experiment1/test22`
def __init__(self,
source: str,
location: str,
datastore: Datastore,
author: str='NA', | version_name_class: Type[VersionName] = SimpleVersionName, | 8 | 2023-12-24 13:05:58+00:00 | 12k |
demirogun/pyethnobiology | pyethnobiology/pyethnobiology.py | [
{
"identifier": "UR",
"path": "pyethnobiology/indices.py",
"snippet": "class UR:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:... | import pandas as pd
import rdata
from .indices import UR, CI, FC, NU, RFC, RI, UV, CV, FL, FIC
from .stats import Jaccard
from .visualization import ChordPlot | 9,727 |
class pyethnobiology:
"""
Encapsulates ethnobotanical data and analysis.
"""
def __init__(
self,
data: pd.DataFrame,
informant_column: str = "informant",
taxon_column: str = "taxon",
use_column: str = "ailments_treated",
literature_column: str = "literature",
convert_use_data: bool = False,
) -> None:
"""
Initializes the Ethnobotany class.
Args:
data: DataFrame containing ethnobotanical information.
informant_column: Name of the column containing informant IDs.
taxon_column: Name of the column containing species names.
use_column: Name of the column containing plant uses.
convert_use_data: Whether to convert use data format (optional).
"""
self.data = self.load_data(data, informant_column, taxon_column, use_column, convert_use_data)
self.informant_column = informant_column
self.taxon_column = taxon_column
self.use_column = use_column
self.literature_column = literature_column
def CI(self):
CI_class = CI(self.data, self.informant_column, self.taxon_column, self.use_column)
return CI_class
def FC(self):
FC_class = FC(self.data, self.informant_column, self.taxon_column, self.use_column)
return FC_class
def NU(self):
NU_class = NU(self.data, self.informant_column, self.taxon_column, self.use_column)
return NU_class
def UR(self):
UR_class = UR(self.data, self.informant_column, self.taxon_column, self.use_column)
return UR_class
def RFC(self):
RFC_class = RFC(self.data, self.informant_column, self.taxon_column, self.use_column)
return RFC_class
def RI(self):
RI_class = RI(self.data, self.informant_column, self.taxon_column, self.use_column)
return RI_class
def UV(self):
UV_class = UV(self.data, self.informant_column, self.taxon_column, self.use_column)
return UV_class
def CV(self):
CV_class = CV(self.data, self.informant_column, self.taxon_column, self.use_column)
return CV_class
|
class pyethnobiology:
"""
Encapsulates ethnobotanical data and analysis.
"""
def __init__(
self,
data: pd.DataFrame,
informant_column: str = "informant",
taxon_column: str = "taxon",
use_column: str = "ailments_treated",
literature_column: str = "literature",
convert_use_data: bool = False,
) -> None:
"""
Initializes the Ethnobotany class.
Args:
data: DataFrame containing ethnobotanical information.
informant_column: Name of the column containing informant IDs.
taxon_column: Name of the column containing species names.
use_column: Name of the column containing plant uses.
convert_use_data: Whether to convert use data format (optional).
"""
self.data = self.load_data(data, informant_column, taxon_column, use_column, convert_use_data)
self.informant_column = informant_column
self.taxon_column = taxon_column
self.use_column = use_column
self.literature_column = literature_column
def CI(self):
CI_class = CI(self.data, self.informant_column, self.taxon_column, self.use_column)
return CI_class
def FC(self):
FC_class = FC(self.data, self.informant_column, self.taxon_column, self.use_column)
return FC_class
def NU(self):
NU_class = NU(self.data, self.informant_column, self.taxon_column, self.use_column)
return NU_class
def UR(self):
UR_class = UR(self.data, self.informant_column, self.taxon_column, self.use_column)
return UR_class
def RFC(self):
RFC_class = RFC(self.data, self.informant_column, self.taxon_column, self.use_column)
return RFC_class
def RI(self):
RI_class = RI(self.data, self.informant_column, self.taxon_column, self.use_column)
return RI_class
def UV(self):
UV_class = UV(self.data, self.informant_column, self.taxon_column, self.use_column)
return UV_class
def CV(self):
CV_class = CV(self.data, self.informant_column, self.taxon_column, self.use_column)
return CV_class
| def FL(self): | 8 | 2023-12-25 01:06:51+00:00 | 12k |
JiePKU/MoLE | train_textual_inversion_XTI.py | [
{
"identifier": "ConfigSanitizer",
"path": "library/config_util.py",
"snippet": "class ConfigSanitizer:\n # @curry\n @staticmethod\n def __validate_and_convert_twodim(klass, value: Sequence) -> Tuple:\n Schema(ExactSequence([klass, klass]))(value)\n return tuple(value)\n\n # @curry\n @staticm... | import importlib
import argparse
import gc
import math
import os
import toml
import torch
import diffusers
import library.train_util as train_util
import library.huggingface_util as huggingface_util
import library.config_util as config_util
import library.custom_train_functions as custom_train_functions
from multiprocessing import Value
from tqdm import tqdm
from accelerate.utils import set_seed
from diffusers import DDPMScheduler
from library.config_util import (
ConfigSanitizer,
BlueprintGenerator,
)
from library.custom_train_functions import (
apply_snr_weight,
prepare_scheduler_for_custom_training,
pyramid_noise_like,
apply_noise_offset,
scale_v_prediction_loss_like_noise_prediction,
)
from XTI_hijack import unet_forward_XTI, downblock_forward_XTI, upblock_forward_XTI
from safetensors.torch import save_file
from safetensors.torch import load_file | 9,001 | vae.to(accelerator.device, dtype=weight_dtype)
# 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする
if args.full_fp16:
train_util.patch_accelerator_for_fp16_training(accelerator)
text_encoder.to(weight_dtype)
# resumeする
train_util.resume_from_local_or_hf_if_specified(accelerator, args)
# epoch数を計算する
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
# 学習する
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
print("running training / 学習開始")
print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
print(f" num epochs / epoch数: {num_train_epochs}")
print(f" batch size per device / バッチサイズ: {args.train_batch_size}")
print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
print(f" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")
progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
global_step = 0
noise_scheduler = DDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
)
prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
if accelerator.is_main_process:
accelerator.init_trackers("textual_inversion" if args.log_tracker_name is None else args.log_tracker_name)
# function for saving/removing
def save_model(ckpt_name, embs, steps, epoch_no, force_sync_upload=False):
os.makedirs(args.output_dir, exist_ok=True)
ckpt_file = os.path.join(args.output_dir, ckpt_name)
print(f"\nsaving checkpoint: {ckpt_file}")
save_weights(ckpt_file, embs, save_dtype)
if args.huggingface_repo_id is not None:
huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)
def remove_model(old_ckpt_name):
old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name)
if os.path.exists(old_ckpt_file):
print(f"removing old checkpoint: {old_ckpt_file}")
os.remove(old_ckpt_file)
# training loop
for epoch in range(num_train_epochs):
print(f"\nepoch {epoch+1}/{num_train_epochs}")
current_epoch.value = epoch + 1
text_encoder.train()
loss_total = 0
for step, batch in enumerate(train_dataloader):
current_step.value = global_step
with accelerator.accumulate(text_encoder):
with torch.no_grad():
if "latents" in batch and batch["latents"] is not None:
latents = batch["latents"].to(accelerator.device)
else:
# latentに変換
latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
b_size = latents.shape[0]
# Get the text embedding for conditioning
input_ids = batch["input_ids"].to(accelerator.device)
# weight_dtype) use float instead of fp16/bf16 because text encoder is float
encoder_hidden_states = torch.stack(
[
train_util.get_hidden_states(args, s, tokenizer, text_encoder, weight_dtype)
for s in torch.split(input_ids, 1, dim=1)
]
)
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents, device=latents.device)
if args.noise_offset:
noise = apply_noise_offset(latents, noise, args.noise_offset, args.adaptive_noise_scale)
elif args.multires_noise_iterations:
noise = pyramid_noise_like(noise, latents.device, args.multires_noise_iterations, args.multires_noise_discount)
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Predict the noise residual
with accelerator.autocast():
noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states=encoder_hidden_states).sample
if args.v_parameterization:
# v-parameterization training
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
target = noise
loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction="none")
loss = loss.mean([1, 2, 3])
loss_weights = batch["loss_weights"] # 各sampleごとのweight
loss = loss * loss_weights
if args.min_snr_gamma:
loss = apply_snr_weight(loss, timesteps, noise_scheduler, args.min_snr_gamma)
if args.scale_v_pred_loss_like_noise_pred:
|
imagenet_templates_small = [
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of a dirty {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
]
imagenet_style_templates_small = [
"a painting in the style of {}",
"a rendering in the style of {}",
"a cropped painting in the style of {}",
"the painting in the style of {}",
"a clean painting in the style of {}",
"a dirty painting in the style of {}",
"a dark painting in the style of {}",
"a picture in the style of {}",
"a cool painting in the style of {}",
"a close-up painting in the style of {}",
"a bright painting in the style of {}",
"a cropped painting in the style of {}",
"a good painting in the style of {}",
"a close-up painting in the style of {}",
"a rendition in the style of {}",
"a nice painting in the style of {}",
"a small painting in the style of {}",
"a weird painting in the style of {}",
"a large painting in the style of {}",
]
def train(args):
if args.output_name is None:
args.output_name = args.token_string
use_template = args.use_object_template or args.use_style_template
train_util.verify_training_args(args)
train_util.prepare_dataset_args(args, True)
if args.sample_every_n_steps is not None or args.sample_every_n_epochs is not None:
print(
"sample_every_n_steps and sample_every_n_epochs are not supported in this script currently / sample_every_n_stepsとsample_every_n_epochsは現在このスクリプトではサポートされていません"
)
assert (
args.dataset_class is None
), "dataset_class is not supported in this script currently / dataset_classは現在このスクリプトではサポートされていません"
cache_latents = args.cache_latents
if args.seed is not None:
set_seed(args.seed)
tokenizer = train_util.load_tokenizer(args)
# acceleratorを準備する
print("prepare accelerator")
accelerator, unwrap_model = train_util.prepare_accelerator(args)
# mixed precisionに対応した型を用意しておき適宜castする
weight_dtype, save_dtype = train_util.prepare_dtype(args)
# モデルを読み込む
text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype, accelerator)
# Convert the init_word to token_id
if args.init_word is not None:
init_token_ids = tokenizer.encode(args.init_word, add_special_tokens=False)
if len(init_token_ids) > 1 and len(init_token_ids) != args.num_vectors_per_token:
print(
f"token length for init words is not same to num_vectors_per_token, init words is repeated or truncated / 初期化単語のトークン長がnum_vectors_per_tokenと合わないため、繰り返しまたは切り捨てが発生します: length {len(init_token_ids)}"
)
else:
init_token_ids = None
# add new word to tokenizer, count is num_vectors_per_token
token_strings = [args.token_string] + [f"{args.token_string}{i+1}" for i in range(args.num_vectors_per_token - 1)]
num_added_tokens = tokenizer.add_tokens(token_strings)
assert (
num_added_tokens == args.num_vectors_per_token
), f"tokenizer has same word to token string. please use another one / 指定したargs.token_stringは既に存在します。別の単語を使ってください: {args.token_string}"
token_ids = tokenizer.convert_tokens_to_ids(token_strings)
print(f"tokens are added: {token_ids}")
assert min(token_ids) == token_ids[0] and token_ids[-1] == token_ids[0] + len(token_ids) - 1, f"token ids is not ordered"
assert len(tokenizer) - 1 == token_ids[-1], f"token ids is not end of tokenize: {len(tokenizer)}"
token_strings_XTI = []
XTI_layers = [
"IN01",
"IN02",
"IN04",
"IN05",
"IN07",
"IN08",
"MID",
"OUT03",
"OUT04",
"OUT05",
"OUT06",
"OUT07",
"OUT08",
"OUT09",
"OUT10",
"OUT11",
]
for layer_name in XTI_layers:
token_strings_XTI += [f"{t}_{layer_name}" for t in token_strings]
tokenizer.add_tokens(token_strings_XTI)
token_ids_XTI = tokenizer.convert_tokens_to_ids(token_strings_XTI)
print(f"tokens are added (XTI): {token_ids_XTI}")
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
if init_token_ids is not None:
for i, token_id in enumerate(token_ids_XTI):
token_embeds[token_id] = token_embeds[init_token_ids[(i // 16) % len(init_token_ids)]]
# print(token_id, token_embeds[token_id].mean(), token_embeds[token_id].min())
# load weights
if args.weights is not None:
embeddings = load_weights(args.weights)
assert len(token_ids) == len(
embeddings
), f"num_vectors_per_token is mismatch for weights / 指定した重みとnum_vectors_per_tokenの値が異なります: {len(embeddings)}"
# print(token_ids, embeddings.size())
for token_id, embedding in zip(token_ids_XTI, embeddings):
token_embeds[token_id] = embedding
# print(token_id, token_embeds[token_id].mean(), token_embeds[token_id].min())
print(f"weighs loaded")
print(f"create embeddings for {args.num_vectors_per_token} tokens, for {args.token_string}")
# データセットを準備する
blueprint_generator = BlueprintGenerator(ConfigSanitizer(True, True, False))
if args.dataset_config is not None:
print(f"Load dataset config from {args.dataset_config}")
user_config = config_util.load_user_config(args.dataset_config)
ignored = ["train_data_dir", "reg_data_dir", "in_json"]
if any(getattr(args, attr) is not None for attr in ignored):
print(
"ignore following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
", ".join(ignored)
)
)
else:
use_dreambooth_method = args.in_json is None
if use_dreambooth_method:
print("Use DreamBooth method.")
user_config = {
"datasets": [
{"subsets": config_util.generate_dreambooth_subsets_config_by_subdirs(args.train_data_dir, args.reg_data_dir)}
]
}
else:
print("Train with captions.")
user_config = {
"datasets": [
{
"subsets": [
{
"image_dir": args.train_data_dir,
"metadata_file": args.in_json,
}
]
}
]
}
blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer)
train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
train_dataset_group.enable_XTI(XTI_layers, token_strings=token_strings)
current_epoch = Value("i", 0)
current_step = Value("i", 0)
ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
# make captions: tokenstring tokenstring1 tokenstring2 ...tokenstringn という文字列に書き換える超乱暴な実装
if use_template:
print("use template for training captions. is object: {args.use_object_template}")
templates = imagenet_templates_small if args.use_object_template else imagenet_style_templates_small
replace_to = " ".join(token_strings)
captions = []
for tmpl in templates:
captions.append(tmpl.format(replace_to))
train_dataset_group.add_replacement("", captions)
if args.num_vectors_per_token > 1:
prompt_replacement = (args.token_string, replace_to)
else:
prompt_replacement = None
else:
if args.num_vectors_per_token > 1:
replace_to = " ".join(token_strings)
train_dataset_group.add_replacement(args.token_string, replace_to)
prompt_replacement = (args.token_string, replace_to)
else:
prompt_replacement = None
if args.debug_dataset:
train_util.debug_dataset(train_dataset_group, show_input_ids=True)
return
if len(train_dataset_group) == 0:
print("No data found. Please verify arguments / 画像がありません。引数指定を確認してください")
return
if cache_latents:
assert (
train_dataset_group.is_latent_cacheable()
), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
# モデルに xformers とか memory efficient attention を組み込む
train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
diffusers.models.UNet2DConditionModel.forward = unet_forward_XTI
diffusers.models.unet_2d_blocks.CrossAttnDownBlock2D.forward = downblock_forward_XTI
diffusers.models.unet_2d_blocks.CrossAttnUpBlock2D.forward = upblock_forward_XTI
# 学習を準備する
if cache_latents:
vae.to(accelerator.device, dtype=weight_dtype)
vae.requires_grad_(False)
vae.eval()
with torch.no_grad():
train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_latents_to_disk, accelerator.is_main_process)
vae.to("cpu")
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
accelerator.wait_for_everyone()
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
text_encoder.gradient_checkpointing_enable()
# 学習に必要なクラスを準備する
print("prepare optimizer, data loader etc.")
trainable_params = text_encoder.get_input_embeddings().parameters()
_, _, optimizer = train_util.get_optimizer(args, trainable_params)
# dataloaderを準備する
# DataLoaderのプロセス数:0はメインプロセスになる
n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1) # cpu_count-1 ただし最大で指定された数まで
train_dataloader = torch.utils.data.DataLoader(
train_dataset_group,
batch_size=1,
shuffle=True,
collate_fn=collater,
num_workers=n_workers,
persistent_workers=args.persistent_data_loader_workers,
)
# 学習ステップ数を計算する
if args.max_train_epochs is not None:
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
)
print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
# データセット側にも学習ステップを送信
train_dataset_group.set_max_train_steps(args.max_train_steps)
# lr schedulerを用意する
lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)
# acceleratorがなんかよろしくやってくれるらしい
text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
text_encoder, optimizer, train_dataloader, lr_scheduler
)
# transform DDP after prepare
text_encoder, unet = train_util.transform_if_model_is_DDP(text_encoder, unet)
index_no_updates = torch.arange(len(tokenizer)) < token_ids_XTI[0]
# print(len(index_no_updates), torch.sum(index_no_updates))
orig_embeds_params = unwrap_model(text_encoder).get_input_embeddings().weight.data.detach().clone()
# Freeze all parameters except for the token embeddings in text encoder
text_encoder.requires_grad_(True)
text_encoder.text_model.encoder.requires_grad_(False)
text_encoder.text_model.final_layer_norm.requires_grad_(False)
text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
# text_encoder.text_model.embeddings.token_embedding.requires_grad_(True)
unet.requires_grad_(False)
unet.to(accelerator.device, dtype=weight_dtype)
if args.gradient_checkpointing: # according to TI example in Diffusers, train is required
unet.train()
else:
unet.eval()
if not cache_latents:
vae.requires_grad_(False)
vae.eval()
vae.to(accelerator.device, dtype=weight_dtype)
# 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする
if args.full_fp16:
train_util.patch_accelerator_for_fp16_training(accelerator)
text_encoder.to(weight_dtype)
# resumeする
train_util.resume_from_local_or_hf_if_specified(accelerator, args)
# epoch数を計算する
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
# 学習する
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
print("running training / 学習開始")
print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
print(f" num epochs / epoch数: {num_train_epochs}")
print(f" batch size per device / バッチサイズ: {args.train_batch_size}")
print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
print(f" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")
progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
global_step = 0
noise_scheduler = DDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
)
prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
if accelerator.is_main_process:
accelerator.init_trackers("textual_inversion" if args.log_tracker_name is None else args.log_tracker_name)
# function for saving/removing
def save_model(ckpt_name, embs, steps, epoch_no, force_sync_upload=False):
os.makedirs(args.output_dir, exist_ok=True)
ckpt_file = os.path.join(args.output_dir, ckpt_name)
print(f"\nsaving checkpoint: {ckpt_file}")
save_weights(ckpt_file, embs, save_dtype)
if args.huggingface_repo_id is not None:
huggingface_util.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload)
def remove_model(old_ckpt_name):
old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name)
if os.path.exists(old_ckpt_file):
print(f"removing old checkpoint: {old_ckpt_file}")
os.remove(old_ckpt_file)
# training loop
for epoch in range(num_train_epochs):
print(f"\nepoch {epoch+1}/{num_train_epochs}")
current_epoch.value = epoch + 1
text_encoder.train()
loss_total = 0
for step, batch in enumerate(train_dataloader):
current_step.value = global_step
with accelerator.accumulate(text_encoder):
with torch.no_grad():
if "latents" in batch and batch["latents"] is not None:
latents = batch["latents"].to(accelerator.device)
else:
# latentに変換
latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
b_size = latents.shape[0]
# Get the text embedding for conditioning
input_ids = batch["input_ids"].to(accelerator.device)
# weight_dtype) use float instead of fp16/bf16 because text encoder is float
encoder_hidden_states = torch.stack(
[
train_util.get_hidden_states(args, s, tokenizer, text_encoder, weight_dtype)
for s in torch.split(input_ids, 1, dim=1)
]
)
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents, device=latents.device)
if args.noise_offset:
noise = apply_noise_offset(latents, noise, args.noise_offset, args.adaptive_noise_scale)
elif args.multires_noise_iterations:
noise = pyramid_noise_like(noise, latents.device, args.multires_noise_iterations, args.multires_noise_discount)
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (b_size,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Predict the noise residual
with accelerator.autocast():
noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states=encoder_hidden_states).sample
if args.v_parameterization:
# v-parameterization training
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
target = noise
loss = torch.nn.functional.mse_loss(noise_pred.float(), target.float(), reduction="none")
loss = loss.mean([1, 2, 3])
loss_weights = batch["loss_weights"] # 各sampleごとのweight
loss = loss * loss_weights
if args.min_snr_gamma:
loss = apply_snr_weight(loss, timesteps, noise_scheduler, args.min_snr_gamma)
if args.scale_v_pred_loss_like_noise_pred: | loss = scale_v_prediction_loss_like_noise_prediction(loss, timesteps, noise_scheduler) | 6 | 2023-12-30 07:46:35+00:00 | 12k |
Hatins/DEOE | modules/detection.py | [
{
"identifier": "ObjectLabels",
"path": "data/genx_utils/labels.py",
"snippet": "class ObjectLabels(ObjectLabelBase):\n def __init__(self,\n object_labels: th.Tensor,\n input_size_hw: Tuple[int, int]):\n super().__init__(object_labels=object_labels, input_size_h... | from typing import Any, Optional, Tuple, Union, Dict
from warnings import warn
from omegaconf import DictConfig
from pytorch_lightning.utilities.types import STEP_OUTPUT
from data.genx_utils.labels import ObjectLabels
from data.utils.types import DataType, LstmStates, ObjDetOutput, DatasetSamplingMode
from models.detection.yolox.utils.boxes import postprocess
from models.detection.yolox_extension.models.detector import YoloXDetector
from utils.evaluation.prophesee.evaluator import PropheseeEvaluator
from utils.evaluation.prophesee.io.box_loading import to_prophesee
from utils.padding import InputPadderFromShape
from .utils.detection import BackboneFeatureSelector, EventReprSelector, RNNStates, REGStates, Mode, mode_2_string, \
merge_mixed_batches
import numpy as np
import pytorch_lightning as pl
import torch
import torch as th
import torch.distributed as dist
import os
import cv2
import ipdb | 8,815 | retrieve_detections: bool = True,
targets=None) \
-> Tuple[Union[th.Tensor, None], Union[Dict[str, th.Tensor], None], LstmStates]:
return self.mdl(x=event_tensor,
previous_states=previous_states,
retrieve_detections=retrieve_detections,
targets=targets)
def get_worker_id_from_batch(self, batch: Any) -> int:
return batch['worker_id']
def get_data_from_batch(self, batch: Any):
return batch['data']
def vis_and_save_image(self, ev_pr, label, pred, unseen_classes,
save_dir = '/home/zht/python_project/RVT_CAOD_v9/save_img/', threshold = 0.3, topn = 10):
files = os.listdir(save_dir)
index = len(files)
ev_pr = ev_pr.to('cpu')
assert ev_pr.shape[0] % 2 == 0
num_bins = int(ev_pr.shape[0] / 2)
height = int(ev_pr.shape[1])
width = int(ev_pr.shape[2])
ev_pr = ev_pr.permute(1, 2, 0)
ev_pr = ev_pr.numpy()
frame = np.zeros((height, width, 3), dtype=np.uint8)
for i in range(num_bins):
pos_image = (ev_pr[:, :, i + num_bins]).astype(np.uint8)
neg_image = (ev_pr[:, :, i]).astype(np.uint8)
pos_image = cv2.equalizeHist(pos_image)
neg_image = cv2.equalizeHist(neg_image)
image = np.concatenate((neg_image[..., None], np.zeros((height, width, 1), dtype=np.uint8), pos_image[..., None]), axis=-1)
frame = np.add(frame, image)
frame = frame * 255.0
frame_copy = frame.copy()
# topn = label.shape[0]
fix_num_threshold = np.partition(pred['class_confidence'], -topn)[-topn]
if fix_num_threshold > threshold:
pass
else:
threshold = fix_num_threshold
mask = pred['class_confidence'] > threshold
pred = pred[mask]
for item in pred:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
cv2.rectangle(frame, (left, top), (right, bottom), (255, 250, 250), 1)
for item in label:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
class_id = item['class_id']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
center = ((left + right) // 2, (top + bottom) // 2)
if class_id in unseen_classes:
color = (255, 165, 0)
cv2.putText(frame_copy, str(class_id), (center[0], bottom - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
else:
color = (0, 255, 0)
cv2.rectangle(frame_copy, (left, top), (right, bottom), color, 1)
stacked_image = cv2.hconcat([frame, frame_copy])
save_path = save_dir + '{}.png'.format(index)
cv2.imwrite(save_path, stacked_image)
def concatenate_tensors(self, tensor1, tensor2, order1, order2):
D1 = tensor1.shape[0]
D2 = tensor2.shape[0]
D = D1 + D2
result_shape = (D,) + tensor1.shape[1:]
result = torch.zeros(result_shape, dtype=tensor1.dtype).to(tensor1.device)
for i, idx in enumerate(order1):
result[idx] = tensor1[i]
for i, idx in enumerate(order2):
result[idx] = tensor2[i]
return result
def subtract_lists(self, listA: list, listB: list) -> list:
return [x for x in listA if x not in listB]
def merge_dicts_and_average(self, dicts_list: list):
result_dict = {}
num_dicts = len(dicts_list)
for d in dicts_list:
for key, value in d.items():
if key in result_dict:
result_dict[key] += value
else:
result_dict[key] = value
for key in result_dict:
result_dict[key] /= num_dicts
return result_dict
def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
batch = merge_mixed_batches(batch)
data = self.get_data_from_batch(batch)
worker_id = self.get_worker_id_from_batch(batch)
mode = Mode.TRAIN
self.started_training = True
step = self.trainer.global_step
|
def remove_elements(ori_items, moving_items):
return [elem for elem in ori_items if elem not in moving_items]
class Module(pl.LightningModule):
def __init__(self, full_config: DictConfig):
super().__init__()
self.full_config = full_config
self.mdl_config = full_config.model
in_res_hw = tuple(self.mdl_config.backbone.in_res_hw)
self.input_padder = InputPadderFromShape(desired_hw=in_res_hw)
self.mdl = YoloXDetector(self.mdl_config)
self.mode_2_rnn_states: Dict[Mode, RNNStates] = {
Mode.TRAIN: RNNStates(),
Mode.VAL: RNNStates(),
Mode.TEST: RNNStates(),
}
self.reg_states = REGStates()
def setup(self, stage: Optional[str] = None) -> None:
dataset_name = self.full_config.dataset.name
self.mode_2_hw: Dict[Mode, Optional[Tuple[int, int]]] = {}
self.mode_2_batch_size: Dict[Mode, Optional[int]] = {}
self.mode_2_psee_evaluator: Dict[Mode, Optional[PropheseeEvaluator]] = {}
self.mode_2_sampling_mode: Dict[Mode, DatasetSamplingMode] = {}
self.started_training = True
dataset_train_sampling = self.full_config.dataset.train.sampling
dataset_eval_sampling = self.full_config.dataset.eval.sampling
assert dataset_train_sampling in iter(DatasetSamplingMode)
assert dataset_eval_sampling in (DatasetSamplingMode.STREAM, DatasetSamplingMode.RANDOM)
if stage == 'fit': # train + val
self.training_classes = self.full_config.dataset.training_classes
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
self.train_config = self.full_config.training
self.train_metrics_config = self.full_config.logging.train.metrics
if self.train_metrics_config.compute:
self.mode_2_psee_evaluator[Mode.TRAIN] = PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
#We set two evaluator, one (0) for unseen classes and one (1) for all classes
self.mode_2_psee_evaluator[Mode.VAL] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.TRAIN] = dataset_train_sampling
self.mode_2_sampling_mode[Mode.VAL] = dataset_eval_sampling
for mode in (Mode.TRAIN, Mode.VAL):
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
self.started_training = False
elif stage == 'validate':
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
mode = Mode.VAL
self.mode_2_psee_evaluator[mode] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.VAL] = dataset_eval_sampling
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
elif stage == 'test':
mode = Mode.TEST
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
self.mode_2_psee_evaluator[Mode.TEST] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.TEST] = dataset_eval_sampling
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
else:
raise NotImplementedError
def forward(self,
event_tensor: th.Tensor,
previous_states: Optional[LstmStates] = None,
retrieve_detections: bool = True,
targets=None) \
-> Tuple[Union[th.Tensor, None], Union[Dict[str, th.Tensor], None], LstmStates]:
return self.mdl(x=event_tensor,
previous_states=previous_states,
retrieve_detections=retrieve_detections,
targets=targets)
def get_worker_id_from_batch(self, batch: Any) -> int:
return batch['worker_id']
def get_data_from_batch(self, batch: Any):
return batch['data']
def vis_and_save_image(self, ev_pr, label, pred, unseen_classes,
save_dir = '/home/zht/python_project/RVT_CAOD_v9/save_img/', threshold = 0.3, topn = 10):
files = os.listdir(save_dir)
index = len(files)
ev_pr = ev_pr.to('cpu')
assert ev_pr.shape[0] % 2 == 0
num_bins = int(ev_pr.shape[0] / 2)
height = int(ev_pr.shape[1])
width = int(ev_pr.shape[2])
ev_pr = ev_pr.permute(1, 2, 0)
ev_pr = ev_pr.numpy()
frame = np.zeros((height, width, 3), dtype=np.uint8)
for i in range(num_bins):
pos_image = (ev_pr[:, :, i + num_bins]).astype(np.uint8)
neg_image = (ev_pr[:, :, i]).astype(np.uint8)
pos_image = cv2.equalizeHist(pos_image)
neg_image = cv2.equalizeHist(neg_image)
image = np.concatenate((neg_image[..., None], np.zeros((height, width, 1), dtype=np.uint8), pos_image[..., None]), axis=-1)
frame = np.add(frame, image)
frame = frame * 255.0
frame_copy = frame.copy()
# topn = label.shape[0]
fix_num_threshold = np.partition(pred['class_confidence'], -topn)[-topn]
if fix_num_threshold > threshold:
pass
else:
threshold = fix_num_threshold
mask = pred['class_confidence'] > threshold
pred = pred[mask]
for item in pred:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
cv2.rectangle(frame, (left, top), (right, bottom), (255, 250, 250), 1)
for item in label:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
class_id = item['class_id']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
center = ((left + right) // 2, (top + bottom) // 2)
if class_id in unseen_classes:
color = (255, 165, 0)
cv2.putText(frame_copy, str(class_id), (center[0], bottom - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
else:
color = (0, 255, 0)
cv2.rectangle(frame_copy, (left, top), (right, bottom), color, 1)
stacked_image = cv2.hconcat([frame, frame_copy])
save_path = save_dir + '{}.png'.format(index)
cv2.imwrite(save_path, stacked_image)
def concatenate_tensors(self, tensor1, tensor2, order1, order2):
D1 = tensor1.shape[0]
D2 = tensor2.shape[0]
D = D1 + D2
result_shape = (D,) + tensor1.shape[1:]
result = torch.zeros(result_shape, dtype=tensor1.dtype).to(tensor1.device)
for i, idx in enumerate(order1):
result[idx] = tensor1[i]
for i, idx in enumerate(order2):
result[idx] = tensor2[i]
return result
def subtract_lists(self, listA: list, listB: list) -> list:
return [x for x in listA if x not in listB]
def merge_dicts_and_average(self, dicts_list: list):
result_dict = {}
num_dicts = len(dicts_list)
for d in dicts_list:
for key, value in d.items():
if key in result_dict:
result_dict[key] += value
else:
result_dict[key] = value
for key in result_dict:
result_dict[key] /= num_dicts
return result_dict
def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
batch = merge_mixed_batches(batch)
data = self.get_data_from_batch(batch)
worker_id = self.get_worker_id_from_batch(batch)
mode = Mode.TRAIN
self.started_training = True
step = self.trainer.global_step | ev_tensor_sequence = data[DataType.EV_REPR] | 1 | 2023-12-29 04:04:34+00:00 | 12k |
Wangyuhao06/2022-adhoc | main.py | [
{
"identifier": "Environment",
"path": "src/env.py",
"snippet": "class Environment():\n #初始化环境\n def __init__(self):\n #初始数据-最大节点数\n self.node_max=NODE_MAX\n self.node_space_size=NODE_MAX\n self.node_moving_area=MOV_AREA\n #初始化二维平面\n self.geo_area = rando... | from src.env import Environment
from src.node import Node
from src.packet import Packet
from src.transtask import Trans_task
from src.DGN import DGN,DPG
from src.parameter import *
from src.buffereplay import ReplayBuffer
from queue import Queue
import math
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F | 9,342 | os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# import matplotlib.pyplot as plt
USE_CUDA = torch.cuda.is_available()
print(USE_CUDA)
| os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# import matplotlib.pyplot as plt
USE_CUDA = torch.cuda.is_available()
print(USE_CUDA)
| env=Environment() | 0 | 2023-12-30 09:35:30+00:00 | 12k |
alshubati99/BeamEye | uiElements/uiHandler 3.py | [
{
"identifier": "TkinterVideo",
"path": "uiElements/tkVideoPlayer.py",
"snippet": "class TkinterVideo(tk.Label):\n\n\tdef __init__(self, master, scaled: bool = True, consistant_frame_rate: bool = True, keep_aspect: bool = False,\n\t\t\t\t *args, **kwargs):\n\t\tsuper(TkinterVideo, self).__init__(master,... | import os.path
import shutil
import tkinter as tk
import customtkinter as ctk
import threading
import cv2
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
from tkinter import PhotoImage, filedialog, messagebox
from uiElements.tkVideoPlayer import TkinterVideo
from uiElements.SettingsWindow import open_settings_window, settings_inherit_root
from time import sleep
from pathlib import Path
from shutil import move
from PIL import Image, ImageTk | 9,924 | if bool(include_crowd):
current_crowd_number_off.place_forget()
max_crowd_number_off.place_forget()
current_crowd_number.place(x=480 / 1300 * video_canvas.winfo_width(),
y=600 / 750 * video_canvas.winfo_height())
current_crowd_number.configure(text_color=color_dict[crowd_color])
mc = User.crowd_count_second.index(max(User.crowd_count_second))
max_crowd_number.configure(text_color=color_dict[crowd_color])
max_crowd_number.configure(
text=f"{seconds_to_hhmmss(mc - 1 if mc > 1 else mc)} "
f"- {seconds_to_hhmmss(mc + 1 if mc > 1 else mc + 2)}"
f" ({max(User.crowd_count_second)})")
max_crowd_number.place(x=885 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
else:
current_crowd_number.place_forget()
max_crowd_number.place_forget()
current_crowd_number_off.place(x=480 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
max_crowd_number_off.place(x=885 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
current_pd_number.configure(text_color=color_dict[pedestrian_color])
mp = User.pedestrian_count_second.index(max(User.pedestrian_count_second))
max_people_number.configure(text_color=color_dict[pedestrian_color])
max_people_number.configure(
text=f"{seconds_to_hhmmss(mp - 1 if mp > 1 else mp)} - "
f"{seconds_to_hhmmss(mp + 1 if mp > 1 else mp + 2)}"
f" ({max(User.pedestrian_count_second)})")
if not video:
video = filedialog.askopenfilename()
video_player.load(video)
progress_slider.configure(to=0, from_=0)
play_pause_button["image"] = play_button_image
progress_value.set(0)
def seek(value):
vid_player.seek(int(value))
vid_player.update()
vid_player2.seek(int(value))
vid_player2.update()
video_canvas.pack(fill="both", expand=True)
current_video_canvas = video_canvas
def resize_video_canvas():
video_canvas.config(width=root.winfo_width(), height=root.winfo_height())
Lato = "Lato"
video_title_label = tk.Label(root, text='Video Title', font=("Lato", int(20 / 750 * root.winfo_width())),
foreground="white", background="#051736")
current_timestamp = ctk.CTkLabel(root, text="00:00:00", font=("Lato", int(25 / 750 * root.winfo_width())),
fg_color="#051635", bg_color="#051635",
corner_radius=8)
current_timestamp.place(x=105, y=472)
pil_image2 = Image.open(uiAssets + 'settings.png')
settings_button_image = ImageTk.PhotoImage(pil_image2)
label_a = ctk.CTkLabel(root, text="Max # of People/sec: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=225, y=550)
current_pd_number = ctk.CTkLabel(root, text="", font=("Lato", int(30 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
current_pd_number.place(x=440, y=545)
label_b = ctk.CTkLabel(root, text="Max # of Crowds/sec: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=225, y=600)
current_crowd_number = ctk.CTkLabel(root, text="", font=("Lato", int(30 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
current_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#FF8484")
label_c = ctk.CTkLabel(root, text="Max # of Crowds at: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=650, y=600)
max_crowd_number = ctk.CTkLabel(root, text="", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
current_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#FF8484")
label_d = ctk.CTkLabel(root, text="Max # of People at: ", font=("Lato", int(20 / 750 * root.winfo_width())),
fg_color="transparent",
bg_color="#051635", text_color="white",
corner_radius=8) # .place(x=650, y=550)
max_people_number = ctk.CTkLabel(root, text="", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#30A8E6")
max_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
fg_color="transparent",
bg_color="#051635", corner_radius=8,
text_color="#FF8484")
# settings_button_image = PhotoImage(file=uiAssets + 'settings.png')
settings_open_button = tk.Button(video_canvas,
image=settings_button_image,
border=0,
anchor='n',
background="#031027",
activebackground="#031027",
|
input_video_path = ""
thread_crowd, thread_people, threads_started = threading.Thread, threading.Thread, False
current_pd_number_color, current_crowd_number_color = None, None
parent = Path(__file__).resolve().parent
# if called from uiHandler will return uiElements
# if called from BeamEye.py will return GP
# we need GP//uiAssets path for ui assets
# following block is to get path to folder of the app (GP), whatever its (new) name is
# and add \\uiuAssets\\ to it
# if the parent folder isn't GP ==> a sub-folder of GP
while not os.path.isdir(str(parent) + '\\uiAssets\\'):
# go back to its parent
parent = parent.parent
GP_path = parent
uiAssets = str(GP_path) + '\\uiAssets\\'
root = tk.Tk()
root.title("BeamEye")
root.iconbitmap(uiAssets + "logo.ico")
# UI has too many elements to control during resizing, especially during video
# playback, we get screen size and base the app window on a smaller area
# before resizing is disabled.
# getting user screen size, change values to test different screen sizes
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# define window size percentage, max is 1 == screen size
resize_ratio = .75
# setting window size 75% screen size
width, height = int(screen_width * resize_ratio), int((screen_width * resize_ratio) * 9 / 16)
# keeping 16:9 aspect ratio to match videos' placeholders
root.geometry(f"{int(screen_width * resize_ratio)}x{int((screen_width * resize_ratio) * 9 / 16)}")
# disable resizing
root.resizable(False, False)
root.configure(bg="black")
# root.geometry(f"{width}x{height}")
pc = "#30A8E6"
ended = False
crowd_is_included = None
progressbar, progressbar_progress, progressbar_placeholder_label = ctk.CTkProgressBar, 0, tk.Label
current_loading_canvas, current_video_canvas = tk.Canvas, tk.Canvas
# background_image_hello = PhotoImage(file=uiAssets + 'home2.png')
pedestrian_count_second, crowd_count_second = [], []
new_video = False
def set_aspect_ratio():
s_width = root.winfo_screenwidth()
s_height = root.winfo_screenheight()
# Initial aspect ratio adjustment
new_width = root.winfo_width()
new_height = int(new_width * 9 / 16)
# If height exceeds screen, adjust width based on screen height
if new_height > s_height:
new_height = s_height
new_width = int(new_height * 16 / 9)
# If width now exceeds screen, reduce both to fit within screen
if new_width > s_width:
new_width = s_width
new_height = int(new_width * 9 / 16)
# Apply the new dimensions
root.geometry(f"{new_width}x{new_height}")
def new_coordinates(old_x, old_y, old_width=None, old_height=None):
window_width, window_height = root.winfo_width(), root.winfo_height()
new_x = old_x * window_width / 1300
new_y = old_y * window_height / 750
if old_width is not None:
new_width = old_width * window_width / 1300
new_height = old_height * window_width / 750
return new_x, new_y, new_width, new_height
return new_x, new_y
def open_hello_window():
global current_canvas, main_root, w, h
# upload canvas
img_ = Image.open(uiAssets + 'home2.png')
resized_image_ = img_.resize((root.winfo_width(), root.winfo_height()))
tk_image_ = ImageTk.PhotoImage(resized_image_)
background_image_hello = tk_image_
hello_canvas = tk.Canvas(root, width=root.winfo_width() - 4, height=root.winfo_width() - 10)
current_canvas = hello_canvas
hello_canvas.place(x=0, y=0)
hello_canvas.create_image(root.winfo_width() / 2, root.winfo_height() / 2, image=background_image_hello, anchor="c")
# settings in upload window
progressbar_placeholder = ctk.CTkProgressBar(master=hello_canvas, height=20,
width=400, bg_color="#041632", fg_color="#041632",
progress_color="#30A8E6", border_color="#30A8E6",
border_width=2, indeterminate_speed=0.01, mode='determinate'
)
progressbar_placeholder.place(x=root.winfo_width() / 2 - 200, y=root.winfo_height() / 2 + 60)
progressbar_placeholder.set(0)
# settings canvas
def wait_for_tenserflow_import():
sleep(1)
for _ in range(7): # takes around 7 seconds to import tensorflow
for __ in range(7): # each .step() increases the bar by 2%, 7x7x2 = 98% of the bar after 7 seconds
progressbar_placeholder.step()
sleep(1 / 7)
progressbar_placeholder.set(1) # set the bar to 100%
sleep(1)
hello_canvas.destroy()
return
threading.Thread(target=wait_for_tenserflow_import).start()
def seconds_to_hhmmss(seconds):
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return "{:02d}:{:02d}:{:02d}".format(int(hours), int(minutes), int(seconds))
def update_current_timestamp(stamp: ctk.CTkLabel, timestamp: str): # ,
stamp.configure(text=timestamp)
pass
video_end = False
current_canvas = None
main_root, w, h = None, 0, 0
def open_video_window():
global root, current_video_canvas
# threading.Thread(target=open_hello_window).start()
video_canvas = tk.Canvas(root, bg="#051735")
settings_inherit_root(root)
img = Image.open(uiAssets + 'blurred.png')
resized_image = img.resize((root.winfo_width(), root.winfo_height()))
tk_image = ImageTk.PhotoImage(resized_image)
background_image_loading = tk_image
def open_load_window():
global progressbar, progressbar_progress, \
progressbar_placeholder_label, current_loading_canvas, \
current_video_canvas, ended, input_video_path
bg_color = "#031532"
loading_canvas = ctk.CTkCanvas(root, width=root.winfo_width() - 4, height=root.winfo_height() - 4,
bg=bg_color) # , bg="#031532")
loading_canvas.place(x=0, y=0)
current_loading_canvas = loading_canvas
loading_canvas.create_image(0, 0, image=background_image_loading, anchor="nw", )
progressbar = ctk.CTkProgressBar(master=loading_canvas, height=int(20 * root.winfo_height() / 750),
width=int(400 * root.winfo_width() / 1300), bg_color="#3C3E46",
fg_color="#4A4C51", # bg_color: for corner
# edges, fg_color inside the bar (inactive part)
progress_color="#49FF3F", border_color="#49FF3F",
border_width=2, indeterminate_speed=0.01, mode='determinate'
)
progressbar.set(0)
loading_font = ("Lato", int(40 / 750 * root.winfo_height()))
progressbar_placeholder_label = tk.Label(loading_canvas, text='Waking Up The Robot', font=loading_font,
foreground="white",
background="#031532")
canvas_width = root.winfo_width()
canvas_height = root.winfo_height()
# Calculate the position (center horizontally, 0.3 vertically)
x_p = (canvas_width - progressbar_placeholder_label.winfo_reqwidth()) / 2
y_p = canvas_height * 0.3
# Place the label using the place method
progressbar_placeholder_label.place(x=x_p, y=y_p)
p1 = tk.Label(loading_canvas, text='Extracting Video Frames...', font=loading_font, foreground="white",
background=bg_color)
p2 = tk.Label(loading_canvas, text='Processing The Frames...', font=loading_font, foreground="white",
background=bg_color)
p3 = tk.Label(loading_canvas, text='Putting The Frames Back Together', font=loading_font, foreground="white",
background=bg_color)
p4 = tk.Label(loading_canvas, text='Almost There', font=loading_font, foreground="white",
background=bg_color)
p5 = tk.Label(loading_canvas, text='All Set!', font=loading_font, foreground="#49FF3F",
background=bg_color)
progress_feedback = [p1, p2, p3, p4, p5]
def stepper(fill=False):
# full bar is 100%
# each step is 2%
global progressbar, progressbar_progress, progressbar_placeholder_label, ended
if ended:
pass
progressbar_placeholder_label.place_forget()
x_position_p = (loading_canvas.winfo_width() - progressbar.winfo_reqwidth()) / 2
progressbar.place(x=x_position_p, y=root.winfo_height() / 2 + 60)
progressbar_progress += 2
div = 100 // (len(progress_feedback) - 1)
canvas_width_here = loading_canvas.winfo_width()
canvas_height_here = loading_canvas.winfo_height()
# Calculate the position (center horizontally, 0.3 vertically)
x_position = (canvas_width_here - progress_feedback[progressbar_progress // div].winfo_reqwidth()) / 2
y_position = canvas_height_here * 0.3
# Place the label using the place method
progress_feedback[progressbar_progress // div].place(x=x_position, y=y_position)
progress_feedback[progressbar_progress // div - 1].place_forget()
progressbar.step()
if fill:
progressbar.set(1)
progressbar.place_forget()
progress_feedback[-2].place_forget()
x_position = (canvas_width_here - progress_feedback[-1].winfo_reqwidth()) / 2
y_position = canvas_height_here * 0.4
# Place the label using the place method
progress_feedback[-1].place(x=x_position, y=y_position)
ended = True
sleep(2)
progressbar_progress = 0
User.frames_progress = 0
load_video(User.input_video_path, vid_player)
load_video(User.output_video, vid_player2)
loading_canvas.destroy()
root.maxsize()
User.finished = False
return
def fill_pb():
global ended
sleep(1)
old_progress = 0
while User.frames_progress != 100:
for _ in range(old_progress, User.frames_progress, 2):
stepper()
old_progress = User.frames_progress
sleep(.1)
while not User.finished:
sleep(.5)
else:
stepper(fill=True)
ended = False
return
threading.Thread(target=fill_pb).start()
def upload_button_func():
global input_video_path, new_video, thread_people, thread_crowd, threads_started
input_video_path = filedialog.askopenfilename(initialdir=str(Path(__file__).resolve().parent.parent),
filetypes=[("Videos", "*.mp4")])
if not input_video_path:
return
nonlocal video_title_label
new_video = True
sleep(0.002)
if (thread_people is not None and thread_crowd is not None) and threads_started:
thread_people.join()
thread_crowd.join()
threads_started = False
video_title = input_video_path.split("/")[-1]
video_title_label.configure(text=video_title)
video_title_label.place(x=45, y=30)
nonlocal play_pause_button
play_pause_button.place(x=60 * root.winfo_width() / 1300, y=470 * root.winfo_height() / 750 + 10)
if input_video_path:
User.input_video_path = input_video_path
User.wait = False
sleep(0.6)
open_load_window()
def update_duration(event):
nonlocal vid_player, vid_player2
duration = vid_player.video_info()["duration"]
progress_slider["to"] = duration
def play_pause():
global new_video, thread_people, thread_crowd, threads_started
if new_video:
declare_threads()
thread_people.start()
thread_crowd.start()
new_video = False
threads_started = True
nonlocal vid_player, vid_player2
global video_end
if vid_player.is_paused() and vid_player2.is_paused():
threading.Thread(target=vid_player.play).start()
threading.Thread(target=vid_player2.play).start()
play_pause_button["image"] = pause_button_image
else:
vid_player.pause()
vid_player2.pause()
play_pause_button["image"] = play_button_image
def video_ended(event):
nonlocal vid_player, vid_player2, current_timestamp
progress_slider.set(progress_slider["to"])
play_pause_button["image"] = play_button_image
progress_slider.set(0)
current_timestamp.configure(text="00:00:00")
def update_scale(event):
global video_end
nonlocal vid_player, vid_player2
progress_value.set(int(vid_player.current_duration()))
update_current_timestamp(current_timestamp, seconds_to_hhmmss(vid_player.current_duration()))
def load_video(video: str, video_player: TkinterVideo):
    """Load `video` into `video_player` and refresh the statistics labels.

    Reads display preferences from userSettings.txt (label/crowd/accuracy
    toggles, colours, output path), shows or hides the crowd counters
    accordingly, and fills the "max people/crowds" summary labels from the
    per-second counts on User.  Falls back to a file dialog when `video` is
    empty.
    """
    nonlocal current_pd_number, current_crowd_number, \
        current_crowd_number_off, max_people_number, \
        max_crowd_number, max_crowd_number_off
    # Label colour palette keyed by the colour index stored in user settings:
    # {1: blue, 2: purple, 3: red, 4: orange, 5: yellow, 6: green}
    color_dict = {1: "#0094FF", 2: "#FF00F6", 3: "red", 4: "#FF6A00", 5: "yellow",
                  6: "#26FF5C"}
    # Settings file: one "key value" pair per line (Windows-style backslash path).
    with open(str(GP_path) + "\\uiElements\\userSettings.txt", "r") as f:
        settings = f.read()
    settings = [line.split(" ")[-1] for line in settings.split("\n")]
    include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color, output_path = settings
    include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color = int(include_labels), int(
        include_crowd), int(include_accuracy), int(pedestrian_color), int(crowd_color)
    global crowd_is_included
    crowd_is_included = include_crowd
    if bool(include_crowd):
        # Crowd stats enabled: swap the "Off" placeholders for live counters.
        current_crowd_number_off.place_forget()
        max_crowd_number_off.place_forget()
        current_crowd_number.place(x=480 / 1300 * video_canvas.winfo_width(),
                                   y=600 / 750 * video_canvas.winfo_height())
        current_crowd_number.configure(text_color=color_dict[crowd_color])
        # Second with the highest crowd count; display a small window around it.
        mc = User.crowd_count_second.index(max(User.crowd_count_second))
        max_crowd_number.configure(text_color=color_dict[crowd_color])
        max_crowd_number.configure(
            text=f"{seconds_to_hhmmss(mc - 1 if mc > 1 else mc)} "
                 f"- {seconds_to_hhmmss(mc + 1 if mc > 1 else mc + 2)}"
                 f" ({max(User.crowd_count_second)})")
        max_crowd_number.place(x=885 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
    else:
        current_crowd_number.place_forget()
        max_crowd_number.place_forget()
        current_crowd_number_off.place(x=480 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
        max_crowd_number_off.place(x=885 / 1300 * root.winfo_width(), y=600 / 750 * root.winfo_height())
    current_pd_number.configure(text_color=color_dict[pedestrian_color])
    # Second with the highest pedestrian count, same windowing as above.
    mp = User.pedestrian_count_second.index(max(User.pedestrian_count_second))
    max_people_number.configure(text_color=color_dict[pedestrian_color])
    max_people_number.configure(
        text=f"{seconds_to_hhmmss(mp - 1 if mp > 1 else mp)} - "
             f"{seconds_to_hhmmss(mp + 1 if mp > 1 else mp + 2)}"
             f" ({max(User.pedestrian_count_second)})")
    if not video:
        video = filedialog.askopenfilename()
    video_player.load(video)
    # Reset playback UI for the newly loaded video.
    progress_slider.configure(to=0, from_=0)
    play_pause_button["image"] = play_button_image
    progress_value.set(0)
def seek(value):
    """Slider callback: jump both players to `value` seconds and refresh them."""
    vid_player.seek(int(value))
    vid_player.update()
    vid_player2.seek(int(value))
    vid_player2.update()
# Show the video canvas and remember it as the active canvas.
video_canvas.pack(fill="both", expand=True)
current_video_canvas = video_canvas
def resize_video_canvas():
    """Resize the video canvas to fill the current window dimensions."""
    video_canvas.config(width=root.winfo_width(), height=root.winfo_height())
# --- Static statistics widgets (fonts scaled from a 750-high reference layout) ---
Lato = "Lato"  # NOTE(review): unused local; the font family is passed as a literal below
video_title_label = tk.Label(root, text='Video Title', font=("Lato", int(20 / 750 * root.winfo_width())),
                             foreground="white", background="#051736")
# Current playback timestamp, updated by update_scale().
current_timestamp = ctk.CTkLabel(root, text="00:00:00", font=("Lato", int(25 / 750 * root.winfo_width())),
                                 fg_color="#051635", bg_color="#051635",
                                 corner_radius=8)
current_timestamp.place(x=105, y=472)
# Settings button icon image.
pil_image2 = Image.open(uiAssets + 'settings.png')
settings_button_image = ImageTk.PhotoImage(pil_image2)
# Pedestrian-count caption and live counter.
label_a = ctk.CTkLabel(root, text="Max # of People/sec: ", font=("Lato", int(20 / 750 * root.winfo_width())),
                       fg_color="transparent",
                       bg_color="#051635", text_color="white",
                       corner_radius=8)  # .place(x=225, y=550)
current_pd_number = ctk.CTkLabel(root, text="", font=("Lato", int(30 / 750 * root.winfo_width()), "bold"),
                                 fg_color="transparent",
                                 bg_color="#051635", corner_radius=8,
                                 text_color="#30A8E6")
current_pd_number.place(x=440, y=545)
# Crowd-count caption, live counter, and "Off" placeholder.
label_b = ctk.CTkLabel(root, text="Max # of Crowds/sec: ", font=("Lato", int(20 / 750 * root.winfo_width())),
                       fg_color="transparent",
                       bg_color="#051635", text_color="white",
                       corner_radius=8)  # .place(x=225, y=600)
current_crowd_number = ctk.CTkLabel(root, text="", font=("Lato", int(30 / 750 * root.winfo_width()), "bold"),
                                    fg_color="transparent",
                                    bg_color="#051635", corner_radius=8,
                                    text_color="#30A8E6")
current_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
                                        fg_color="transparent",
                                        bg_color="#051635", corner_radius=8,
                                        text_color="#FF8484")
# "Max crowds at" caption and value label.
label_c = ctk.CTkLabel(root, text="Max # of Crowds at: ", font=("Lato", int(20 / 750 * root.winfo_width())),
                       fg_color="transparent",
                       bg_color="#051635", text_color="white",
                       corner_radius=8)  # .place(x=650, y=600)
max_crowd_number = ctk.CTkLabel(root, text="", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
                                fg_color="transparent",
                                bg_color="#051635", corner_radius=8,
                                text_color="#30A8E6")
# NOTE(review): duplicate creation -- current_crowd_number_off was already built
# above; this rebinding discards the first widget object.
current_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
                                        fg_color="transparent",
                                        bg_color="#051635", corner_radius=8,
                                        text_color="#FF8484")
# "Max people at" caption and value label.
label_d = ctk.CTkLabel(root, text="Max # of People at: ", font=("Lato", int(20 / 750 * root.winfo_width())),
                       fg_color="transparent",
                       bg_color="#051635", text_color="white",
                       corner_radius=8)  # .place(x=650, y=550)
max_people_number = ctk.CTkLabel(root, text="", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
                                 fg_color="transparent",
                                 bg_color="#051635", corner_radius=8,
                                 text_color="#30A8E6")
max_crowd_number_off = ctk.CTkLabel(root, text="Off", font=("Lato", int(20 / 750 * root.winfo_width()), "bold"),
                                    fg_color="transparent",
                                    bg_color="#051635", corner_radius=8,
                                    text_color="#FF8484")
# settings_button_image = PhotoImage(file=uiAssets + 'settings.png')
settings_open_button = tk.Button(video_canvas,
image=settings_button_image,
border=0,
anchor='n',
background="#031027",
activebackground="#031027", | command=open_settings_window | 1 | 2023-12-26 18:39:25+00:00 | 12k |
camenduru/MotionCtrl-hf | app.py | [
{
"identifier": "CAMERA_MOTION_MODE",
"path": "gradio_utils/camera_utils.py",
"snippet": "CAMERA_MOTION_MODE = [\"Basic Camera Poses\", \"Provided Complex Camera Poses\", \"Custom Camera Poses\"]"
},
{
"identifier": "process_camera",
"path": "gradio_utils/camera_utils.py",
"snippet": "de... | import argparse
import os
import tempfile
import cv2
import gradio as gr
import imageio
import numpy as np
import torch
import torchvision
from functools import partial
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from gradio_utils.camera_utils import CAMERA_MOTION_MODE, process_camera
from gradio_utils.traj_utils import (OBJECT_MOTION_MODE, get_provided_traj,
process_points, process_traj)
from gradio_utils.utils import vis_camera
from lvdm.models.samplers.ddim import DDIMSampler
from main.evaluation.motionctrl_inference import (DEFAULT_NEGATIVE_PROMPT,
load_model_checkpoint,
post_prompt)
from utils.utils import instantiate_from_config | 7,950 | BASE_MODEL = ['LVDM/VideoCrafter', 'AnimateDiff', 'SVD']
# Clicked object-trajectory points, shared across the Gradio callbacks.
traj_list = []
# Current camera-control state shared across the Gradio callbacks.
camera_dict = {
    "motion":[],
    "mode": "Customized Mode 1: First A then B", # other options: "First A then B", "Both A and B", "Custom"
    "speed": 1.0,
    "complex": None
}
def fn_vis_camera(info_mode):
    """Render the configured camera trajectory and toggle the next-step widgets.

    Returns the camera figure plus gr.update() visibility toggles: in
    camera-only mode (MODE[0]) the prompt/generation widgets are revealed,
    otherwise the object-motion widgets are revealed instead.
    """
    global camera_dict
    RT = process_camera(camera_dict) # [t, 3, 4] per-frame camera pose matrices
    if camera_dict['complex'] is not None:
        # rescale T to [-2,2]
        for i in range(3):
            min_T = np.min(RT[:,i,-1])
            max_T = np.max(RT[:,i,-1])
            if min_T < -2 or max_T > 2:
                # Shift this axis to start at 0, normalise, then map into [-2, 2].
                # NOTE(review): denominator uses the max over ALL axes
                # (RT[:,:,-1]), not just axis i -- confirm this is intended.
                RT[:,i,-1] = RT[:,i,-1] - min_T
                RT[:,i,-1] = RT[:,i,-1] / (np.max(RT[:,:,-1]) + 1e-6)
                RT[:,i,-1] = RT[:,i,-1] * 4
                RT[:,i,-1] = RT[:,i,-1] - 2
    fig = vis_camera(RT)
    if info_mode == MODE[0]:
        # Camera-only mode: show prompt/generation controls, hide object controls.
        vis_step3_prompt_generate = True
        vis_prompt = True
        vis_num_samples = True
        vis_seed = True
        vis_start = True
        vis_gen_video = True
        vis_object_mode = False
        vis_object_info = False
    else:
        # Combined mode: the object trajectory must be provided next.
        vis_step3_prompt_generate = False
        vis_prompt = False
        vis_num_samples = False
        vis_seed = False
        vis_start = False
        vis_gen_video = False
        vis_object_mode = True
        vis_object_info = True
    return fig, \
        gr.update(visible=vis_object_mode), \
        gr.update(visible=vis_object_info), \
        gr.update(visible=vis_step3_prompt_generate), \
        gr.update(visible=vis_prompt), \
        gr.update(visible=vis_num_samples), \
        gr.update(visible=vis_seed), \
        gr.update(visible=vis_start), \
        gr.update(visible=vis_gen_video, value=None)
def fn_vis_traj():
    """Render the drawn object trajectory as a short preview mp4.

    Draws the full path in red on a white 1024x1024 canvas and marks the
    moving point in green for each of the 16 frames, writes a 10 fps mp4 to a
    temp file, and reveals the step-3 widgets.  Returns the video path plus
    gr.update() visibility toggles.
    """
    global traj_list
    xy_range = 1024  # NOTE(review): unused; the canvas size below is hard-coded to 1024
    points = process_points(traj_list)
    imgs = []
    for idx in range(16):
        bg_img = np.ones((1024, 1024, 3), dtype=np.uint8) * 255
        for i in range(15):
            p = points[i]
            p1 = points[i+1]
            cv2.line(bg_img, p, p1, (255, 0, 0), 2)
            if i == idx:
                cv2.circle(bg_img, p, 2, (0, 255, 0), 20)
        if idx==(15):
            # Last frame: the green marker sits on the final point.
            cv2.circle(bg_img, points[-1], 2, (0, 255, 0), 20)
        imgs.append(bg_img.astype(np.uint8))
    # size = (512, 512)
    fps = 10
    # Keep the file around (delete=False) so Gradio can serve it afterwards.
    path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
    writer = imageio.get_writer(path, format='mp4', mode='I', fps=fps)
    for img in imgs:
        writer.append_data(img)
    writer.close()
    # Reveal the prompt/generation widgets for the next step.
    vis_step3_prompt_generate = True
    vis_prompt = True
    vis_num_samples = True
    vis_seed = True
    vis_start = True
    vis_gen_video = True
    return path, gr.update(visible=vis_step3_prompt_generate), \
            gr.update(visible=vis_prompt), \
            gr.update(visible=vis_num_samples), \
            gr.update(visible=vis_seed), \
            gr.update(visible=vis_start), \
            gr.update(visible=vis_gen_video, value=None)
def display_camera_info(camera_dict, camera_mode=None):
    """Summarise the current camera settings as a short display string.

    Mentions the complex-pose name when one is selected, always lists the
    basic motions and speed, and appends the combination mode only in the
    custom camera-pose mode (CAMERA_MOTION_MODE[2]).
    """
    parts = []
    if camera_dict['complex'] is not None:
        parts.append(f"complex : {camera_dict['complex']}. ")
    parts.append(f"motion : {list(camera_dict['motion'])}. ")
    parts.append(f"speed : {camera_dict['speed']}. ")
    if camera_mode == CAMERA_MOTION_MODE[2]:
        parts.append(f"mode : {camera_dict['mode']}. ")
    return "".join(parts)
def add_traj_point(evt: gr.SelectData, ):
    """Image-click callback: record the clicked point on the global trajectory.

    Appends evt.index to traj_list and returns all recorded points as a
    comma-separated string for display.
    """
    global traj_list
    traj_list.append(evt.index)
    traj_str = [f"{traj}" for traj in traj_list]
    return ", ".join(traj_str)
def add_provided_traj(traj_name):
global traj_list
|
os.environ['KMP_DUPLICATE_LIB_OK']='True'
#### Description ####
title = r"""<h1 align="center">MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</h1>"""
description = r"""
<b>Official Gradio demo</b> for <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'><b>MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</b></a>.<br>
🔥 MotionCtrl is capable of independently and flexibly controling the camera motion and object motion of a generated video, with only a unified model.<br>
🤗 Try to control the motion of the generated videos yourself!<br>
❗❗❗ Please note that current version of **MotionCtrl** is deployed on **LVDM/VideoCrafter**. The versions that depolyed on **AnimateDiff** and **SVD** will be released soon.<br>
"""
article = r"""
If MotionCtrl is helpful, please help to ⭐ the <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'>Github Repo</a>. Thanks!
[](https://github.com/TencentARC/MotionCtrl)
---
📝 **Citation**
<br>
If our work is useful for your research, please consider citing:
```bibtex
@inproceedings{wang2023motionctrl,
title={MotionCtrl: A Unified and Flexible Motion Controller for Video Generation},
author={Wang, Zhouxia and Yuan, Ziyang and Wang, Xintao and Chen, Tianshui and Xia, Menghan and Luo, Ping and Shan, Yin},
booktitle={arXiv preprint arXiv:2312.03641},
year={2023}
}
```
📧 **Contact**
<br>
If you have any questions, please feel free to reach me out at <b>wzhoux@connect.hku.hk</b>.
"""
css = """
.gradio-container {width: 85% !important}
.gr-monochrome-group {border-radius: 5px !important; border: revert-layer !important; border-width: 2px !important; color: black !important;}
span.svelte-s1r2yt {font-size: 17px !important; font-weight: bold !important; color: #d30f2f !important;}
button {border-radius: 8px !important;}
.add_button {background-color: #4CAF50 !important;}
.remove_button {background-color: #f44336 !important;}
.clear_button {background-color: gray !important;}
.mask_button_group {gap: 10px !important;}
.video {height: 300px !important;}
.image {height: 300px !important;}
.video .wrap.svelte-lcpz3o {display: flex !important; align-items: center !important; justify-content: center !important;}
.video .wrap.svelte-lcpz3o > :first-child {height: 100% !important;}
.margin_center {width: 50% !important; margin: auto !important;}
.jc_center {justify-content: center !important;}
"""
# Unit translation directions in world-to-camera (W2C) coordinates, used to
# compose the basic camera motions.
T_base = [
    [1.,0.,0.],   ## W2C +x: camera moves left
    [-1.,0.,0.],  ## W2C -x: camera moves right
    [0., 1., 0.], ## W2C +y: camera moves up
    [0.,-1.,0.],  ## W2C -y: camera moves down
    [0.,0.,1.],   ## W2C +z: camera moves back (zoom out)
    [0.,0.,-1.],  ## W2C -z: camera moves forward (zoom in)
]
radius = 1  # NOTE(review): appears unused in this section
n = 16  # number of frames in a trajectory
# step =
look_at = np.array([0, 0, 0.8]).reshape(3,1)
# look_at = np.array([0, 0, 0.2]).reshape(3,1)
T_list = []  # NOTE(review): appears unused in this section
base_R = np.array([[1., 0., 0.],
                   [0., 1., 0.],
                   [0., 0., 1.]])
res = []
res_forsave = []  # NOTE(review): appears unused in this section
T_range = 1.8  # NOTE(review): appears unused in this section
# Default 16-frame trajectory: identity rotation with the camera translating
# along +z up to 2 units, shown before the user configures anything.
for i in range(0, 16):
    # theta = (1)*np.pi*i/n
    R = base_R[:,:3]
    T = np.array([0.,0.,1.]).reshape(3,1) * (i/n)*2
    RT = np.concatenate([R,T], axis=1)
    res.append(RT)
fig = vis_camera(res)
# MODE = ["camera motion control", "object motion control", "camera + object motion control"]
MODE = ["control camera poses", "control object trajectory", "control both camera and object motion"]
BASE_MODEL = ['LVDM/VideoCrafter', 'AnimateDiff', 'SVD']
# Clicked object-trajectory points, shared across the Gradio callbacks.
traj_list = []
# Current camera-control state shared across the Gradio callbacks.
camera_dict = {
    "motion":[],
    "mode": "Customized Mode 1: First A then B", # other options: "First A then B", "Both A and B", "Custom"
    "speed": 1.0,
    "complex": None
}
def fn_vis_camera(info_mode):
    """Render the configured camera trajectory and toggle the next-step widgets.

    Returns the camera figure plus gr.update() visibility toggles: in
    camera-only mode (MODE[0]) the prompt/generation widgets are revealed,
    otherwise the object-motion widgets are revealed instead.
    """
    global camera_dict
    RT = process_camera(camera_dict) # [t, 3, 4] per-frame camera pose matrices
    if camera_dict['complex'] is not None:
        # rescale T to [-2,2]
        for i in range(3):
            min_T = np.min(RT[:,i,-1])
            max_T = np.max(RT[:,i,-1])
            if min_T < -2 or max_T > 2:
                # Shift this axis to start at 0, normalise, then map into [-2, 2].
                # NOTE(review): denominator uses the max over ALL axes
                # (RT[:,:,-1]), not just axis i -- confirm this is intended.
                RT[:,i,-1] = RT[:,i,-1] - min_T
                RT[:,i,-1] = RT[:,i,-1] / (np.max(RT[:,:,-1]) + 1e-6)
                RT[:,i,-1] = RT[:,i,-1] * 4
                RT[:,i,-1] = RT[:,i,-1] - 2
    fig = vis_camera(RT)
    if info_mode == MODE[0]:
        # Camera-only mode: show prompt/generation controls, hide object controls.
        vis_step3_prompt_generate = True
        vis_prompt = True
        vis_num_samples = True
        vis_seed = True
        vis_start = True
        vis_gen_video = True
        vis_object_mode = False
        vis_object_info = False
    else:
        # Combined mode: the object trajectory must be provided next.
        vis_step3_prompt_generate = False
        vis_prompt = False
        vis_num_samples = False
        vis_seed = False
        vis_start = False
        vis_gen_video = False
        vis_object_mode = True
        vis_object_info = True
    return fig, \
        gr.update(visible=vis_object_mode), \
        gr.update(visible=vis_object_info), \
        gr.update(visible=vis_step3_prompt_generate), \
        gr.update(visible=vis_prompt), \
        gr.update(visible=vis_num_samples), \
        gr.update(visible=vis_seed), \
        gr.update(visible=vis_start), \
        gr.update(visible=vis_gen_video, value=None)
def fn_vis_traj():
    """Render the drawn object trajectory as a short preview mp4.

    Draws the full path in red on a white 1024x1024 canvas and marks the
    moving point in green for each of the 16 frames, writes a 10 fps mp4 to a
    temp file, and reveals the step-3 widgets.  Returns the video path plus
    gr.update() visibility toggles.
    """
    global traj_list
    xy_range = 1024  # NOTE(review): unused; the canvas size below is hard-coded to 1024
    points = process_points(traj_list)
    imgs = []
    for idx in range(16):
        bg_img = np.ones((1024, 1024, 3), dtype=np.uint8) * 255
        for i in range(15):
            p = points[i]
            p1 = points[i+1]
            cv2.line(bg_img, p, p1, (255, 0, 0), 2)
            if i == idx:
                cv2.circle(bg_img, p, 2, (0, 255, 0), 20)
        if idx==(15):
            # Last frame: the green marker sits on the final point.
            cv2.circle(bg_img, points[-1], 2, (0, 255, 0), 20)
        imgs.append(bg_img.astype(np.uint8))
    # size = (512, 512)
    fps = 10
    # Keep the file around (delete=False) so Gradio can serve it afterwards.
    path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
    writer = imageio.get_writer(path, format='mp4', mode='I', fps=fps)
    for img in imgs:
        writer.append_data(img)
    writer.close()
    # Reveal the prompt/generation widgets for the next step.
    vis_step3_prompt_generate = True
    vis_prompt = True
    vis_num_samples = True
    vis_seed = True
    vis_start = True
    vis_gen_video = True
    return path, gr.update(visible=vis_step3_prompt_generate), \
            gr.update(visible=vis_prompt), \
            gr.update(visible=vis_num_samples), \
            gr.update(visible=vis_seed), \
            gr.update(visible=vis_start), \
            gr.update(visible=vis_gen_video, value=None)
def display_camera_info(camera_dict, camera_mode=None):
    """Summarise the current camera settings as a short display string.

    Mentions the complex-pose name when one is selected, always lists the
    basic motions and speed, and appends the combination mode only in the
    custom camera-pose mode (CAMERA_MOTION_MODE[2]).
    """
    parts = []
    if camera_dict['complex'] is not None:
        parts.append(f"complex : {camera_dict['complex']}. ")
    parts.append(f"motion : {list(camera_dict['motion'])}. ")
    parts.append(f"speed : {camera_dict['speed']}. ")
    if camera_mode == CAMERA_MOTION_MODE[2]:
        parts.append(f"mode : {camera_dict['mode']}. ")
    return "".join(parts)
def add_traj_point(evt: gr.SelectData, ):
    """Image-click callback: record the clicked point on the global trajectory.

    Appends evt.index to traj_list and returns all recorded points as a
    comma-separated string for display.
    """
    global traj_list
    traj_list.append(evt.index)
    traj_str = [f"{traj}" for traj in traj_list]
    return ", ".join(traj_str)
def add_provided_traj(traj_name):
global traj_list | traj_list = get_provided_traj(traj_name) | 3 | 2023-12-27 19:32:03+00:00 | 12k |
0x00wolf/hkrsAI | hkrsai.py | [
{
"identifier": "fetch_args",
"path": "src/args.py",
"snippet": "def fetch_args():\n \"\"\"Function to handle command-line arguments\"\"\"\n p = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n prog='hkrsAI.v2',\n description=DESCRIPTION,\n ep... | import sys
import os
import readline
from src.args import fetch_args
from src.pathfinder import PathFinder
from src.client import Client
from src.gpt import GPT
from src.systemprompt import SystemPrompt
from src.conversation import Conversation
from src.action import Action
from src.inputparser import InputParser
from src.dispatcher import Dispatcher
from src.logger import Logger | 7,717 |
HKRSAI = """
1 0 1
1 1 0
0 0 0
hkrsAI.v2
"""
def main():
print(HKRSAI)
args = fetch_args() # command-line arguments
paths = PathFinder(cwd=os.path.dirname(os.path.abspath(__file__)))
parser = InputParser() # Class to parse user input and return Actions.
dispatcher = Dispatcher() # Manages conversation state and turnsActions into functions.
logger = Logger(paths=paths, log_level=args.log_level, log_format=args.log_format)
|
HKRSAI = """
1 0 1
1 1 0
0 0 0
hkrsAI.v2
"""
def main():
print(HKRSAI)
args = fetch_args() # command-line arguments
paths = PathFinder(cwd=os.path.dirname(os.path.abspath(__file__)))
parser = InputParser() # Class to parse user input and return Actions.
dispatcher = Dispatcher() # Manages conversation state and turnsActions into functions.
logger = Logger(paths=paths, log_level=args.log_level, log_format=args.log_format)
| client = Client(config=paths.config) # OpenAI API client management object | 2 | 2023-12-22 07:04:47+00:00 | 12k |
hughxiouge/CompoundE3D | run.py | [
{
"identifier": "KGEModel",
"path": "model.py",
"snippet": "class KGEModel(nn.Module):\n def __init__(self, model_name, nentity, nrelation, hidden_dim, gamma, evaluator,\n double_entity_embedding=False, \n double_relation_embedding=False, triple_relation_embedding=Fals... | import argparse
import json
import logging
import os
import random
import numpy as np
import torch
import time
import os.path as osp
from torch.utils.data import DataLoader
from model import KGEModel
from dataloader import TrainDataset
from dataloader import BidirectionalOneShotIterator
from ogb.linkproppred import LinkPropPredDataset, Evaluator
from collections import defaultdict
from tqdm import tqdm
from tensorboardX import SummaryWriter | 7,747 |
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):
raise ValueError('one of train/val/test mode must be choosed.')
if args.init_checkpoint:
override_config(args)
args.save_path = 'log/%s/%s/%s-%s/%s'%(args.dataset, args.model, args.hidden_dim, args.gamma, time.time()) if args.save_path == None else args.save_path
writer = SummaryWriter(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
dataset = LinkPropPredDataset(name = args.dataset)
split_dict = dataset.get_edge_split()
nentity = dataset.graph['num_nodes']
nrelation = int(max(dataset.graph['edge_reltype'])[0])+1
evaluator = Evaluator(name = args.dataset)
args.nentity = nentity
args.nrelation = nrelation
logging.info('Model: %s' % args.model)
logging.info('Dataset: %s' % args.dataset)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
train_triples = split_dict['train']
logging.info('#train: %d' % len(train_triples['head']))
valid_triples = split_dict['valid']
logging.info('#valid: %d' % len(valid_triples['head']))
test_triples = split_dict['test']
logging.info('#test: %d' % len(test_triples['head']))
logging.info('relation type %s' % args.relation_type)
print('relation type %s' % args.relation_type)
test_set_file = ''
if args.relation_type == '1-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-1.pt'
elif args.relation_type == '1-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-n.pt'
elif args.relation_type == 'n-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-1.pt'
elif args.relation_type == 'n-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-n.pt'
if test_set_file != '':
if osp.exists(test_set_pre_processed):
test_triples = torch.load(test_set_pre_processed, 'rb')
print("load pre processed test set")
else:
test_triples_new = {}
test_triples_chosen = []
test_triples_new['head'] = []
test_triples_new['relation'] = []
test_triples_new['tail'] = []
test_triples_new['head_neg'] = []
test_triples_new['tail_neg'] = []
f_test = open(test_set_file, "r")
for line in f_test:
h, r, t = line.strip().split('\t')
h, r, t = int(h), int(r), int(t)
test_triples_chosen.append((h, r, t))
f_test.close()
for idx in range(len(test_triples['head'])):
h, r, t = test_triples['head'][idx], test_triples['relation'][idx], test_triples['tail'][idx]
if (h, r, t) in test_triples_chosen:
test_triples_new['head'].append(h)
test_triples_new['relation'].append(r)
test_triples_new['tail'].append(t)
test_triples_new['head_neg'].append(test_triples['head_neg'][idx])
test_triples_new['tail_neg'].append(test_triples['tail_neg'][idx])
print('Saving ...')
torch.save(test_triples_new, test_set_pre_processed, pickle_protocol=4)
test_triples = test_triples_new
logging.info('#test: %d' % len(test_triples['head']))
train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)
f_train = open("train.txt", "w")
for i in tqdm(range(len(train_triples['head']))):
head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]
train_count[(head, relation)] += 1
train_count[(tail, -relation-1)] += 1
train_true_head[(relation, tail)].append(head)
train_true_tail[(head, relation)].append(tail)
f_train.write("\t".join([str(head), str(relation), str(tail)]) + '\n')
f_train.close()
kge_model = KGEModel(
model_name=args.model,
nentity=nentity,
nrelation=nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding,
triple_relation_embedding=args.triple_relation_embedding,
quad_relation_embedding=args.quad_relation_embedding,
evaluator=evaluator
)
logging.info('Model Parameter Configuration:')
for name, param in kge_model.named_parameters():
logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))
if args.cuda:
kge_model = kge_model.cuda()
if args.do_train:
# Set training dataloader iterator
train_dataloader_head = DataLoader(
| #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def parse_args(args=None):
    """Build the CLI parser for KGE training/testing and parse *args*.

    Passing an explicit argument list (rather than None) parses that list
    instead of sys.argv, which keeps this function testable.
    """
    arg_parser = argparse.ArgumentParser(
        description='Training and Testing Knowledge Graph Embedding Models',
        usage='train.py [<args>] [-h | --help]'
    )
    # Run modes.
    arg_parser.add_argument('--cuda', action='store_true', help='use GPU')
    arg_parser.add_argument('--do_train', action='store_true')
    arg_parser.add_argument('--do_valid', action='store_true')
    arg_parser.add_argument('--do_test', action='store_true')
    arg_parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')
    # Dataset and model selection.
    arg_parser.add_argument('--dataset', type=str, default='ogbl-wikikg2', help='dataset name, default to wikikg')
    arg_parser.add_argument('--model', default='TransE', type=str)
    arg_parser.add_argument('-de', '--double_entity_embedding', action='store_true')
    arg_parser.add_argument('-dr', '--double_relation_embedding', action='store_true')
    arg_parser.add_argument('-tr', '--triple_relation_embedding', action='store_true')
    arg_parser.add_argument('-qr', '--quad_relation_embedding', action='store_true')
    # Training hyper-parameters.
    arg_parser.add_argument('-n', '--negative_sample_size', default=128, type=int)
    arg_parser.add_argument('-d', '--hidden_dim', default=500, type=int)
    arg_parser.add_argument('-g', '--gamma', default=12.0, type=float)
    arg_parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
    arg_parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
    arg_parser.add_argument('-b', '--batch_size', default=1024, type=int)
    arg_parser.add_argument('-r', '--regularization', default=0.0, type=float)
    arg_parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
    arg_parser.add_argument('--uni_weight', action='store_true',
                            help='Otherwise use subsampling weighting like in word2vec')
    arg_parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
    arg_parser.add_argument('-cpu', '--cpu_num', default=10, type=int)
    # Checkpointing, scheduling and logging cadence.
    arg_parser.add_argument('-init', '--init_checkpoint', default=None, type=str)
    arg_parser.add_argument('-save', '--save_path', default=None, type=str)
    arg_parser.add_argument('--max_steps', default=100000, type=int)
    arg_parser.add_argument('--warm_up_steps', default=None, type=int)
    arg_parser.add_argument('--save_checkpoint_steps', default=10000, type=int)
    arg_parser.add_argument('--valid_steps', default=10000, type=int)
    arg_parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')
    arg_parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')
    # Filled in programmatically from the dataset; not meant to be user-set.
    arg_parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
    arg_parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
    arg_parser.add_argument('--print_on_screen', action='store_true', help='log on screen or not')
    # Evaluation subset controls.
    arg_parser.add_argument('--ntriples_eval_train', type=int, default=200000, help='number of training triples to evaluate eventually')
    arg_parser.add_argument('--neg_size_eval_train', type=int, default=500, help='number of negative samples when evaluating training triples')
    arg_parser.add_argument('--relation_type', type=str, default='all', help='1-1, 1-n, n-1, n-n')
    return arg_parser.parse_args(args)
def override_config(args):
    """Overwrite the model/data settings on *args* from a saved run config.

    Loads <args.init_checkpoint>/config.json (written by save_model) and copies
    the architecture-defining fields back onto the argparse namespace so a
    restored checkpoint is evaluated with the configuration it was trained
    with.  Raises KeyError if an expected field is missing from the file.
    """
    config_path = os.path.join(args.init_checkpoint, 'config.json')
    with open(config_path, 'r') as fjson:
        saved = json.load(fjson)
    for key in ('dataset', 'model', 'double_entity_embedding',
                'double_relation_embedding', 'triple_relation_embedding',
                'quad_relation_embedding', 'hidden_dim', 'test_batch_size'):
        setattr(args, key, saved[key])
def save_model(model, optimizer, save_variable_list, args):
    '''Save model/optimizer state plus run metadata to args.save_path.

    Writes:
      - config.json: vars(args), so the run is self-describing and can be
        restored via override_config() (args must be JSON-serialisable)
      - checkpoint: save_variable_list (e.g. step, learning rate) merged with
        the model and optimizer state_dicts, for resuming training
      - entity_embedding.npy / relation_embedding.npy: raw embedding matrices
        for analysis outside PyTorch
    '''

    argparse_dict = vars(args)
    with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:
        json.dump(argparse_dict, fjson)

    # Fix: the docstring promised the optimizer state and training variables
    # (step, learning_rate, ...) were saved, but both parameters were silently
    # ignored before, making checkpoints non-resumable.
    torch.save({
        **save_variable_list,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict()},
        os.path.join(args.save_path, 'checkpoint')
    )

    entity_embedding = model.entity_embedding.detach().cpu().numpy()
    np.save(
        os.path.join(args.save_path, 'entity_embedding'),
        entity_embedding
    )

    relation_embedding = model.relation_embedding.detach().cpu().numpy()
    np.save(
        os.path.join(args.save_path, 'relation_embedding'),
        relation_embedding
    )
def set_logger(args):
    '''
    Configure root logging to write to train.log or test.log.

    Chooses train.log when args.do_train is set, otherwise test.log, preferring
    args.save_path over args.init_checkpoint as the directory.  When
    args.print_on_screen is set, log records are also mirrored to the console.
    '''
    if args.do_train:
        log_file = os.path.join(args.save_path or args.init_checkpoint, 'train.log')
    else:
        log_file = os.path.join(args.save_path or args.init_checkpoint, 'test.log')
    print(log_file)
    # filemode='w' truncates any previous log at the same path.
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S',
        filename=log_file,
        filemode='w'
    )
    if args.print_on_screen:
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics, writer):
    '''Record every metric to both the Python logger and the summary writer.

    Each entry in *metrics* becomes a "<mode> <name> at step <step>: <value>"
    log line plus a scalar tagged "<mode>_<name>" at *step* in *writer*.
    '''
    for name, value in metrics.items():
        logging.info('%s %s at step %d: %f' % (mode, name, step, value))
        tag = "_".join([mode, name])
        writer.add_scalar(tag, value, step)
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):
raise ValueError('one of train/val/test mode must be choosed.')
if args.init_checkpoint:
override_config(args)
args.save_path = 'log/%s/%s/%s-%s/%s'%(args.dataset, args.model, args.hidden_dim, args.gamma, time.time()) if args.save_path == None else args.save_path
writer = SummaryWriter(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
dataset = LinkPropPredDataset(name = args.dataset)
split_dict = dataset.get_edge_split()
nentity = dataset.graph['num_nodes']
nrelation = int(max(dataset.graph['edge_reltype'])[0])+1
evaluator = Evaluator(name = args.dataset)
args.nentity = nentity
args.nrelation = nrelation
logging.info('Model: %s' % args.model)
logging.info('Dataset: %s' % args.dataset)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
train_triples = split_dict['train']
logging.info('#train: %d' % len(train_triples['head']))
valid_triples = split_dict['valid']
logging.info('#valid: %d' % len(valid_triples['head']))
test_triples = split_dict['test']
logging.info('#test: %d' % len(test_triples['head']))
logging.info('relation type %s' % args.relation_type)
print('relation type %s' % args.relation_type)
test_set_file = ''
if args.relation_type == '1-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-1.pt'
elif args.relation_type == '1-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-n.pt'
elif args.relation_type == 'n-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-1.pt'
elif args.relation_type == 'n-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-n.pt'
if test_set_file != '':
if osp.exists(test_set_pre_processed):
test_triples = torch.load(test_set_pre_processed, 'rb')
print("load pre processed test set")
else:
test_triples_new = {}
test_triples_chosen = []
test_triples_new['head'] = []
test_triples_new['relation'] = []
test_triples_new['tail'] = []
test_triples_new['head_neg'] = []
test_triples_new['tail_neg'] = []
f_test = open(test_set_file, "r")
for line in f_test:
h, r, t = line.strip().split('\t')
h, r, t = int(h), int(r), int(t)
test_triples_chosen.append((h, r, t))
f_test.close()
for idx in range(len(test_triples['head'])):
h, r, t = test_triples['head'][idx], test_triples['relation'][idx], test_triples['tail'][idx]
if (h, r, t) in test_triples_chosen:
test_triples_new['head'].append(h)
test_triples_new['relation'].append(r)
test_triples_new['tail'].append(t)
test_triples_new['head_neg'].append(test_triples['head_neg'][idx])
test_triples_new['tail_neg'].append(test_triples['tail_neg'][idx])
print('Saving ...')
torch.save(test_triples_new, test_set_pre_processed, pickle_protocol=4)
test_triples = test_triples_new
logging.info('#test: %d' % len(test_triples['head']))
train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)
f_train = open("train.txt", "w")
for i in tqdm(range(len(train_triples['head']))):
head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]
train_count[(head, relation)] += 1
train_count[(tail, -relation-1)] += 1
train_true_head[(relation, tail)].append(head)
train_true_tail[(head, relation)].append(tail)
f_train.write("\t".join([str(head), str(relation), str(tail)]) + '\n')
f_train.close()
kge_model = KGEModel(
model_name=args.model,
nentity=nentity,
nrelation=nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding,
triple_relation_embedding=args.triple_relation_embedding,
quad_relation_embedding=args.quad_relation_embedding,
evaluator=evaluator
)
logging.info('Model Parameter Configuration:')
for name, param in kge_model.named_parameters():
logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))
if args.cuda:
kge_model = kge_model.cuda()
if args.do_train:
# Set training dataloader iterator
train_dataloader_head = DataLoader( | TrainDataset(train_triples, nentity, nrelation, | 1 | 2023-12-29 22:57:53+00:00 | 12k |
daswer123/rvc-python | rvc_python/modules/vc/modules.py | [
{
"identifier": "load_audio",
"path": "rvc_python/lib/audio.py",
"snippet": "def load_audio(file, sr):\n file = (\n file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n ) # 防止小白拷路径头尾带了空格和\"和回车\n if os.path.exists(file) == False:\n raise RuntimeError(\n ... | import traceback
import logging
import numpy as np
import soundfile as sf
import torch
from io import BytesIO
from rvc_python.lib.audio import load_audio, wav2
from rvc_python.lib.infer_pack.models import (
SynthesizerTrnMs256NSFsid,
SynthesizerTrnMs256NSFsid_nono,
SynthesizerTrnMs768NSFsid,
SynthesizerTrnMs768NSFsid_nono,
)
from rvc_python.modules.vc.pipeline import Pipeline
from rvc_python.modules.vc.utils import * | 9,846 |
logger = logging.getLogger(__name__)
class VC:
def __init__(self, lib_dir, config):
self.lib_dir = lib_dir
self.n_spk = None
self.tgt_sr = None
self.net_g = None
self.pipeline = None
self.cpt = None
self.version = None
self.if_f0 = None
self.version = None
self.hubert_model = None
self.config = config
def get_vc(self,sid,version = "v2", *to_return_protect):
# logger.info("Get sid: " + sid)
to_return_protect0 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[0]
if self.if_f0 != 0 and to_return_protect
else 0.5,
"__type__": "update",
}
to_return_protect1 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[1]
if self.if_f0 != 0 and to_return_protect
else 0.33,
"__type__": "update",
}
if sid == "" or sid == []:
if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的
logger.info("Clean model cache")
del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt
self.hubert_model = (
self.net_g
) = self.n_spk = self.hubert_model = self.tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
###楼下不这么折腾清理不干净
self.if_f0 = self.cpt.get("f0", 1)
self.version = self.cpt.get("version", "v1")
if self.version == "v1":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs256NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
elif self.version == "v2":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs768NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"])
del self.net_g, self.cpt
if torch.cuda.is_available():
torch.cuda.empty_cache()
return (
{"visible": False, "__type__": "update"},
{
"visible": True,
"value": to_return_protect0,
"__type__": "update",
},
{
"visible": True,
"value": to_return_protect1,
"__type__": "update",
},
"",
"",
)
person = f'{sid}'
logger.info(f"Loading: {person}")
# print(sid,person)
self.cpt = torch.load(sid, map_location="cpu")
self.tgt_sr = self.cpt["config"][-1]
self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk
self.if_f0 = self.cpt.get("f0", 1)
self.version = version
synthesizer_class = {
("v1", 1): SynthesizerTrnMs256NSFsid,
("v1", 0): SynthesizerTrnMs256NSFsid_nono,
("v2", 1): SynthesizerTrnMs768NSFsid,
("v2", 0): SynthesizerTrnMs768NSFsid_nono,
}
self.net_g = synthesizer_class.get(
(self.version, self.if_f0), SynthesizerTrnMs256NSFsid
)(*self.cpt["config"], is_half=self.config.is_half)
del self.net_g.enc_q
self.net_g.load_state_dict(self.cpt["weight"], strict=False)
self.net_g.eval().to(self.config.device)
if self.config.is_half:
self.net_g = self.net_g.half()
else:
self.net_g = self.net_g.float()
|
logger = logging.getLogger(__name__)
class VC:
def __init__(self, lib_dir, config):
self.lib_dir = lib_dir
self.n_spk = None
self.tgt_sr = None
self.net_g = None
self.pipeline = None
self.cpt = None
self.version = None
self.if_f0 = None
self.version = None
self.hubert_model = None
self.config = config
def get_vc(self,sid,version = "v2", *to_return_protect):
# logger.info("Get sid: " + sid)
to_return_protect0 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[0]
if self.if_f0 != 0 and to_return_protect
else 0.5,
"__type__": "update",
}
to_return_protect1 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[1]
if self.if_f0 != 0 and to_return_protect
else 0.33,
"__type__": "update",
}
if sid == "" or sid == []:
if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的
logger.info("Clean model cache")
del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt
self.hubert_model = (
self.net_g
) = self.n_spk = self.hubert_model = self.tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
###楼下不这么折腾清理不干净
self.if_f0 = self.cpt.get("f0", 1)
self.version = self.cpt.get("version", "v1")
if self.version == "v1":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs256NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
elif self.version == "v2":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs768NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"])
del self.net_g, self.cpt
if torch.cuda.is_available():
torch.cuda.empty_cache()
return (
{"visible": False, "__type__": "update"},
{
"visible": True,
"value": to_return_protect0,
"__type__": "update",
},
{
"visible": True,
"value": to_return_protect1,
"__type__": "update",
},
"",
"",
)
person = f'{sid}'
logger.info(f"Loading: {person}")
# print(sid,person)
self.cpt = torch.load(sid, map_location="cpu")
self.tgt_sr = self.cpt["config"][-1]
self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk
self.if_f0 = self.cpt.get("f0", 1)
self.version = version
synthesizer_class = {
("v1", 1): SynthesizerTrnMs256NSFsid,
("v1", 0): SynthesizerTrnMs256NSFsid_nono,
("v2", 1): SynthesizerTrnMs768NSFsid,
("v2", 0): SynthesizerTrnMs768NSFsid_nono,
}
self.net_g = synthesizer_class.get(
(self.version, self.if_f0), SynthesizerTrnMs256NSFsid
)(*self.cpt["config"], is_half=self.config.is_half)
del self.net_g.enc_q
self.net_g.load_state_dict(self.cpt["weight"], strict=False)
self.net_g.eval().to(self.config.device)
if self.config.is_half:
self.net_g = self.net_g.half()
else:
self.net_g = self.net_g.float()
| self.pipeline = Pipeline(self.tgt_sr, self.config,lib_dir=self.lib_dir) | 6 | 2023-12-26 19:05:42+00:00 | 12k |
open-mmlab/Amphion | models/svc/vits/vits.py | [
{
"identifier": "f0_to_coarse",
"path": "utils/f0.py",
"snippet": "def f0_to_coarse(f0, pitch_bin, pitch_min, pitch_max):\n ## TODO: Figure out the detail of this function\n\n f0_mel_min = 1127 * np.log(1 + pitch_min / 700)\n f0_mel_max = 1127 * np.log(1 + pitch_max / 700)\n\n is_torch = isi... | import copy
import torch
from torch import nn
from torch.nn import functional as F
from utils.util import *
from utils.f0 import f0_to_coarse
from modules.transformer.attentions import Encoder
from models.tts.vits.vits import ResidualCouplingBlock, PosteriorEncoder
from models.vocoders.gan.generator.bigvgan import BigVGAN
from models.vocoders.gan.generator.hifigan import HiFiGAN
from models.vocoders.gan.generator.nsfhifigan import NSFHiFiGAN
from models.vocoders.gan.generator.melgan import MelGAN
from models.vocoders.gan.generator.apnet import APNet
from modules.encoder.condition_encoder import ConditionEncoder | 7,993 | for i in range(x.size(0)):
idx_str = ids_str[i]
idx_end = idx_str + segment_size
ret[i] = x[i, idx_str:idx_end]
return ret
def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
b, d, t = x.size()
if x_lengths is None:
x_lengths = t
ids_str_max = x_lengths - segment_size + 1
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
ret = slice_segments(x, ids_str, segment_size)
ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size)
return ret, ret_pitch, ids_str
class ContentEncoder(nn.Module):
def __init__(
self,
out_channels,
hidden_channels,
kernel_size,
n_layers,
gin_channels=0,
filter_channels=None,
n_heads=None,
p_dropout=None,
):
super().__init__()
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.gin_channels = gin_channels
self.f0_emb = nn.Embedding(256, hidden_channels)
self.enc_ = Encoder(
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
# condition_encoder ver.
def forward(self, x, x_mask, noice_scale=1):
x = self.enc_(x * x_mask, x_mask)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs) * noice_scale) * x_mask
return z, m, logs, x_mask
class SynthesizerTrn(nn.Module):
"""
Synthesizer for Training
"""
def __init__(self, spec_channels, segment_size, cfg):
super().__init__()
self.spec_channels = spec_channels
self.segment_size = segment_size
self.cfg = cfg
self.inter_channels = cfg.model.vits.inter_channels
self.hidden_channels = cfg.model.vits.hidden_channels
self.filter_channels = cfg.model.vits.filter_channels
self.n_heads = cfg.model.vits.n_heads
self.n_layers = cfg.model.vits.n_layers
self.kernel_size = cfg.model.vits.kernel_size
self.p_dropout = cfg.model.vits.p_dropout
self.ssl_dim = cfg.model.vits.ssl_dim
self.n_flow_layer = cfg.model.vits.n_flow_layer
self.gin_channels = cfg.model.vits.gin_channels
self.n_speakers = cfg.model.vits.n_speakers
# f0
self.n_bins = cfg.preprocess.pitch_bin
self.f0_min = cfg.preprocess.f0_min
self.f0_max = cfg.preprocess.f0_max
# TODO: sort out the config
self.cfg.model.condition_encoder.f0_min = self.cfg.preprocess.f0_min
self.cfg.model.condition_encoder.f0_max = self.cfg.preprocess.f0_max
self.condition_encoder = ConditionEncoder(self.cfg.model.condition_encoder)
self.emb_g = nn.Embedding(self.n_speakers, self.gin_channels)
self.enc_p = ContentEncoder(
self.inter_channels,
self.hidden_channels,
filter_channels=self.filter_channels,
n_heads=self.n_heads,
n_layers=self.n_layers,
kernel_size=self.kernel_size,
p_dropout=self.p_dropout,
)
assert cfg.model.generator in [
"bigvgan",
"hifigan",
"melgan",
"nsfhifigan",
"apnet",
]
self.dec_name = cfg.model.generator
temp_cfg = copy.deepcopy(cfg)
temp_cfg.preprocess.n_mel = self.inter_channels
if cfg.model.generator == "bigvgan":
temp_cfg.model.bigvgan = cfg.model.generator_config.bigvgan
self.dec = BigVGAN(temp_cfg)
elif cfg.model.generator == "hifigan":
temp_cfg.model.hifigan = cfg.model.generator_config.hifigan
self.dec = HiFiGAN(temp_cfg)
elif cfg.model.generator == "melgan":
temp_cfg.model.melgan = cfg.model.generator_config.melgan
self.dec = MelGAN(temp_cfg)
elif cfg.model.generator == "nsfhifigan":
temp_cfg.model.nsfhifigan = cfg.model.generator_config.nsfhifigan
| # Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This code is modified from https://github.com/svc-develop-team/so-vits-svc/blob/4.1-Stable/models.py
def slice_pitch_segments(x, ids_str, segment_size=4):
ret = torch.zeros_like(x[:, :segment_size])
for i in range(x.size(0)):
idx_str = ids_str[i]
idx_end = idx_str + segment_size
ret[i] = x[i, idx_str:idx_end]
return ret
def rand_slice_segments_with_pitch(x, pitch, x_lengths=None, segment_size=4):
b, d, t = x.size()
if x_lengths is None:
x_lengths = t
ids_str_max = x_lengths - segment_size + 1
ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
ret = slice_segments(x, ids_str, segment_size)
ret_pitch = slice_pitch_segments(pitch, ids_str, segment_size)
return ret, ret_pitch, ids_str
class ContentEncoder(nn.Module):
def __init__(
self,
out_channels,
hidden_channels,
kernel_size,
n_layers,
gin_channels=0,
filter_channels=None,
n_heads=None,
p_dropout=None,
):
super().__init__()
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.gin_channels = gin_channels
self.f0_emb = nn.Embedding(256, hidden_channels)
self.enc_ = Encoder(
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
# condition_encoder ver.
def forward(self, x, x_mask, noice_scale=1):
x = self.enc_(x * x_mask, x_mask)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs) * noice_scale) * x_mask
return z, m, logs, x_mask
class SynthesizerTrn(nn.Module):
"""
Synthesizer for Training
"""
def __init__(self, spec_channels, segment_size, cfg):
super().__init__()
self.spec_channels = spec_channels
self.segment_size = segment_size
self.cfg = cfg
self.inter_channels = cfg.model.vits.inter_channels
self.hidden_channels = cfg.model.vits.hidden_channels
self.filter_channels = cfg.model.vits.filter_channels
self.n_heads = cfg.model.vits.n_heads
self.n_layers = cfg.model.vits.n_layers
self.kernel_size = cfg.model.vits.kernel_size
self.p_dropout = cfg.model.vits.p_dropout
self.ssl_dim = cfg.model.vits.ssl_dim
self.n_flow_layer = cfg.model.vits.n_flow_layer
self.gin_channels = cfg.model.vits.gin_channels
self.n_speakers = cfg.model.vits.n_speakers
# f0
self.n_bins = cfg.preprocess.pitch_bin
self.f0_min = cfg.preprocess.f0_min
self.f0_max = cfg.preprocess.f0_max
# TODO: sort out the config
self.cfg.model.condition_encoder.f0_min = self.cfg.preprocess.f0_min
self.cfg.model.condition_encoder.f0_max = self.cfg.preprocess.f0_max
self.condition_encoder = ConditionEncoder(self.cfg.model.condition_encoder)
self.emb_g = nn.Embedding(self.n_speakers, self.gin_channels)
self.enc_p = ContentEncoder(
self.inter_channels,
self.hidden_channels,
filter_channels=self.filter_channels,
n_heads=self.n_heads,
n_layers=self.n_layers,
kernel_size=self.kernel_size,
p_dropout=self.p_dropout,
)
assert cfg.model.generator in [
"bigvgan",
"hifigan",
"melgan",
"nsfhifigan",
"apnet",
]
self.dec_name = cfg.model.generator
temp_cfg = copy.deepcopy(cfg)
temp_cfg.preprocess.n_mel = self.inter_channels
if cfg.model.generator == "bigvgan":
temp_cfg.model.bigvgan = cfg.model.generator_config.bigvgan
self.dec = BigVGAN(temp_cfg)
elif cfg.model.generator == "hifigan":
temp_cfg.model.hifigan = cfg.model.generator_config.hifigan
self.dec = HiFiGAN(temp_cfg)
elif cfg.model.generator == "melgan":
temp_cfg.model.melgan = cfg.model.generator_config.melgan
self.dec = MelGAN(temp_cfg)
elif cfg.model.generator == "nsfhifigan":
temp_cfg.model.nsfhifigan = cfg.model.generator_config.nsfhifigan | self.dec = NSFHiFiGAN(temp_cfg) # TODO: nsf need f0 | 6 | 2023-11-15 09:19:27+00:00 | 12k |
banodoco/Steerable-Motion | imports/AdvancedControlNet/nodes.py | [
{
"identifier": "load_controlnet",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "def load_controlnet(ckpt_path, timestep_keyframe: TimestepKeyframeGroupImport=None, model=None):\n control = comfy_cn.load_controlnet(ckpt_path, model=model)\n # TODO: support controlnet-lllite\n # i... | import numpy as np
import folder_paths
from torch import Tensor
from .control import load_controlnet, convert_to_advanced, ControlWeightsImport, ControlWeightTypeImport,\
LatentKeyframeGroupImport, TimestepKeyframeImport, TimestepKeyframeGroupImport, is_advanced_controlnet
from .control import StrengthInterpolationImport as SI
from .weight_nodes import DefaultWeightsImport, ScaledSoftMaskedUniversalWeightsImport, ScaledSoftUniversalWeightsImport, SoftControlNetWeightsImport, CustomControlNetWeightsImport, \
SoftT2IAdapterWeightsImport, CustomT2IAdapterWeightsImport
from .latent_keyframe_nodes import LatentKeyframeGroupNodeImport, LatentKeyframeInterpolationNodeImport, LatentKeyframeBatchedGroupNodeImport, LatentKeyframeNodeImport
from .logger import logger | 10,365 | prev_timestep_kf: TimestepKeyframeGroupImport=None, prev_timestep_keyframe: TimestepKeyframeGroupImport=None, # old name
null_latent_kf_strength: float=0.0,
inherit_missing=True,
guarantee_usage=True,
mask_optional=None,
interpolation: str=SI.NONE,):
control_net_weights = control_net_weights if control_net_weights else cn_weights
prev_timestep_keyframe = prev_timestep_keyframe if prev_timestep_keyframe else prev_timestep_kf
if not prev_timestep_keyframe:
prev_timestep_keyframe = TimestepKeyframeGroupImport()
else:
prev_timestep_keyframe = prev_timestep_keyframe.clone()
keyframe = TimestepKeyframeImport(start_percent=start_percent, strength=strength, interpolation=interpolation, null_latent_kf_strength=null_latent_kf_strength,
control_weights=control_net_weights, latent_keyframes=latent_keyframe, inherit_missing=inherit_missing, guarantee_usage=guarantee_usage,
mask_hint_orig=mask_optional)
prev_timestep_keyframe.add(keyframe)
return (prev_timestep_keyframe,)
class ControlNetLoaderAdvancedImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"control_net_name": (folder_paths.get_filename_list("controlnet"), ),
},
"optional": {
"timestep_keyframe": ("TIMESTEP_KEYFRAME", ),
}
}
RETURN_TYPES = ("CONTROL_NET", )
FUNCTION = "load_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def load_controlnet(self, control_net_name,
timestep_keyframe: TimestepKeyframeGroupImport=None
):
controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
controlnet = load_controlnet(controlnet_path, timestep_keyframe)
return (controlnet,)
class DiffControlNetLoaderAdvancedImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL",),
"control_net_name": (folder_paths.get_filename_list("controlnet"), )
},
"optional": {
"timestep_keyframe": ("TIMESTEP_KEYFRAME", ),
}
}
RETURN_TYPES = ("CONTROL_NET", )
FUNCTION = "load_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def load_controlnet(self, control_net_name, model,
timestep_keyframe: TimestepKeyframeGroupImport=None
):
controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
controlnet = load_controlnet(controlnet_path, timestep_keyframe, model)
if is_advanced_controlnet(controlnet):
controlnet.verify_all_weights()
return (controlnet,)
class AdvancedControlNetApplyImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"positive": ("CONDITIONING", ),
"negative": ("CONDITIONING", ),
"control_net": ("CONTROL_NET", ),
"image": ("IMAGE", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
},
"optional": {
"mask_optional": ("MASK", ),
"timestep_kf": ("TIMESTEP_KEYFRAME", ),
"latent_kf_override": ("LATENT_KEYFRAME", ),
"weights_override": ("CONTROL_NET_WEIGHTS", ),
}
}
RETURN_TYPES = ("CONDITIONING","CONDITIONING")
RETURN_NAMES = ("positive", "negative")
FUNCTION = "apply_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent,
mask_optional: Tensor=None,
timestep_kf: TimestepKeyframeGroupImport=None, latent_kf_override: LatentKeyframeGroupImport=None,
weights_override: ControlWeightsImport=None):
if strength == 0:
return (positive, negative)
control_hint = image.movedim(-1,1)
cnets = {}
out = []
for conditioning in [positive, negative]:
c = []
for t in conditioning:
d = t[1].copy()
prev_cnet = d.get('control', None)
if prev_cnet in cnets:
c_net = cnets[prev_cnet]
else:
# copy, convert to advanced if needed, and set cond
|
class TimestepKeyframeNodeImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ),
},
"optional": {
"prev_timestep_kf": ("TIMESTEP_KEYFRAME", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"cn_weights": ("CONTROL_NET_WEIGHTS", ),
"latent_keyframe": ("LATENT_KEYFRAME", ),
"null_latent_kf_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"inherit_missing": ("BOOLEAN", {"default": True}, ),
"guarantee_usage": ("BOOLEAN", {"default": True}, ),
"mask_optional": ("MASK", ),
#"interpolation": ([SI.LINEAR, SI.EASE_IN, SI.EASE_OUT, SI.EASE_IN_OUT, SI.NONE], {"default": SI.NONE}, ),
}
}
RETURN_NAMES = ("TIMESTEP_KF", )
RETURN_TYPES = ("TIMESTEP_KEYFRAME", )
FUNCTION = "load_keyframe"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes"
def load_keyframe(self,
start_percent: float,
strength: float=1.0,
cn_weights: ControlWeightsImport=None, control_net_weights: ControlWeightsImport=None, # old name
latent_keyframe: LatentKeyframeGroupImport=None,
prev_timestep_kf: TimestepKeyframeGroupImport=None, prev_timestep_keyframe: TimestepKeyframeGroupImport=None, # old name
null_latent_kf_strength: float=0.0,
inherit_missing=True,
guarantee_usage=True,
mask_optional=None,
interpolation: str=SI.NONE,):
control_net_weights = control_net_weights if control_net_weights else cn_weights
prev_timestep_keyframe = prev_timestep_keyframe if prev_timestep_keyframe else prev_timestep_kf
if not prev_timestep_keyframe:
prev_timestep_keyframe = TimestepKeyframeGroupImport()
else:
prev_timestep_keyframe = prev_timestep_keyframe.clone()
keyframe = TimestepKeyframeImport(start_percent=start_percent, strength=strength, interpolation=interpolation, null_latent_kf_strength=null_latent_kf_strength,
control_weights=control_net_weights, latent_keyframes=latent_keyframe, inherit_missing=inherit_missing, guarantee_usage=guarantee_usage,
mask_hint_orig=mask_optional)
prev_timestep_keyframe.add(keyframe)
return (prev_timestep_keyframe,)
class ControlNetLoaderAdvancedImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"control_net_name": (folder_paths.get_filename_list("controlnet"), ),
},
"optional": {
"timestep_keyframe": ("TIMESTEP_KEYFRAME", ),
}
}
RETURN_TYPES = ("CONTROL_NET", )
FUNCTION = "load_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def load_controlnet(self, control_net_name,
timestep_keyframe: TimestepKeyframeGroupImport=None
):
controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
controlnet = load_controlnet(controlnet_path, timestep_keyframe)
return (controlnet,)
class DiffControlNetLoaderAdvancedImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL",),
"control_net_name": (folder_paths.get_filename_list("controlnet"), )
},
"optional": {
"timestep_keyframe": ("TIMESTEP_KEYFRAME", ),
}
}
RETURN_TYPES = ("CONTROL_NET", )
FUNCTION = "load_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def load_controlnet(self, control_net_name, model,
timestep_keyframe: TimestepKeyframeGroupImport=None
):
controlnet_path = folder_paths.get_full_path("controlnet", control_net_name)
controlnet = load_controlnet(controlnet_path, timestep_keyframe, model)
if is_advanced_controlnet(controlnet):
controlnet.verify_all_weights()
return (controlnet,)
class AdvancedControlNetApplyImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"positive": ("CONDITIONING", ),
"negative": ("CONDITIONING", ),
"control_net": ("CONTROL_NET", ),
"image": ("IMAGE", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001})
},
"optional": {
"mask_optional": ("MASK", ),
"timestep_kf": ("TIMESTEP_KEYFRAME", ),
"latent_kf_override": ("LATENT_KEYFRAME", ),
"weights_override": ("CONTROL_NET_WEIGHTS", ),
}
}
RETURN_TYPES = ("CONDITIONING","CONDITIONING")
RETURN_NAMES = ("positive", "negative")
FUNCTION = "apply_controlnet"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝"
def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent,
mask_optional: Tensor=None,
timestep_kf: TimestepKeyframeGroupImport=None, latent_kf_override: LatentKeyframeGroupImport=None,
weights_override: ControlWeightsImport=None):
if strength == 0:
return (positive, negative)
control_hint = image.movedim(-1,1)
cnets = {}
out = []
for conditioning in [positive, negative]:
c = []
for t in conditioning:
d = t[1].copy()
prev_cnet = d.get('control', None)
if prev_cnet in cnets:
c_net = cnets[prev_cnet]
else:
# copy, convert to advanced if needed, and set cond | c_net = convert_to_advanced(control_net.copy()).set_cond_hint(control_hint, strength, (start_percent, end_percent)) | 1 | 2023-11-11 01:26:26+00:00 | 12k |
Zaloog/kanban-python | src/kanban_python/controls.py | [
{
"identifier": "cfg",
"path": "src/kanban_python/config.py",
"snippet": "class KanbanConfig:\n def __init__(self, path=CONFIG_FILE_PATH) -> None:\n def __repr__(self) -> str:\n def save(self):\n def config(self) -> configparser.ConfigParser:\n def active_board(self) -> str:\n def acti... | from json import dump, load
from rich.pretty import pprint
from .config import (
cfg,
check_if_board_name_exists_in_config,
check_if_current_active_board_in_board_list,
delete_board_from_config,
get_json_path,
)
from .constants import (
DUMMY_DB,
KANBAN_BOARDS_PATH,
REPORT_FILE_NAME,
REPORT_FILE_PATH,
TASK_FILE_NAME,
)
from .interface import (
create_config_table,
create_github_like_report_table,
create_table,
input_ask_for_action,
input_ask_for_action_settings,
input_ask_for_change_board,
input_ask_for_delete_board,
input_ask_for_new_board_name,
input_ask_which_task_to_update,
input_ask_which_tasks_to_show,
input_change_column_settings,
input_change_done_limit_settings,
input_change_files_to_scan_settings,
input_change_footer_settings,
input_change_min_col_width_settings,
input_change_patterns_to_scan_settings,
input_confirm_add_todos_to_board,
input_confirm_delete_board,
input_confirm_set_board_active,
input_create_new_task,
input_update_task,
)
from .utils import (
check_board_name_valid,
check_if_done_col_leq_X,
check_if_there_are_visible_tasks_in_board,
check_scanner_files_valid,
check_scanner_patterns_valid,
console,
create_report_document,
current_time_to_str,
delete_json_file,
get_tag_id_choices,
move_first_done_task_to_archive,
scan_files,
scan_for_todos,
split_todo_in_tag_and_title,
) | 7,253 | if input_confirm_delete_board(board_to_delete):
board_to_delete_path = cfg.kanban_boards_dict[board_to_delete]
delete_json_file(board_to_delete_path)
delete_board_from_config(board_to_delete)
def show():
if not cfg.kanban_boards:
console.print(":warning: [red]No Boards created yet[/]:warning:")
console.print("Use 'kanban init' to create a new kanban board.")
raise KeyboardInterrupt
if not check_if_current_active_board_in_board_list():
console.print(
"[yellow]Hmm, Something went wrong.[/] "
+ f"The active board '{cfg.active_board}' is not in the list of boards."
)
change_kanban_board()
show()
return
db_data = read_db()
table = create_table(data=db_data)
console.print(table)
# Scan Functionality
#####################################################################################
def add_todos_to_board():
files = scan_files(endings=cfg.scanned_files)
todos = scan_for_todos(file_paths=files, patterns=cfg.scanned_patterns)
if not todos:
console.print(
":cross_mark: [red]Nothing found that "
+ "matches any of your provided patterns.[/]"
)
return
# TODO Write Docs for kanban scan functionality
# BUG This pattern also works
if input_confirm_add_todos_to_board(todos=todos):
todo_task_list = []
for task, file in todos:
tag, title = split_todo_in_tag_and_title(task, cfg.scanned_patterns)
new_task = {
"Title": title,
"Description": f"from {file}",
"Status": "Ready",
"Tag": tag,
"Creation_Date": current_time_to_str(),
"Begin_Time": "",
"Complete_Time": "",
"Duration": 0,
}
todo_task_list.append(new_task)
add_tasks_to_db(tasks=todo_task_list)
# Config Settings
#####################################################################################
def change_settings():
while True:
show_settings()
settings_selection = input_ask_for_action_settings()
if settings_selection == 1:
change_kanban_board()
new_min_col_widths = input_change_min_col_width_settings()
cfg.col_min_width = new_min_col_widths
done_limit = input_change_done_limit_settings()
cfg.done_limit = done_limit
footer_visible = input_change_footer_settings()
cfg.show_footer = "True" if footer_visible else "False"
if settings_selection == 2:
updated_col_config = input_change_column_settings()
cfg.kanban_columns_dict = updated_col_config
if settings_selection == 3:
while True:
new_files_to_scan = input_change_files_to_scan_settings()
if check_scanner_files_valid(new_files_to_scan):
cfg.scanned_files = new_files_to_scan
break
console.print(
f":warning: '{new_files_to_scan}' is [red]not[/] a valid."
)
while True:
new_patterns_to_scan = input_change_patterns_to_scan_settings()
if check_scanner_patterns_valid(new_patterns_to_scan):
cfg.scanned_patterns = new_patterns_to_scan
break
console.print(
f":warning: '{new_patterns_to_scan}' is [red]not[/] a valid."
)
if settings_selection == 4:
break
def show_settings():
settings_table = create_config_table()
console.print(settings_table)
# Report Creation
#####################################################################################
def create_report():
boards_dict = read_db("all")
gh_table = create_github_like_report_table(boards_dict)
console.print(gh_table)
if not REPORT_FILE_PATH.exists():
REPORT_FILE_PATH.mkdir(exist_ok=True)
create_report_document(boards_dict=boards_dict)
console.print(
"\n[bright_black]You can find your markdown report under:"
| from __future__ import annotations
# DB Controls
#####################################################################################
def create_new_db() -> None:
while True:
while True:
new_board_name = input_ask_for_new_board_name()
if check_board_name_valid(new_board_name):
break
console.print(f":warning: '{new_board_name}' is [red]not[/] a valid Name.")
if not check_if_board_name_exists_in_config(new_board_name):
break
console.print(
f":warning: Board '{new_board_name}' already exists, choose another Name."
)
cfg.kanban_boards_dict = new_board_name
# Options:
# 1. ~/.kanban-python/<BOARDNAME>.json
# 2. ~/.kanban-python/kanban_boards/<BOARDNAME>.json
# 3. ~/.kanban-python/kanban_boards/<BOARDNAME>/pykanban.json <- THIS
# 4. ~/.kanban-python/kanban_boards/<BOARDNAME>/<BOARDNAME>.json
new_db_path = KANBAN_BOARDS_PATH / new_board_name
if not new_db_path.exists():
new_db_path.mkdir()
with open(get_json_path(new_board_name), "w", encoding="utf-8") as f:
dump(DUMMY_DB, f, ensure_ascii=False, indent=4)
console.print(
f"Created new [orange3]{TASK_FILE_NAME}[/] file at "
+ f"[orange3]{KANBAN_BOARDS_PATH / new_board_name}[/] to save tasks."
)
if input_confirm_set_board_active(name=new_board_name):
cfg.active_board = new_board_name
def save_db(data):
path = cfg.active_board_path
with open(path, "w", encoding="utf-8") as f:
dump(data, f, ensure_ascii=False, indent=4)
def add_tasks_to_db(tasks: dict | list[dict]) -> None:
db_data = read_db()
if isinstance(tasks, dict):
new_id = str(max(int(i) for i in db_data.keys()) + 1)
db_data[new_id] = tasks
else:
for task in tasks:
new_id = str(max(int(i) for i in db_data.keys()) + 1)
db_data[new_id] = task
save_db(data=db_data)
def read_db(path: str = None) -> dict:
if not path:
path = cfg.active_board_path
if path == "all":
board_dict = {
b: read_single_board(b_path) for b, b_path in cfg.kanban_boards_dict.items()
}
return board_dict
try:
data = read_single_board(path)
return data
except FileNotFoundError:
print(path)
console.print(f":warning: No [orange3]{TASK_FILE_NAME}[/] file here anymore.")
console.print("Please change to another board.")
change_kanban_board()
console.print(f"[red]Seems like the previous {TASK_FILE_NAME} file was deleted[/]")
console.print(f"Create new [orange3]{TASK_FILE_NAME}[/] file here.")
create_new_db()
return read_db()
def read_single_board(path):
with open(path, "r") as file:
data = load(file)
return data
# User Action Controls
#####################################################################################
# Get User Action
def get_user_action():
return input_ask_for_action()
# Action 1
def add_new_task_to_db():
new_task = input_create_new_task()
add_tasks_to_db(tasks=new_task)
# Action 2
def update_task_from_db():
db_data = read_db()
if not check_if_there_are_visible_tasks_in_board(db_data, cfg.vis_cols):
console.print(":cross_mark:[red]No Tasks available on this Kanban board[/]")
return
selected_id = input_ask_which_task_to_update(db_data)
updated_task = input_update_task(current_task=db_data[selected_id])
db_data[selected_id] = updated_task
while not check_if_done_col_leq_X(cfg=cfg, data=db_data):
first_task_id, archive_task = move_first_done_task_to_archive(data=db_data)
db_data[first_task_id] = archive_task
save_db(data=db_data)
# Action 3
def change_kanban_board():
    """Switch the active board to one chosen interactively by the user."""
    all_boards = read_db(path="all")
    cfg.active_board = input_ask_for_change_board(all_boards)
# Action 4
def show_tasks():
    """Display full details of every task matching a chosen id or tag."""
    db_data = read_db()
    choices = get_tag_id_choices(db_data, cfg.vis_cols)
    selection = input_ask_which_tasks_to_show(choices)
    shown_fields = ["Title", "Description", "Tag", "Status", "Due_Date"]
    for task_id, task in db_data.items():
        if selection not in (task_id, task["Tag"]):
            continue
        banner = 20 * "[bold blue]#[/]"
        console.print(banner + f" Task {task_id} " + banner)
        pprint(
            {key: val for key, val in task.items() if key in shown_fields},
            console=console,
            expand_all=True,
        )
# Action 5
def delete_kanban_board():
    """Delete a user-selected board (its file and config entry) after confirmation."""
    board = input_ask_for_delete_board()
    if not input_confirm_delete_board(board):
        return
    delete_json_file(cfg.kanban_boards_dict[board])
    delete_board_from_config(board)
def show():
    """Render the active kanban board as a table on the console."""
    if not cfg.kanban_boards:
        console.print(":warning: [red]No Boards created yet[/]:warning:")
        console.print("Use 'kanban init' to create a new kanban board.")
        raise KeyboardInterrupt
    if not check_if_current_active_board_in_board_list():
        console.print(
            "[yellow]Hmm, Something went wrong.[/] "
            + f"The active board '{cfg.active_board}' is not in the list of boards."
        )
        # Recover by letting the user pick a valid board, then re-render.
        change_kanban_board()
        show()
        return
    console.print(create_table(data=read_db()))
# Scan Functionality
#####################################################################################
def add_todos_to_board():
    """Scan the configured files for TODO-style comments and add them as tasks."""
    files = scan_files(endings=cfg.scanned_files)
    todos = scan_for_todos(file_paths=files, patterns=cfg.scanned_patterns)
    if not todos:
        console.print(
            ":cross_mark: [red]Nothing found that "
            + "matches any of your provided patterns.[/]"
        )
        return
    # TODO Write Docs for kanban scan functionality
    # BUG This pattern also works
    if not input_confirm_add_todos_to_board(todos=todos):
        return
    new_tasks = []
    for todo_text, source_file in todos:
        tag, title = split_todo_in_tag_and_title(todo_text, cfg.scanned_patterns)
        new_tasks.append(
            {
                "Title": title,
                "Description": f"from {source_file}",
                "Status": "Ready",
                "Tag": tag,
                "Creation_Date": current_time_to_str(),
                "Begin_Time": "",
                "Complete_Time": "",
                "Duration": 0,
            }
        )
    add_tasks_to_db(tasks=new_tasks)
# Config Settings
#####################################################################################
def change_settings():
    """Interactive settings menu; loops until the user selects option 4 (exit)."""
    while True:
        show_settings()
        settings_selection = input_ask_for_action_settings()
        if settings_selection == 1:
            # General settings: active board, column widths, done limit, footer.
            change_kanban_board()
            new_min_col_widths = input_change_min_col_width_settings()
            cfg.col_min_width = new_min_col_widths
            done_limit = input_change_done_limit_settings()
            cfg.done_limit = done_limit
            # Stored as the strings "True"/"False", not booleans.
            footer_visible = input_change_footer_settings()
            cfg.show_footer = "True" if footer_visible else "False"
        if settings_selection == 2:
            updated_col_config = input_change_column_settings()
            cfg.kanban_columns_dict = updated_col_config
        if settings_selection == 3:
            # Scanner settings: re-prompt until the input validates.
            while True:
                new_files_to_scan = input_change_files_to_scan_settings()
                if check_scanner_files_valid(new_files_to_scan):
                    cfg.scanned_files = new_files_to_scan
                    break
                console.print(
                    f":warning: '{new_files_to_scan}' is [red]not[/] a valid."
                )
            while True:
                new_patterns_to_scan = input_change_patterns_to_scan_settings()
                if check_scanner_patterns_valid(new_patterns_to_scan):
                    cfg.scanned_patterns = new_patterns_to_scan
                    break
                console.print(
                    f":warning: '{new_patterns_to_scan}' is [red]not[/] a valid."
                )
        if settings_selection == 4:
            break
def show_settings():
    """Print the current configuration as a table."""
    console.print(create_config_table())
# Report Creation
#####################################################################################
def create_report():
boards_dict = read_db("all")
gh_table = create_github_like_report_table(boards_dict)
console.print(gh_table)
if not REPORT_FILE_PATH.exists():
REPORT_FILE_PATH.mkdir(exist_ok=True)
create_report_document(boards_dict=boards_dict)
console.print(
"\n[bright_black]You can find your markdown report under:" | + f"\n[bold green]{REPORT_FILE_PATH/REPORT_FILE_NAME}" | 3 | 2023-11-11 14:43:55+00:00 | 12k |
AMAAI-Lab/mustango | audioldm/clap/open_clip/factory.py | [
{
"identifier": "CLAP",
"path": "audioldm/clap/open_clip/model.py",
"snippet": "class CLAP(nn.Module):\n def __init__(\n self,\n embed_dim: int,\n audio_cfg: CLAPAudioCfp,\n text_cfg: CLAPTextCfg,\n quick_gelu: bool = False,\n enable_fusion: bool = False,\n ... | import json
import logging
import os
import pathlib
import re
import torch
from copy import deepcopy
from pathlib import Path
from .model import CLAP, convert_weights_to_fp16
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform | 7,245 | global _MODEL_CONFIGS
config_ext = (".json",)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f"*{ext}"))
for cf in config_files:
if os.path.basename(cf)[0] == ".":
continue # Ignore hidden files
with open(cf, "r") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {
k: v
for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
    """Load a checkpoint file and return its model state dict.

    Checkpoints may either be a raw state dict or wrap it under a
    ``"state_dict"`` key; with ``skip_params`` the ``"module."`` prefix left
    by (Distributed)DataParallel wrappers is stripped from every key.
    """
    checkpoint = torch.load(checkpoint_path, map_location=map_location)
    if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
        state_dict = checkpoint["state_dict"]
    else:
        state_dict = checkpoint
    if skip_params:
        # Only the first key is inspected: prefixes are assumed uniform.
        first_key = next(iter(state_dict))
        if first_key.startswith("module"):
            state_dict = {key[7:]: value for key, value in state_dict.items()}
    # for k in state_dict:
    #     if k.startswith('transformer'):
    #         v = state_dict.pop(k)
    #         state_dict['text_branch.' + k[12:]] = v
    return state_dict
def create_model(
amodel_name: str,
tmodel_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
openai_model_cache_dir: str = os.path.expanduser(f"{CACHE_DIR}/clip"),
skip_params=True,
pretrained_audio: str = "",
pretrained_text: str = "",
enable_fusion: bool = False,
fusion_type: str = "None"
# pretrained_image: bool = False,
):
amodel_name = amodel_name.replace(
"/", "-"
) # for callers using old naming with / in ViT names
pretrained_orig = pretrained
pretrained = pretrained.lower()
if pretrained == "openai":
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
# Hard Code in model name
model_cfg["text_cfg"]["model_type"] = tmodel_name
model = load_openai_model(
"ViT-B-16",
model_cfg,
device=device,
jit=jit,
cache_dir=openai_model_cache_dir,
enable_fusion=enable_fusion,
fusion_type=fusion_type,
)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
# if pretrained_image:
# if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
# # pretrained weight loading for timm models set via vision_cfg
# model_cfg['vision_cfg']['timm_model_pretrained'] = True
# else:
# assert False, 'pretrained image towers currently only supported for timm models'
model_cfg["text_cfg"]["model_type"] = tmodel_name
model_cfg["enable_fusion"] = enable_fusion
model_cfg["fusion_type"] = fusion_type
model = CLAP(**model_cfg)
if pretrained:
checkpoint_path = ""
url = get_pretrained_url(amodel_name, pretrained)
if url:
|
# Directories scanned for model-architecture JSON config files.
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {}  # directory (model_name: config) of model architecture configs
# Cache root for downloaded weights; overridable via environment variable.
CACHE_DIR = os.getenv("AUDIOLDM_CACHE_DIR", "~/.cache/audioldm")
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
def _rescan_model_configs():
    """(Re)populate the global model-config registry from JSON files on disk."""
    global _MODEL_CONFIGS

    config_ext = (".json",)
    config_files = []
    for config_path in _MODEL_CONFIG_PATHS:
        if config_path.is_file() and config_path.suffix in config_ext:
            config_files.append(config_path)
        elif config_path.is_dir():
            for ext in config_ext:
                config_files.extend(config_path.glob(f"*{ext}"))

    for config_file in config_files:
        if os.path.basename(config_file)[0] == ".":
            continue  # Ignore hidden files
        with open(config_file, "r") as handle:
            model_cfg = json.load(handle)
        required_keys = ("embed_dim", "audio_cfg", "text_cfg")
        if all(key in model_cfg for key in required_keys):
            _MODEL_CONFIGS[config_file.stem] = model_cfg

    # Keep the registry ordered by natural sort of the model names.
    _MODEL_CONFIGS = dict(
        sorted(_MODEL_CONFIGS.items(), key=lambda item: _natural_key(item[0]))
    )


_rescan_model_configs()  # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
    """Load a checkpoint file and return its model state dict.

    NOTE(review): within this file this definition appears twice; the later
    one wins at import time — confirm against the full module and deduplicate.
    """
    checkpoint = torch.load(checkpoint_path, map_location=map_location)
    # Some checkpoints wrap the weights under a "state_dict" key.
    if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
        state_dict = checkpoint["state_dict"]
    else:
        state_dict = checkpoint
    if skip_params:
        # Strip the "module." prefix left by (Distributed)DataParallel;
        # only the first key is inspected, prefixes assumed uniform.
        if next(iter(state_dict.items()))[0].startswith("module"):
            state_dict = {k[7:]: v for k, v in state_dict.items()}
    # for k in state_dict:
    #     if k.startswith('transformer'):
    #         v = state_dict.pop(k)
    #         state_dict['text_branch.' + k[12:]] = v
    return state_dict
def create_model(
amodel_name: str,
tmodel_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
openai_model_cache_dir: str = os.path.expanduser(f"{CACHE_DIR}/clip"),
skip_params=True,
pretrained_audio: str = "",
pretrained_text: str = "",
enable_fusion: bool = False,
fusion_type: str = "None"
# pretrained_image: bool = False,
):
amodel_name = amodel_name.replace(
"/", "-"
) # for callers using old naming with / in ViT names
pretrained_orig = pretrained
pretrained = pretrained.lower()
if pretrained == "openai":
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
# Hard Code in model name
model_cfg["text_cfg"]["model_type"] = tmodel_name
model = load_openai_model(
"ViT-B-16",
model_cfg,
device=device,
jit=jit,
cache_dir=openai_model_cache_dir,
enable_fusion=enable_fusion,
fusion_type=fusion_type,
)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
# if pretrained_image:
# if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
# # pretrained weight loading for timm models set via vision_cfg
# model_cfg['vision_cfg']['timm_model_pretrained'] = True
# else:
# assert False, 'pretrained image towers currently only supported for timm models'
model_cfg["text_cfg"]["model_type"] = tmodel_name
model_cfg["enable_fusion"] = enable_fusion
model_cfg["fusion_type"] = fusion_type
model = CLAP(**model_cfg)
if pretrained:
checkpoint_path = ""
url = get_pretrained_url(amodel_name, pretrained)
if url: | checkpoint_path = download_pretrained(url, root=openai_model_cache_dir) | 4 | 2023-11-14 23:29:31+00:00 | 12k |
BraveGroup/Drive-WM | tests/pipelines/shap_e/test_shap_e_img2img.py | [
{
"identifier": "PipelineTesterMixin",
"path": "tests/pipelines/test_pipelines_common.py",
"snippet": "class PipelineTesterMixin:\n \"\"\"\n This mixin is designed to be used with unittest.TestCase classes.\n It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading t... | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils.testing_utils import (
floats_tensor,
load_image,
load_numpy,
nightly,
require_torch_gpu,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference | 9,659 | "image_encoder": image_encoder,
"image_processor": image_processor,
"shap_e_renderer": shap_e_renderer,
"scheduler": scheduler,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic pipeline inputs: a seeded random image + generator."""
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        # MPS does not support device-local generators, so seed globally there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "latent",
        }
        return inputs
    def test_shap_e(self):
        """Smoke-test one CPU inference against a fixed expected latent slice."""
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[-3:, -3:].cpu().numpy()

        assert image.shape == (32, 16)

        expected_slice = np.array(
            [-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        """Batch-consistency check, restricted to small batches for speed."""
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[2])
    def test_inference_batch_single_identical(self):
        """Batched output must match single-sample output within tolerance."""
        self._test_inference_batch_single_identical(
            batch_size=2,
            expected_max_diff=6e-3,
        )
    def test_num_images_per_prompt(self):
        """Output batch dim must equal batch_size * num_images_per_prompt."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        # Replicate every batched input to the requested batch size.
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
    def test_float16_inference(self):
        """fp16 inference diverges more from fp32; widen the allowed difference."""
        super().test_float16_inference(expected_max_diff=1e-1)
    def test_save_load_local(self):
        """Round-trip save/load locally with a relaxed output tolerance."""
        super().test_save_load_local(expected_max_difference=5e-3)
    @unittest.skip("Key error is raised with accelerate")
    def test_sequential_cpu_offload_forward_pass(self):
        """Skipped: sequential CPU offload currently raises a KeyError under accelerate."""
        pass
@nightly
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for ShapEImg2ImgPipeline (nightly, GPU-only)."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def test_shap_e_img2img(self):
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy"
)
pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device=torch_device).manual_seed(0)
images = pipe(
input_image,
generator=generator,
guidance_scale=3.0,
num_inference_steps=64,
frame_size=64,
output_type="np",
).images[0]
assert images.shape == (20, 64, 64, 3)
| # Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast unit tests for ShapEImg2ImgPipeline built from tiny dummy components."""

    pipeline_class = ShapEImg2ImgPipeline
    # The only batched pipeline input is the conditioning image.
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        """Hidden size shared by the dummy encoder and prior."""
        return 16

    @property
    def time_input_dim(self):
        return 16

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        """Tiny CLIP vision model; seeded so weights are reproducible."""
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=32,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=24,
            num_attention_heads=2,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )

        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        """CLIP image processor with the standard CLIP normalization constants."""
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    @property
    def dummy_prior(self):
        """Tiny seeded PriorTransformer configured for the dummy dims above."""
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        """Tiny seeded ShapERenderer with a near-black background."""
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the full component dict expected by the pipeline constructor."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        shap_e_renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "shap_e_renderer": shap_e_renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic pipeline inputs: a seeded random image + generator."""
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        # MPS does not support device-local generators, so seed globally there.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "latent",
        }
        return inputs

    def test_shap_e(self):
        """Smoke-test one CPU inference against a fixed expected latent slice."""
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[-3:, -3:].cpu().numpy()

        assert image.shape == (32, 16)

        expected_slice = np.array(
            [-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        """Batch-consistency check, restricted to small batches for speed."""
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[2])

    def test_inference_batch_single_identical(self):
        """Batched output must match single-sample output within tolerance."""
        self._test_inference_batch_single_identical(
            batch_size=2,
            expected_max_diff=6e-3,
        )

    def test_num_images_per_prompt(self):
        """Output batch dim must equal batch_size * num_images_per_prompt."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        # Replicate every batched input to the requested batch size.
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt

    def test_float16_inference(self):
        """fp16 inference diverges more from fp32; widen the allowed difference."""
        super().test_float16_inference(expected_max_diff=1e-1)

    def test_save_load_local(self):
        """Round-trip save/load locally with a relaxed output tolerance."""
        super().test_save_load_local(expected_max_difference=5e-3)

    @unittest.skip("Key error is raised with accelerate")
    def test_sequential_cpu_offload_forward_pass(self):
        """Skipped: sequential CPU offload currently raises a KeyError under accelerate."""
        pass
@nightly
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_shap_e_img2img(self):
input_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
)
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy"
)
pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device=torch_device).manual_seed(0)
images = pipe(
input_image,
generator=generator,
guidance_scale=3.0,
num_inference_steps=64,
frame_size=64,
output_type="np",
).images[0]
assert images.shape == (20, 64, 64, 3)
| assert_mean_pixel_difference(images, expected_image) | 1 | 2023-11-18 01:40:55+00:00 | 12k |
basnijholt/unidep | unidep/_cli.py | [
{
"identifier": "create_conda_env_specification",
"path": "unidep/_conda_env.py",
"snippet": "def create_conda_env_specification( # noqa: PLR0912\n resolved: dict[str, dict[Platform | None, dict[CondaPip, Spec]]],\n channels: list[str],\n platforms: list[Platform],\n selector: Literal[\"sel... | import argparse
import importlib.util
import os
import shutil
import subprocess
import sys
from pathlib import Path
from unidep._conda_env import (
create_conda_env_specification,
write_conda_environment_file,
)
from unidep._conda_lock import conda_lock_command
from unidep._conflicts import resolve_conflicts
from unidep._dependencies_parsing import (
find_requirements_files,
parse_local_dependencies,
parse_requirements,
)
from unidep._setuptools_integration import (
filter_python_dependencies,
get_python_dependencies,
)
from unidep._version import __version__
from unidep.platform_definitions import Platform
from unidep.utils import (
add_comment_to_file,
dependencies_filename,
escape_unicode,
identify_current_platform,
is_pip_installable,
parse_package_str,
warn,
)
from typing import Literal, get_args
from typing_extensions import Literal, get_args
from rich_argparse import RichHelpFormatter
from argparse import HelpFormatter as _HelpFormatter # type: ignore[assignment] | 8,256 | def _identify_conda_executable() -> str: # pragma: no cover
"""Identify the conda executable to use.
This function checks for micromamba, mamba, and conda in that order.
"""
if shutil.which("micromamba"):
return "micromamba"
if shutil.which("mamba"):
return "mamba"
if shutil.which("conda"):
return "conda"
msg = "Could not identify conda executable."
raise RuntimeError(msg)
def _format_inline_conda_package(package: str) -> str:
    """Render a package spec for the command line, quoting any version pin."""
    parsed = parse_package_str(package)
    if parsed.pin is None:
        return parsed.name
    return f'{parsed.name}"{parsed.pin.strip()}"'
def _pip_install_local(
*folders: str | Path,
editable: bool,
dry_run: bool,
flags: list[str] | None = None,
) -> None: # pragma: no cover
pip_command = [sys.executable, "-m", "pip", "install"]
if flags:
pip_command.extend(flags)
for folder in sorted(folders):
if not os.path.isabs(folder): # noqa: PTH117
relative_prefix = ".\\" if os.name == "nt" else "./"
folder = f"{relative_prefix}{folder}" # noqa: PLW2901
if editable:
pip_command.extend(["-e", str(folder)])
else:
pip_command.append(str(folder))
print(f"📦 Installing project with `{' '.join(pip_command)}`\n")
if not dry_run:
subprocess.run(pip_command, check=True) # noqa: S603
def _install_command( # noqa: PLR0912
*files: Path,
conda_executable: str,
dry_run: bool,
editable: bool,
skip_local: bool = False,
skip_pip: bool = False,
skip_conda: bool = False,
no_dependencies: bool = False,
ignore_pins: list[str] | None = None,
overwrite_pins: list[str] | None = None,
skip_dependencies: list[str] | None = None,
verbose: bool = False,
) -> None:
"""Install the dependencies of a single `requirements.yaml` or `pyproject.toml` file.""" # noqa: E501
if no_dependencies:
skip_pip = True
skip_conda = True
files = tuple(dependencies_filename(f) for f in files)
requirements = parse_requirements(
*files,
ignore_pins=ignore_pins,
overwrite_pins=overwrite_pins,
skip_dependencies=skip_dependencies,
verbose=verbose,
)
platforms = [identify_current_platform()]
resolved = resolve_conflicts(
requirements.requirements,
platforms,
)
env_spec = create_conda_env_specification(
resolved,
requirements.channels,
platforms=platforms,
)
if env_spec.conda and not skip_conda:
conda_executable = conda_executable or _identify_conda_executable()
channel_args = ["--override-channels"] if env_spec.channels else []
for channel in env_spec.channels:
channel_args.extend(["--channel", channel])
conda_command = [
conda_executable,
"install",
"--yes",
*channel_args,
]
# When running the command in terminal, we need to wrap the pin in quotes
# so what we print is what the user would type (copy-paste).
to_print = [_format_inline_conda_package(pkg) for pkg in env_spec.conda] # type: ignore[arg-type]
conda_command_str = " ".join((*conda_command, *to_print))
print(f"📦 Installing conda dependencies with `{conda_command_str}`\n") # type: ignore[arg-type]
if not dry_run: # pragma: no cover
subprocess.run((*conda_command, *env_spec.conda), check=True) # type: ignore[arg-type] # noqa: S603
if env_spec.pip and not skip_pip:
pip_command = [sys.executable, "-m", "pip", "install", *env_spec.pip]
print(f"📦 Installing pip dependencies with `{' '.join(pip_command)}`\n")
if not dry_run: # pragma: no cover
subprocess.run(pip_command, check=True) # noqa: S603
installable = []
if not skip_local:
for file in files:
if is_pip_installable(file.parent):
installable.append(file.parent)
else: # pragma: no cover
print(
f"⚠️ Project {file.parent} is not pip installable. "
"Could not find setup.py or [build-system] in pyproject.toml.",
)
# Install local dependencies (if any) included via `local_dependencies:`
| #!/usr/bin/env python3
"""unidep - Unified Conda and Pip requirements management.
This module provides a command-line tool for managing conda environment.yaml files.
"""
from __future__ import annotations
if sys.version_info >= (3, 8):
else: # pragma: no cover
try: # pragma: no cover
class _HelpFormatter(RichHelpFormatter):
def _get_help_string(self, action: argparse.Action) -> str | None:
# escapes "[" in text, otherwise e.g., [linux] is removed
if action.help is not None:
return action.help.replace("[", r"\[")
return None
except ImportError: # pragma: no cover
# Human-readable name of the supported dependency files, reused in help text.
_DEP_FILES = "`requirements.yaml` or `pyproject.toml`"


def _add_common_args(  # noqa: PLR0912
    sub_parser: argparse.ArgumentParser,
    options: set[str],
) -> None:  # pragma: no cover
    """Attach the subset of shared CLI arguments named in ``options``.

    Each recognized option name adds one argparse argument to ``sub_parser``;
    names not listed in ``options`` are simply not added.
    """
    if "directory" in options:
        sub_parser.add_argument(
            "-d",
            "--directory",
            type=Path,
            default=".",
            help=f"Base directory to scan for {_DEP_FILES} file(s), by default `.`",
        )
    if "file" in options:
        sub_parser.add_argument(
            "-f",
            "--file",
            type=Path,
            default=".",
            help=f"The {_DEP_FILES} file to parse, or folder"
            " that contains that file, by default `.`",
        )
    if "verbose" in options:
        sub_parser.add_argument(
            "-v",
            "--verbose",
            action="store_true",
            help="Print verbose output",
        )
    if "platform" in options:
        # Default stays None here; the caller substitutes the current platform.
        current_platform = identify_current_platform()
        sub_parser.add_argument(
            "--platform",
            "-p",
            type=str,
            action="append",  # Allow multiple instances of -p
            default=None,  # Default is a list with the current platform set in `main`
            choices=get_args(Platform),
            help="The platform(s) to get the requirements for. "
            "Multiple platforms can be specified. "
            f"By default, the current platform (`{current_platform}`) is used.",
        )
    if "editable" in options:
        sub_parser.add_argument(
            "-e",
            "--editable",
            action="store_true",
            help="Install the project in editable mode",
        )
    if "depth" in options:
        sub_parser.add_argument(
            "--depth",
            type=int,
            default=1,
            help=f"Maximum depth to scan for {_DEP_FILES} files, by default 1",
        )
    if "*files" in options:
        sub_parser.add_argument(
            "files",
            type=Path,
            nargs="+",
            help=f"The {_DEP_FILES} file(s) to parse"
            " or folder(s) that contain"
            " those file(s), by default `.`",
            default=None,  # default is "." set in `main`
        )
    if "skip-local" in options:
        sub_parser.add_argument(
            "--skip-local",
            action="store_true",
            help="Skip installing local dependencies",
        )
    if "skip-pip" in options:
        sub_parser.add_argument(
            "--skip-pip",
            action="store_true",
            help=f"Skip installing pip dependencies from {_DEP_FILES}",
        )
    if "skip-conda" in options:
        sub_parser.add_argument(
            "--skip-conda",
            action="store_true",
            help=f"Skip installing conda dependencies from {_DEP_FILES}",
        )
    if "skip-dependency" in options:
        sub_parser.add_argument(
            "--skip-dependency",
            type=str,
            action="append",
            default=[],
            help="Skip installing a specific dependency that is in one of the"
            f" {_DEP_FILES}"
            " files. This option can be used multiple times, each"
            " time specifying a different package to skip."
            " For example, use `--skip-dependency pandas` to skip installing pandas.",
        )
    if "no-dependencies" in options:
        sub_parser.add_argument(
            "--no-dependencies",
            action="store_true",
            help=f"Skip installing dependencies from {_DEP_FILES}"
            " file(s) and only install local package(s). Useful after"
            " installing a `conda-lock.yml` file because then all"
            " dependencies have already been installed.",
        )
    if "conda-executable" in options:
        sub_parser.add_argument(
            "--conda-executable",
            type=str,
            choices=("conda", "mamba", "micromamba"),
            help="The conda executable to use",
            default=None,
        )
    if "dry-run" in options:
        sub_parser.add_argument(
            "--dry-run",
            "--dry",
            action="store_true",
            help="Only print the commands that would be run",
        )
    if "ignore-pin" in options:
        sub_parser.add_argument(
            "--ignore-pin",
            type=str,
            action="append",
            default=[],
            help="Ignore the version pin for a specific package,"
            " e.g., `--ignore-pin numpy`. This option can be repeated"
            " to ignore multiple packages.",
        )
    if "overwrite-pin" in options:
        sub_parser.add_argument(
            "--overwrite-pin",
            type=str,
            action="append",
            default=[],
            help="Overwrite the version pin for a specific package,"
            " e.g., `--overwrite-pin 'numpy==1.19.2'`. This option can be repeated"
            " to overwrite the pins of multiple packages.",
        )
def _parse_args() -> argparse.Namespace:
    """Build the ``unidep`` command-line parser and parse ``sys.argv``.

    Registers all subcommands (``merge``, ``install``, ``install-all``,
    ``conda-lock``, ``pip-compile``, ``pip``, ``conda``, ``version``) together
    with their options and returns the parsed namespace. Prints the help text
    and exits with status 1 when no subcommand is given. When a ``--file``
    argument points at a directory, it is resolved to the dependency file
    inside that directory before returning.
    """
    parser = argparse.ArgumentParser(
        description="Unified Conda and Pip requirements management.",
        formatter_class=_HelpFormatter,
    )
    subparsers = parser.add_subparsers(dest="command", help="Subcommands")
    # Subparser for the 'merge' command
    merge_help = (
        f"Combine multiple (or a single) {_DEP_FILES}"
        " files into a"
        " single Conda installable `environment.yaml` file."
    )
    merge_example = (
        " Example usage: `unidep merge --directory . --depth 1 --output environment.yaml`"  # noqa: E501
        f" to search for {_DEP_FILES}"
        " files in the current directory and its"
        " subdirectories and create `environment.yaml`. These are the defaults, so you"
        " can also just run `unidep merge`."
    )
    parser_merge = subparsers.add_parser(
        "merge",
        help=merge_help,
        description=merge_help + merge_example,
        formatter_class=_HelpFormatter,
    )
    parser_merge.add_argument(
        "-o",
        "--output",
        type=Path,
        default="environment.yaml",
        help="Output file for the conda environment, by default `environment.yaml`",
    )
    parser_merge.add_argument(
        "-n",
        "--name",
        type=str,
        default="myenv",
        help="Name of the conda environment, by default `myenv`",
    )
    parser_merge.add_argument(
        "--stdout",
        action="store_true",
        help="Output to stdout instead of a file",
    )
    parser_merge.add_argument(
        "--selector",
        type=str,
        choices=("sel", "comment"),
        default="sel",
        help="The selector to use for the environment markers, if `sel` then"
        " `- numpy # [linux]` becomes `sel(linux): numpy`, if `comment` then"
        " it remains `- numpy # [linux]`, by default `sel`",
    )
    _add_common_args(
        parser_merge,
        {
            "directory",
            "verbose",
            "platform",
            "depth",
            "ignore-pin",
            "skip-dependency",
            "overwrite-pin",
        },
    )
    # Subparser for the 'install' command
    install_help = (
        f"Automatically install all dependencies from one or more {_DEP_FILES} files."
        " This command first installs dependencies"
        " with Conda, then with Pip. Finally, it installs local packages"
        f" (those containing the {_DEP_FILES} files)"
        " using `pip install [-e] ./project`."
    )
    install_example = (
        " Example usage: `unidep install .` for a single project."
        " For multiple projects: `unidep install ./project1 ./project2`."
        " The command accepts both file paths and directories containing"
        f" a {_DEP_FILES} file. Use `--editable` or"
        " `-e` to install the local packages in editable mode. See"
        f" `unidep install-all` to install all {_DEP_FILES} files in and below the"
        " current folder."
    )
    parser_install = subparsers.add_parser(
        "install",
        help=install_help,
        description=install_help + install_example,
        formatter_class=_HelpFormatter,
    )
    # Add positional argument for the file
    _add_common_args(
        parser_install,
        {
            "*files",
            "conda-executable",
            "dry-run",
            "editable",
            "skip-local",
            "skip-pip",
            "skip-conda",
            "no-dependencies",
            "ignore-pin",
            "skip-dependency",
            "overwrite-pin",
            "verbose",
        },
    )
    install_all_help = (
        f"Install dependencies from all {_DEP_FILES}"
        " files found in the current"
        " directory or specified directory. This command first installs dependencies"
        " using Conda, then Pip, and finally the local packages."
    )
    install_all_example = (
        " Example usage: `unidep install-all` to install dependencies from all"
        f" {_DEP_FILES}"
        " files in the current directory. Use"
        " `--directory ./path/to/dir` to specify a different directory. Use"
        " `--depth` to control the depth of directory search. Add `--editable`"
        " or `-e` for installing local packages in editable mode."
    )
    parser_install_all = subparsers.add_parser(
        "install-all",
        help=install_all_help,
        description=install_all_help + install_all_example,
        formatter_class=_HelpFormatter,
    )
    # Add positional argument for the file
    _add_common_args(
        parser_install_all,
        {
            "conda-executable",
            "dry-run",
            "editable",
            "depth",
            "directory",
            "skip-local",
            "skip-pip",
            "skip-conda",
            "no-dependencies",
            "ignore-pin",
            "skip-dependency",
            "overwrite-pin",
            "verbose",
        },
    )
    # Subparser for the 'conda-lock' command
    conda_lock_help = (
        "Generate a global `conda-lock.yml` file for a collection of"
        f" {_DEP_FILES}"
        " files. Additionally, create individual"
        f" `conda-lock.yml` files for each {_DEP_FILES} file"
        " consistent with the global lock file."
    )
    conda_lock_example = (
        " Example usage: `unidep conda-lock --directory ./projects` to generate"
        f" conda-lock files for all {_DEP_FILES}"
        " files in the `./projects`"
        " directory. Use `--only-global` to generate only the global lock file."
        " The `--check-input-hash` option can be used to avoid regenerating lock"
        " files if the input hasn't changed."
    )
    parser_lock = subparsers.add_parser(
        "conda-lock",
        help=conda_lock_help,
        description=conda_lock_help + conda_lock_example,
        formatter_class=_HelpFormatter,
    )
    parser_lock.add_argument(
        "--only-global",
        action="store_true",
        help="Only generate the global lock file",
    )
    parser_lock.add_argument(
        "--lockfile",
        type=Path,
        default="conda-lock.yml",
        help="Specify a path for the global lockfile (default: `conda-lock.yml`"
        " in current directory). Path should be relative, e.g.,"
        " `--lockfile ./locks/example.conda-lock.yml`.",
    )
    parser_lock.add_argument(
        "--check-input-hash",
        action="store_true",
        help="Check existing input hashes in lockfiles before regenerating lock files."
        " This flag is directly passed to `conda-lock`.",
    )
    _add_common_args(
        parser_lock,
        {
            "directory",
            "verbose",
            "platform",
            "depth",
            "ignore-pin",
            "skip-dependency",
            "overwrite-pin",
        },
    )
    # Subparser for the 'pip-compile' command
    pip_compile_help = (
        "Generate a fully pinned `requirements.txt` file from one or more"
        f" {_DEP_FILES}"
        " files using `pip-compile` from `pip-tools`. This"
        f" command consolidates all pip dependencies defined in the {_DEP_FILES}"
        " files and compiles them into a single `requirements.txt` file, taking"
        " into account the specific versions and dependencies of each package."
    )
    pip_compile_example = (
        " Example usage: `unidep pip-compile --directory ./projects` to generate"
        f" a `requirements.txt` file for all {_DEP_FILES}"
        " files in the"
        " `./projects` directory. Use `--output-file requirements.txt` to specify a"
        " different output file."
    )
    parser_pip_compile = subparsers.add_parser(
        "pip-compile",
        help=pip_compile_help,
        description=pip_compile_help + pip_compile_example,
        formatter_class=_HelpFormatter,
    )
    parser_pip_compile.add_argument(
        "-o",
        "--output-file",
        type=Path,
        default=None,
        help="Output file for the pip requirements, by default `requirements.txt`",
    )
    _add_common_args(
        parser_pip_compile,
        {
            "directory",
            "verbose",
            "platform",
            "depth",
            "ignore-pin",
            "skip-dependency",
            "overwrite-pin",
        },
    )
    parser_pip_compile.add_argument(
        "extra_flags",
        nargs=argparse.REMAINDER,
        help="Extra flags to pass to `pip-compile`. These flags are passed directly"
        " and should be provided in the format expected by `pip-compile`. For example,"
        " `unidep pip-compile -- --generate-hashes --allow-unsafe`. Note that the"
        " `--` is required to separate the flags for `unidep` from the flags for"
        " `pip-compile`.",
    )
    # Subparser for the 'pip' and 'conda' command
    help_str = "Get the {} requirements for the current platform only."
    # NOTE: the flag below is `--separator`; the example must spell it the same way.
    help_example = (
        " Example usage: `unidep {which} --file folder1 --file"
        " folder2/requirements.yaml --separator ' ' --platform linux-64` to"
        " extract all the {which} dependencies specific to the linux-64 platform. Note"
        " that the `--file` argument can be used multiple times to specify multiple"
        f" {_DEP_FILES}"
        " files and that --file can also be a folder that contains"
        f" a {_DEP_FILES} file."
    )
    parser_pip = subparsers.add_parser(
        "pip",
        help=help_str.format("pip"),
        description=help_str.format("pip") + help_example.format(which="pip"),
        formatter_class=_HelpFormatter,
    )
    parser_conda = subparsers.add_parser(
        "conda",
        help=help_str.format("conda"),
        description=help_str.format("conda") + help_example.format(which="conda"),
        formatter_class=_HelpFormatter,
    )
    for sub_parser in [parser_pip, parser_conda]:
        _add_common_args(
            sub_parser,
            {
                "verbose",
                "platform",
                "file",
                "ignore-pin",
                "skip-dependency",
                "overwrite-pin",
            },
        )
        sub_parser.add_argument(
            "--separator",
            type=str,
            default=" ",
            help="The separator between the dependencies, by default ` `",
        )
    # Subparser for the 'version' command (takes no options of its own)
    parser_version = subparsers.add_parser(
        "version",
        help="Print version information of unidep.",
        formatter_class=_HelpFormatter,
    )
    args = parser.parse_args()
    if args.command is None:  # pragma: no cover
        parser.print_help()
        sys.exit(1)
    # A `--file` pointing at a folder is resolved to the dependency file in it.
    if "file" in args and args.file.is_dir():  # pragma: no cover
        args.file = dependencies_filename(args.file)
    return args
def _identify_conda_executable() -> str:  # pragma: no cover
    """Return the name of the first available conda-style executable.

    Candidates are probed on ``PATH`` in preference order: ``micromamba``,
    then ``mamba``, then ``conda``.

    Raises:
        RuntimeError: If none of the candidates is found on ``PATH``.
    """
    for candidate in ("micromamba", "mamba", "conda"):
        if shutil.which(candidate):
            return candidate
    msg = "Could not identify conda executable."
    raise RuntimeError(msg)
def _format_inline_conda_package(package: str) -> str:
    """Format a package spec so its version pin survives shell copy-paste.

    A bare name is returned unchanged; a pinned package gets the pin wrapped
    in double quotes (e.g. ``numpy">=1.21"``) so the printed command can be
    pasted into a terminal verbatim.
    """
    parsed = parse_package_str(package)
    pin = parsed.pin
    return parsed.name if pin is None else f'{parsed.name}"{pin.strip()}"'
def _pip_install_local(
*folders: str | Path,
editable: bool,
dry_run: bool,
flags: list[str] | None = None,
) -> None: # pragma: no cover
pip_command = [sys.executable, "-m", "pip", "install"]
if flags:
pip_command.extend(flags)
for folder in sorted(folders):
if not os.path.isabs(folder): # noqa: PTH117
relative_prefix = ".\\" if os.name == "nt" else "./"
folder = f"{relative_prefix}{folder}" # noqa: PLW2901
if editable:
pip_command.extend(["-e", str(folder)])
else:
pip_command.append(str(folder))
print(f"📦 Installing project with `{' '.join(pip_command)}`\n")
if not dry_run:
subprocess.run(pip_command, check=True) # noqa: S603
def _install_command( # noqa: PLR0912
*files: Path,
conda_executable: str,
dry_run: bool,
editable: bool,
skip_local: bool = False,
skip_pip: bool = False,
skip_conda: bool = False,
no_dependencies: bool = False,
ignore_pins: list[str] | None = None,
overwrite_pins: list[str] | None = None,
skip_dependencies: list[str] | None = None,
verbose: bool = False,
) -> None:
"""Install the dependencies of a single `requirements.yaml` or `pyproject.toml` file.""" # noqa: E501
if no_dependencies:
skip_pip = True
skip_conda = True
files = tuple(dependencies_filename(f) for f in files)
requirements = parse_requirements(
*files,
ignore_pins=ignore_pins,
overwrite_pins=overwrite_pins,
skip_dependencies=skip_dependencies,
verbose=verbose,
)
platforms = [identify_current_platform()]
resolved = resolve_conflicts(
requirements.requirements,
platforms,
)
env_spec = create_conda_env_specification(
resolved,
requirements.channels,
platforms=platforms,
)
if env_spec.conda and not skip_conda:
conda_executable = conda_executable or _identify_conda_executable()
channel_args = ["--override-channels"] if env_spec.channels else []
for channel in env_spec.channels:
channel_args.extend(["--channel", channel])
conda_command = [
conda_executable,
"install",
"--yes",
*channel_args,
]
# When running the command in terminal, we need to wrap the pin in quotes
# so what we print is what the user would type (copy-paste).
to_print = [_format_inline_conda_package(pkg) for pkg in env_spec.conda] # type: ignore[arg-type]
conda_command_str = " ".join((*conda_command, *to_print))
print(f"📦 Installing conda dependencies with `{conda_command_str}`\n") # type: ignore[arg-type]
if not dry_run: # pragma: no cover
subprocess.run((*conda_command, *env_spec.conda), check=True) # type: ignore[arg-type] # noqa: S603
if env_spec.pip and not skip_pip:
pip_command = [sys.executable, "-m", "pip", "install", *env_spec.pip]
print(f"📦 Installing pip dependencies with `{' '.join(pip_command)}`\n")
if not dry_run: # pragma: no cover
subprocess.run(pip_command, check=True) # noqa: S603
installable = []
if not skip_local:
for file in files:
if is_pip_installable(file.parent):
installable.append(file.parent)
else: # pragma: no cover
print(
f"⚠️ Project {file.parent} is not pip installable. "
"Could not find setup.py or [build-system] in pyproject.toml.",
)
# Install local dependencies (if any) included via `local_dependencies:` | local_dependencies = parse_local_dependencies( | 5 | 2023-11-16 04:23:01+00:00 | 12k |
BAAI-DCAI/SegVol | inference_demo.py | [
{
"identifier": "sam_model_registry",
"path": "segment_anything_volumetric/build_sam.py",
"snippet": "def build_sam_vit_3d(args, checkpoint=None):\ndef _build_sam(\n image_encoder_type,\n embed_dim,\n patch_size,\n checkpoint,\n image_size,\n):"
},
{
"identifier": "SegVol",
"p... | import argparse
import os
import torch
import torch.nn.functional as F
import json
import monai.transforms as transforms
from segment_anything_volumetric import sam_model_registry
from network.model import SegVol
from data_process.demo_data_process import process_ct_gt
from utils.monai_inferers_utils import sliding_window_inference, generate_box, select_points, build_binary_cube, build_binary_points, logits2roi_coor
from utils.visualize import draw_result | 10,330 |
def set_parse():
# %% set up parser
parser = argparse.ArgumentParser()
parser.add_argument("--test_mode", default=True, type=bool)
parser.add_argument("--resume", type = str, default = '')
parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap")
parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple)
parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple)
parser.add_argument('-work_dir', type=str, default='./work_dir')
### demo
parser.add_argument('--demo_config', type=str, required=True)
parser.add_argument("--clip_ckpt", type = str, default = './config/clip')
args = parser.parse_args()
return args
def dice_score(preds, labels): # on GPU
assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape)
predict = preds.view(1, -1)
target = labels.view(1, -1)
if target.shape[1] < 1e8:
predict = predict.cuda()
target = target.cuda()
predict = torch.sigmoid(predict)
predict = torch.where(predict > 0.5, 1., 0.)
tp = torch.sum(torch.mul(predict, target))
den = torch.sum(predict) + torch.sum(target) + 1
dice = 2 * tp / den
if target.shape[1] < 1e8:
predict = predict.cpu()
target = target.cpu()
return dice
def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None):
logits_labels_record = {}
image_single_resize = image_resize
image_single = image[0,0]
ori_shape = image_single.shape
for item_idx in range(len(categories)):
# get label to generate prompts
label_single = gt3D[0][item_idx]
label_single_resize = gt3D_resize[0][item_idx]
# skip meaningless categories
if torch.sum(label_single) == 0:
print('No object, skip')
continue
# generate prompts
text_single = categories[item_idx] if args.use_text_prompt else None
if categories is not None: print(f'inference |{categories[item_idx]}| target...')
points_single = None
box_single = None
if args.use_point_prompt:
point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3)
points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda())
binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape)
if args.use_box_prompt:
box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda()
|
def set_parse():
# %% set up parser
parser = argparse.ArgumentParser()
parser.add_argument("--test_mode", default=True, type=bool)
parser.add_argument("--resume", type = str, default = '')
parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap")
parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple)
parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple)
parser.add_argument('-work_dir', type=str, default='./work_dir')
### demo
parser.add_argument('--demo_config', type=str, required=True)
parser.add_argument("--clip_ckpt", type = str, default = './config/clip')
args = parser.parse_args()
return args
def dice_score(preds, labels): # on GPU
assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape)
predict = preds.view(1, -1)
target = labels.view(1, -1)
if target.shape[1] < 1e8:
predict = predict.cuda()
target = target.cuda()
predict = torch.sigmoid(predict)
predict = torch.where(predict > 0.5, 1., 0.)
tp = torch.sum(torch.mul(predict, target))
den = torch.sum(predict) + torch.sum(target) + 1
dice = 2 * tp / den
if target.shape[1] < 1e8:
predict = predict.cpu()
target = target.cpu()
return dice
def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None):
logits_labels_record = {}
image_single_resize = image_resize
image_single = image[0,0]
ori_shape = image_single.shape
for item_idx in range(len(categories)):
# get label to generate prompts
label_single = gt3D[0][item_idx]
label_single_resize = gt3D_resize[0][item_idx]
# skip meaningless categories
if torch.sum(label_single) == 0:
print('No object, skip')
continue
# generate prompts
text_single = categories[item_idx] if args.use_text_prompt else None
if categories is not None: print(f'inference |{categories[item_idx]}| target...')
points_single = None
box_single = None
if args.use_point_prompt:
point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3)
points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda())
binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape)
if args.use_box_prompt:
box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() | binary_cube_resize = build_binary_cube(box_single, binary_cube_shape=label_single_resize.shape) | 6 | 2023-11-10 08:25:37+00:00 | 12k |
xk-huang/segment-caption-anything | tests/models/sca/test_modeling_sca.py | [
{
"identifier": "ScaConfig",
"path": "src/models/sca/configuration_sca.py",
"snippet": "class ScaConfig(PretrainedConfig):\n model_type = \"sca\"\n is_composition = True\n\n def __init__(\n self,\n vision_config=None,\n prompt_encoder_config=None,\n mask_caption_deco... | import sys
import pytest
import requests
import torch
import time
import numpy as np
import torch
import transformers
from PIL import Image
from src.models.sca import ScaConfig, ScaModel, ScaProcessor
from typing import Sequence
from torch.nn.utils.rnn import pad_sequence | 7,969 |
sys.path.append(".")
cache_dir = ".model.cache"
device = "cuda" if torch.cuda.is_available() else "cpu"
sam_model_name = "facebook/sam-vit-base"
text_model_name = "gpt2"
additional_num_hidden_layers = 2
@pytest.fixture
def model():
model = ScaModel.from_sam_text_pretrained(
sam_model_name, text_model_name, additional_num_hidden_layers, cache_dir=cache_dir
).to(device)
return model
@pytest.fixture
def processor():
|
sys.path.append(".")
cache_dir = ".model.cache"
device = "cuda" if torch.cuda.is_available() else "cpu"
sam_model_name = "facebook/sam-vit-base"
text_model_name = "gpt2"
additional_num_hidden_layers = 2
@pytest.fixture
def model():
model = ScaModel.from_sam_text_pretrained(
sam_model_name, text_model_name, additional_num_hidden_layers, cache_dir=cache_dir
).to(device)
return model
@pytest.fixture
def processor(): | processor = ScaProcessor.from_sam_text_pretrained(sam_model_name, text_model_name, cache_dir=cache_dir) | 1 | 2023-11-17 14:10:41+00:00 | 12k |
artwalker/EasyTranslator | easy_translator.py | [
{
"identifier": "CommandArgs",
"path": "command_args.py",
"snippet": "class CommandArgs:\r\n \"\"\"A class to read the arguments from command line .\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Initialize command arguments.\"\"\"\r\n # Use the argparse module in the Python standard li... | from command_args import CommandArgs
from parameter_reader import ParameterReader
from process_file import ProcessFile
| 9,055 |
class TranslateText:
"""Overall class to manage text translation."""
def __init__(self):
"""Initialize parameters about the text translation."""
# 1. Read the command line arguments.
self.commandArgs = CommandArgs()
# 2. Read the parameters from the settings.cfg file and the .env file.
# and process the parameters.
|
class TranslateText:
"""Overall class to manage text translation."""
def __init__(self):
"""Initialize parameters about the text translation."""
# 1. Read the command line arguments.
self.commandArgs = CommandArgs()
# 2. Read the parameters from the settings.cfg file and the .env file.
# and process the parameters.
| self.parameterReader = ParameterReader(self.commandArgs)
| 1 | 2023-11-10 15:56:06+00:00 | 12k |
ShipBit/wingman-ai | main.py | [
{
"identifier": "AudioRecorder",
"path": "services/audio_recorder.py",
"snippet": "class AudioRecorder(FileCreator):\n def __init__(\n self,\n app_root_dir: str,\n samplerate: int = 44100,\n channels: int = 1,\n ):\n super().__init__(app_root_dir, RECORDING_PATH)... | from os import path
from pynput import keyboard
from services.audio_recorder import AudioRecorder
from services.secret_keeper import SecretKeeper
from services.tower import Tower
from services.printr import Printr
from services.config_manager import ConfigManager
from gui.root import WingmanUI
from wingmen.wingman import Wingman
import sys
import asyncio
import threading | 9,794 |
printr = Printr()
def get_application_root(is_bundled: bool):
if is_bundled:
application_path = sys._MEIPASS
else:
application_path = path.dirname(path.abspath(__file__))
return application_path
class WingmanAI:
def __init__(self):
# pyinstaller things...
self.app_is_bundled = getattr(sys, "frozen", False)
self.app_root_dir = get_application_root(self.app_is_bundled)
self.active = False
self.active_recording = {"key": "", "wingman": None}
self.tower = None
self.config_manager = ConfigManager(self.app_root_dir, self.app_is_bundled)
self.secret_keeper = SecretKeeper(self.app_root_dir)
self.audio_recorder = AudioRecorder(self.app_root_dir)
def load_context(self, context=""):
self.active = False
try:
if self.config_manager:
config = self.config_manager.get_context_config(context)
|
printr = Printr()
def get_application_root(is_bundled: bool):
if is_bundled:
application_path = sys._MEIPASS
else:
application_path = path.dirname(path.abspath(__file__))
return application_path
class WingmanAI:
def __init__(self):
# pyinstaller things...
self.app_is_bundled = getattr(sys, "frozen", False)
self.app_root_dir = get_application_root(self.app_is_bundled)
self.active = False
self.active_recording = {"key": "", "wingman": None}
self.tower = None
self.config_manager = ConfigManager(self.app_root_dir, self.app_is_bundled)
self.secret_keeper = SecretKeeper(self.app_root_dir)
self.audio_recorder = AudioRecorder(self.app_root_dir)
def load_context(self, context=""):
self.active = False
try:
if self.config_manager:
config = self.config_manager.get_context_config(context) | self.tower = Tower( | 2 | 2023-11-15 09:36:06+00:00 | 12k |
derkalle4/python3-idotmatrix-client | core/cmd.py | [
{
"identifier": "Bluetooth",
"path": "core/bluetooth.py",
"snippet": "class Bluetooth:\n address = None\n client = None\n logging = logging.getLogger(\"idotmatrix.\" + __name__)\n mtu_size = None\n\n def __init__(self, address):\n self.logging.debug(\"initialize bluetooth for {}\".... | from datetime import datetime
from PIL import Image
from .bluetooth import Bluetooth
from .idotmatrix.chronograph import Chronograph
from .idotmatrix.clock import Clock
from .idotmatrix.common import Common
from .idotmatrix.countdown import Countdown
from .idotmatrix.gif import Gif
from .idotmatrix.image import Image
from .idotmatrix.fullscreenColor import FullscreenColor
from .idotmatrix.musicSync import MusicSync
from .idotmatrix.scoreboard import Scoreboard
from .idotmatrix.graffiti import Graffiti
import logging
import os
import time | 8,148 | parser.add_argument(
"--fullscreen-color",
action="store",
help="sets a fullscreen color. Format: <R0-255>-<G0-255>-<B0-255> (example: 255-255-255)",
)
# pixel color
parser.add_argument(
"--pixel-color",
action="append",
help="sets a pixel to a specific color. Could be used multiple times. Format: <PIXEL-X>-<PIXEL-Y>-<R0-255>-<G0-255>-<B0-255> (example: 0-0-255-255-255)",
nargs="+",
)
# scoreboard
parser.add_argument(
"--scoreboard",
action="store",
help="shows the scoreboard with the given scores. Format: <0-999>-<0-999>",
)
# image upload
parser.add_argument(
"--image",
action="store",
help="enables or disables the image mode (true = enable, false = disable)",
)
parser.add_argument(
"--set-image",
action="store",
help="uploads a given image file (fastest is png, max. pixel depending on your display). Format: ./path/to/image.png",
)
parser.add_argument(
"--process-image",
action="store",
help="processes the image instead of sending it raw (useful when the size does not match or it is not a png). Format: <AMOUNT_PIXEL>",
)
# gif upload
parser.add_argument(
"--set-gif",
action="store",
help="uploads a given gif file (pixel depending on your display). Format: ./path/to/image.gif",
)
parser.add_argument(
"--process-gif",
action="store",
help="processes the gif instead of sending it raw (useful when the size does not match). Format: <AMOUNT_PIXEL>",
)
async def run(self, args):
self.logging.info("initializing command line")
address = None
if args.address:
self.logging.debug("using --address")
address = args.address
elif "IDOTMATRIX_ADDRESS" in os.environ:
self.logging.debug("using IDOTMATRIX_ADDRESS")
address = os.environ["IDOTMATRIX_ADDRESS"]
if address is None:
self.logging.error("no device address given")
quit()
else:
self.bluetooth = Bluetooth(address)
# arguments which can be run in parallel
if args.sync_time:
await self.sync_time(args.set_time)
if args.rotate180degrees:
await self.rotate180degrees(args.rotate180degrees)
if args.togglescreen:
await self.togglescreen()
if args.set_brightness:
await self.set_brightness(args.set_brightness)
if args.set_password:
await self.set_password(args.set_password)
# arguments which cannot run in parallel
if args.test:
await self.test()
elif args.chronograph:
await self.chronograph(args.chronograph)
elif args.clock:
await self.clock(args)
elif args.countdown:
await self.countdown(args)
elif args.fullscreen_color:
await self.fullscreenColor(args.fullscreen_color)
elif args.pixel_color:
await self.pixelColor(args.pixel_color)
elif args.scoreboard:
await self.scoreboard(args.scoreboard)
elif args.image:
await self.image(args)
elif args.set_gif:
await self.gif(args)
async def test(self):
"""Tests all available options for the device"""
self.logging.info("starting test of device")
## chronograph
await self.bluetooth.send(Chronograph().setChronograph(1))
time.sleep(5)
await self.bluetooth.send(Chronograph().setChronograph(0))
time.sleep(1)
## clock
await self.bluetooth.send(Clock().setTimeIndicator(True))
await self.bluetooth.send(Clock().setClockMode(0, True, True))
time.sleep(5)
## countdown
await self.bluetooth.send(Countdown().setCountdown(1, 0, 5))
await self.bluetooth.send(Countdown().setCountdown(0, 0, 5))
time.sleep(5)
## fullscreen color
await self.bluetooth.send(FullscreenColor().setColor(255, 0, 0))
time.sleep(5)
## scoreboard
await self.bluetooth.send(Scoreboard().setScoreboard(1, 0))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 1))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 2))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(2, 2))
## graffiti
# load graffiti board and color pixel 0,0 red
| # python imports
# idotmatrix imports
class CMD:
bluetooth = None
logging = logging.getLogger("idotmatrix." + __name__)
def add_arguments(self, parser):
# test
parser.add_argument(
"--test",
action="store_true",
help="run the test function from the command line class",
)
# time sync
parser.add_argument(
"--sync-time",
action="store_true",
help="sync time to device",
)
parser.add_argument(
"--set-time",
action="store",
help="optionally set time to sync to device (use with --sync-time)",
default=datetime.now().strftime("%d-%m-%Y-%H:%M:%S"),
)
# device screen rotation
parser.add_argument(
"--rotate180degrees",
action="store",
help="enable 180 degree device rotation (true = enable, false = disable)",
)
# screen toggle
parser.add_argument(
"--togglescreen",
action="store_true",
help="toggles the screen on or off",
)
# brightness
parser.add_argument(
"--set-brightness",
action="store",
help="sets the brightness of the screen in percent: range 5..100",
)
# password
parser.add_argument(
"--set-password",
action="store",
help="sets password",
)
# chronograph
parser.add_argument(
"--chronograph",
action="store",
help="sets the chronograph mode: 0 = reset, 1 = (re)start, 2 = pause, 3 = continue after pause",
)
# clock
parser.add_argument(
"--clock",
action="store",
help="sets the clock mode: 0 = default, 1 = christmas, 2 = racing, 3 = inverted full screen, 4 = animated hourglass, 5 = frame 1, 6 = frame 2, 7 = frame 3",
)
parser.add_argument(
"--clock-with-date",
action="store_true",
help="shows the current date in addition to the current time.",
)
parser.add_argument(
"--clock-24h",
action="store_true",
help="shows the current time in 24h format.",
)
parser.add_argument(
"--clock-color",
action="store",
help="sets the color of the clock. Format: <R0-255>-<G0-255>-<B0-255> (example: 255-255-255)",
default="255-255-255",
)
# countdown
parser.add_argument(
"--countdown",
action="store",
help="sets the countdown mode: 0 = disable, 1 = start, 2 = pause, 3 = restart",
)
parser.add_argument(
"--countdown-time",
action="store",
help="sets the countdown mode: <MINUTES>-<SECONDS> (example: 10-30)",
default="5-0",
)
# fullscreen color
parser.add_argument(
"--fullscreen-color",
action="store",
help="sets a fullscreen color. Format: <R0-255>-<G0-255>-<B0-255> (example: 255-255-255)",
)
# pixel color
parser.add_argument(
"--pixel-color",
action="append",
help="sets a pixel to a specific color. Could be used multiple times. Format: <PIXEL-X>-<PIXEL-Y>-<R0-255>-<G0-255>-<B0-255> (example: 0-0-255-255-255)",
nargs="+",
)
# scoreboard
parser.add_argument(
"--scoreboard",
action="store",
help="shows the scoreboard with the given scores. Format: <0-999>-<0-999>",
)
# image upload
parser.add_argument(
"--image",
action="store",
help="enables or disables the image mode (true = enable, false = disable)",
)
parser.add_argument(
"--set-image",
action="store",
help="uploads a given image file (fastest is png, max. pixel depending on your display). Format: ./path/to/image.png",
)
parser.add_argument(
"--process-image",
action="store",
help="processes the image instead of sending it raw (useful when the size does not match or it is not a png). Format: <AMOUNT_PIXEL>",
)
# gif upload
parser.add_argument(
"--set-gif",
action="store",
help="uploads a given gif file (pixel depending on your display). Format: ./path/to/image.gif",
)
parser.add_argument(
"--process-gif",
action="store",
help="processes the gif instead of sending it raw (useful when the size does not match). Format: <AMOUNT_PIXEL>",
)
async def run(self, args):
self.logging.info("initializing command line")
address = None
if args.address:
self.logging.debug("using --address")
address = args.address
elif "IDOTMATRIX_ADDRESS" in os.environ:
self.logging.debug("using IDOTMATRIX_ADDRESS")
address = os.environ["IDOTMATRIX_ADDRESS"]
if address is None:
self.logging.error("no device address given")
quit()
else:
self.bluetooth = Bluetooth(address)
# arguments which can be run in parallel
if args.sync_time:
await self.sync_time(args.set_time)
if args.rotate180degrees:
await self.rotate180degrees(args.rotate180degrees)
if args.togglescreen:
await self.togglescreen()
if args.set_brightness:
await self.set_brightness(args.set_brightness)
if args.set_password:
await self.set_password(args.set_password)
# arguments which cannot run in parallel
if args.test:
await self.test()
elif args.chronograph:
await self.chronograph(args.chronograph)
elif args.clock:
await self.clock(args)
elif args.countdown:
await self.countdown(args)
elif args.fullscreen_color:
await self.fullscreenColor(args.fullscreen_color)
elif args.pixel_color:
await self.pixelColor(args.pixel_color)
elif args.scoreboard:
await self.scoreboard(args.scoreboard)
elif args.image:
await self.image(args)
elif args.set_gif:
await self.gif(args)
async def test(self):
"""Tests all available options for the device"""
self.logging.info("starting test of device")
## chronograph
await self.bluetooth.send(Chronograph().setChronograph(1))
time.sleep(5)
await self.bluetooth.send(Chronograph().setChronograph(0))
time.sleep(1)
## clock
await self.bluetooth.send(Clock().setTimeIndicator(True))
await self.bluetooth.send(Clock().setClockMode(0, True, True))
time.sleep(5)
## countdown
await self.bluetooth.send(Countdown().setCountdown(1, 0, 5))
await self.bluetooth.send(Countdown().setCountdown(0, 0, 5))
time.sleep(5)
## fullscreen color
await self.bluetooth.send(FullscreenColor().setColor(255, 0, 0))
time.sleep(5)
## scoreboard
await self.bluetooth.send(Scoreboard().setScoreboard(1, 0))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 1))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 2))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(2, 2))
## graffiti
# load graffiti board and color pixel 0,0 red | await self.bluetooth.send(Graffiti().setPixelColor(255, 0, 0, 0, 0)) | 10 | 2023-11-13 14:04:21+00:00 | 12k |
wjun0830/CGDETR | cg_detr/inference.py | [
{
"identifier": "AverageMeter",
"path": "utils/basic_utils.py",
"snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current/max/min value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n s... | import pprint
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import logging
from tqdm import tqdm, trange
from collections import OrderedDict, defaultdict
from utils.basic_utils import AverageMeter
from torch.utils.data import DataLoader
from cg_detr.config import TestOptions
from cg_detr.model import build_model
from cg_detr.span_utils import span_cxw_to_xx
from cg_detr.start_end_dataset import StartEndDataset, start_end_collate, prepare_batch_inputs
from cg_detr.postprocessing_cg_detr import PostProcessorDETR
from standalone_eval.eval import eval_submission
from utils.basic_utils import save_jsonl, save_json
from utils.temporal_nms import temporal_nms
from collections import OrderedDict
from sys import argv | 10,719 |
# Module-wide logger; format carries millisecond timestamps, level and logger name.
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S",
                    level=logging.INFO)
def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms):
mr_res_after_nms = []
for e in mr_res:
|
# Module-wide logger; format carries millisecond timestamps, level and logger name.
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S",
                    level=logging.INFO)
def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms):
mr_res_after_nms = []
for e in mr_res: | e["pred_relevant_windows"] = temporal_nms( | 11 | 2023-11-10 12:45:25+00:00 | 12k |
AdmTal/music-graphs | src/generate_music_graph.py | [
{
"identifier": "AnimationFrames",
"path": "src/animation_stuff.py",
"snippet": "class AnimationFrames:\n \"\"\"\n Helper object to organize layered frames in order to produce an animation.\n self._data is a Dict. The Keys are \"layer_ids\", and the values are Lists of \"images\".\n They ar... | import os
import click
import psutil
from graphviz import Graph
from hurry.filesize import size
from concurrent.futures import ThreadPoolExecutor, as_completed
from src.animation_stuff import AnimationFrames
from src.cache_stuff import (
cleanup_cache_dir,
get_cache_dir,
)
from src.graph_stuff import (
animate_bezier_point,
animate_ellipsis_blur,
draw_fading_bezier_curve,
parse_graph,
get_node_positions,
)
from src.midi_stuff import (
get_note_start_times_in_frames,
TRACK_NOTE_DELIMITER,
)
from src.theme_stuff import Theme
from src.video_stuff import (
add_frame_to_video,
finalize_video_with_music,
initialize_video_writer,
def create_graphviz_sorted(theme, track_events_frames):
    """
    Build the song graph with an explicit, user-controlled node order.

    This function implements a hack to force Graphviz node ordering.
    Step 1: Create a bare-bones CIRCO graph with nodes added in order
    Step 2: Save that graph to a file, and extract its node positions
    Step 3: Generate the final NEATO graph, using hard coded node positions

    theme.nodes_sorted is either True (sort numerically by note number) or a
    list of note numbers giving a custom order (see filter_and_order_custom).
    Exits the process (after cleaning the cache dir) if the configured engine
    is not circo, since the position-extraction trick relies on circo's
    circular layout.
    """
    if theme.graphviz_engine.lower() != "circo":
        click.echo(
            "ERROR: Node sorting only works when graphviz engine is circo", err=True
        )
        cleanup_cache_dir(get_cache_dir())
        exit(1)
    # Step 1: throwaway circo graph in "plain" output format so node
    # positions can be parsed back out.
    song_graph = Graph(
        "G",
        engine=theme.graphviz_engine,
        format="plain",
        strict=True,
        node_attr=theme.graphviz_node_attrs,
        graph_attr=theme.graphviz_graph_attrs,
        edge_attr=theme.graphviz_edge_attrs,
    )
    # Collect the unique note ids across all non-skipped tracks
    # (dict used as an insertion-ordered set).
    all_notes = {}
    for track, note_tuples in track_events_frames.items():
        if theme.skip_track(track):
            continue
        notes = [
            [note_tuple[0] for note_tuple in list_of_note_tuples]
            for frame_num, list_of_note_tuples in note_tuples.items()
        ]
        for note in notes:
            all_notes[note[0]] = True
    # Create Nodes - In order
    prev_note = None
    all_notes = list(all_notes.keys())
    if theme.nodes_sorted:
        if isinstance(theme.nodes_sorted, bool):
            all_notes = sorted(
                all_notes, key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1])
            )
        else:
            all_notes = filter_and_order_custom(theme.nodes_sorted, all_notes)
    for n in all_notes + [all_notes[0]]:  # tack on the first to make a circle
        song_graph.node(n, label=midi_note_to_pitch_class(n))
        if prev_note:
            song_graph.edge(n, prev_note)
        prev_note = n
    # Step 2: lay out the circo graph and capture each node's coordinates.
    node_positions = get_node_positions(song_graph)
    # Step 3: the real graph, rendered by neato with positions pinned.
    song_graph = Graph(
        "G",
        engine="neato",
        format="xdot",
        strict=True,
        node_attr=theme.graphviz_node_attrs,
        graph_attr=theme.graphviz_graph_attrs,
        edge_attr=theme.graphviz_edge_attrs,
    )
    for track, note_tuples in track_events_frames.items():
        if theme.skip_track(track):
            continue
        notes = [
            [note_tuple[0] for note_tuple in list_of_note_tuples]
            for frame_num, list_of_note_tuples in note_tuples.items()
        ]
        # Create Nodes
        for note in notes:
            n = note[0]
            song_graph.node(
                n,
                label=midi_note_to_pitch_class(n),
                _attributes={"pos": node_positions[n]},
            )
        # Create Edges
        melody_pairs = overlapping_pairs(notes)
        for a_notes, b_notes in melody_pairs:
            for a in a_notes:
                for b in b_notes:
                    song_graph.node(b, label=midi_note_to_pitch_class(b))
                    song_graph.edge(a, b)
    return song_graph
def create_graphviz(theme, track_events_frames):
    """Build the song graph, honoring the theme's node-ordering preference."""
    builder = (
        create_graphviz_sorted if theme.nodes_sorted else create_graphviz_default_sort
    )
    return builder(theme, track_events_frames)
def process_frame(current_frame, base_image, theme, offsets, FRAMES):
    """Composite one animation frame.

    Starting from a copy of *base_image*, apply each layer's deferred draw
    call for *current_frame* in ascending layer order; empty layer slots are
    skipped. Each draw function receives the running composite plus the
    theme/offsets context and returns the next composite.
    """
    composite = base_image.copy()
    for _layer_id, layer_images in sorted(FRAMES.items()):
        entry = layer_images[current_frame]
        if not entry:
            continue
        draw_fn, draw_kwargs = entry
        composite = draw_fn(
            base_image=composite,
            theme=theme,
            offsets=offsets,
            **draw_kwargs,
        )
    return composite
def generate_music_graph(
midi_file_path,
default_theme_file_path,
theme_file_path,
output_path,
soundfont_file,
):
|
def midi_note_to_pitch_class(midi_note):
    """Map a "track<DELIM>note" node id to its pitch-class label (e.g. "C", "Db").

    NOTE(review): the -1 offset implies note numbers are 1-based in this
    project's node-id encoding -- confirm against how the ids are built.
    """
    pitch_classes = ["C", "Db", "D", "Eb", "E", "F", "F#", "G", "Ab", "A", "Bb", "B"]
    _track, note_str = midi_note.split(TRACK_NOTE_DELIMITER)
    return pitch_classes[(int(note_str) - 1) % 12]
def overlapping_pairs(lst):
    """Return consecutive pairs of *lst*, wrapping the last element to the first.

    Lists with fewer than two elements produce no pairs.
    """
    if len(lst) <= 1:
        return []
    pairs = [(lst[i], lst[i + 1]) for i in range(len(lst) - 1)]
    pairs.append((lst[-1], lst[0]))
    return pairs
def create_graphviz_default_sort(theme, track_events_frames):
    """Create a Graphviz without a specified order.

    Nodes are added in whatever order the tracks/frames are iterated, letting
    the configured Graphviz engine decide the layout. Tracks rejected by
    theme.skip_track are omitted. Edges connect each frame's notes to the
    next frame's notes (all-pairs), with a wrap-around edge from the last
    frame back to the first via overlapping_pairs.
    """
    song_graph = Graph(
        "G",
        engine=theme.graphviz_engine,
        format="xdot",
        strict=True,
        node_attr=theme.graphviz_node_attrs,
        graph_attr=theme.graphviz_graph_attrs,
        edge_attr=theme.graphviz_edge_attrs,
    )
    for track, note_tuples in track_events_frames.items():
        if theme.skip_track(track):
            continue
        # One inner list per frame: the note ids sounding at that frame.
        notes = [
            [note_tuple[0] for note_tuple in list_of_note_tuples]
            for frame_num, list_of_note_tuples in note_tuples.items()
        ]
        # Create Nodes
        for note in notes:
            n = note[0]
            song_graph.node(n, label=midi_note_to_pitch_class(n))
        # Create Edges
        melody_pairs = overlapping_pairs(notes)
        for a_notes, b_notes in melody_pairs:
            for a in a_notes:
                for b in b_notes:
                    song_graph.node(b, label=midi_note_to_pitch_class(b))
                    song_graph.edge(a, b)
    return song_graph
def filter_and_order_custom(reference_list, input_list):
    """Order *input_list* ("track<DELIM>note" strings) by note number.

    The output follows *reference_list* (a sequence of note numbers); notes
    not mentioned in the reference are dropped. If two input entries share a
    note number, the later one wins (dict insertion semantics, same as the
    original zip-based mapping).
    """
    by_note_number = {
        int(entry.split(TRACK_NOTE_DELIMITER)[1]): entry for entry in input_list
    }
    return [by_note_number[num] for num in reference_list if num in by_note_number]
def create_graphviz_sorted(theme, track_events_frames):
    """
    Build the song graph with an explicit, user-controlled node order.

    This function implements a hack to force Graphviz node ordering.
    Step 1: Create a bare-bones CIRCO graph with nodes added in order
    Step 2: Save that graph to a file, and extract its node positions
    Step 3: Generate the final NEATO graph, using hard coded node positions

    theme.nodes_sorted is either True (sort numerically by note number) or a
    list of note numbers giving a custom order (see filter_and_order_custom).
    Exits the process (after cleaning the cache dir) if the configured engine
    is not circo, since the position-extraction trick relies on circo's
    circular layout.
    """
    if theme.graphviz_engine.lower() != "circo":
        click.echo(
            "ERROR: Node sorting only works when graphviz engine is circo", err=True
        )
        cleanup_cache_dir(get_cache_dir())
        exit(1)
    # Step 1: throwaway circo graph in "plain" output format so node
    # positions can be parsed back out.
    song_graph = Graph(
        "G",
        engine=theme.graphviz_engine,
        format="plain",
        strict=True,
        node_attr=theme.graphviz_node_attrs,
        graph_attr=theme.graphviz_graph_attrs,
        edge_attr=theme.graphviz_edge_attrs,
    )
    # Collect the unique note ids across all non-skipped tracks
    # (dict used as an insertion-ordered set).
    all_notes = {}
    for track, note_tuples in track_events_frames.items():
        if theme.skip_track(track):
            continue
        notes = [
            [note_tuple[0] for note_tuple in list_of_note_tuples]
            for frame_num, list_of_note_tuples in note_tuples.items()
        ]
        for note in notes:
            all_notes[note[0]] = True
    # Create Nodes - In order
    prev_note = None
    all_notes = list(all_notes.keys())
    if theme.nodes_sorted:
        if isinstance(theme.nodes_sorted, bool):
            all_notes = sorted(
                all_notes, key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1])
            )
        else:
            all_notes = filter_and_order_custom(theme.nodes_sorted, all_notes)
    for n in all_notes + [all_notes[0]]:  # tack on the first to make a circle
        song_graph.node(n, label=midi_note_to_pitch_class(n))
        if prev_note:
            song_graph.edge(n, prev_note)
        prev_note = n
    # Step 2: lay out the circo graph and capture each node's coordinates.
    node_positions = get_node_positions(song_graph)
    # Step 3: the real graph, rendered by neato with positions pinned.
    song_graph = Graph(
        "G",
        engine="neato",
        format="xdot",
        strict=True,
        node_attr=theme.graphviz_node_attrs,
        graph_attr=theme.graphviz_graph_attrs,
        edge_attr=theme.graphviz_edge_attrs,
    )
    for track, note_tuples in track_events_frames.items():
        if theme.skip_track(track):
            continue
        notes = [
            [note_tuple[0] for note_tuple in list_of_note_tuples]
            for frame_num, list_of_note_tuples in note_tuples.items()
        ]
        # Create Nodes
        for note in notes:
            n = note[0]
            song_graph.node(
                n,
                label=midi_note_to_pitch_class(n),
                _attributes={"pos": node_positions[n]},
            )
        # Create Edges
        melody_pairs = overlapping_pairs(notes)
        for a_notes, b_notes in melody_pairs:
            for a in a_notes:
                for b in b_notes:
                    song_graph.node(b, label=midi_note_to_pitch_class(b))
                    song_graph.edge(a, b)
    return song_graph
def create_graphviz(theme, track_events_frames):
    """Build the song graph, honoring the theme's node-ordering preference."""
    builder = (
        create_graphviz_sorted if theme.nodes_sorted else create_graphviz_default_sort
    )
    return builder(theme, track_events_frames)
def process_frame(current_frame, base_image, theme, offsets, FRAMES):
    """Composite one animation frame.

    Starting from a copy of *base_image*, apply each layer's deferred draw
    call for *current_frame* in ascending layer order; empty layer slots are
    skipped. Each draw function receives the running composite plus the
    theme/offsets context and returns the next composite.
    """
    composite = base_image.copy()
    for _layer_id, layer_images in sorted(FRAMES.items()):
        entry = layer_images[current_frame]
        if not entry:
            continue
        draw_fn, draw_kwargs = entry
        composite = draw_fn(
            base_image=composite,
            theme=theme,
            offsets=offsets,
            **draw_kwargs,
        )
    return composite
def generate_music_graph(
midi_file_path,
default_theme_file_path,
theme_file_path,
output_path,
soundfont_file,
): | theme = Theme(theme_file_path, default_theme_file_path) | 10 | 2023-11-17 17:56:04+00:00 | 12k |
dazhangyu123/ACMIL | Step3_WSI_classification.py | [
{
"identifier": "save_model",
"path": "utils/utils.py",
"snippet": "def save_model(conf, epoch, model, optimizer, is_best=False, is_last=False):\n to_save = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch,\n 'config': conf,\n }\n... | import sys
import os
import yaml
import argparse
import torch
from pprint import pprint
from torch import nn
from torch.utils.data import DataLoader
from utils.utils import save_model, Struct, set_seed, Wandb_Writer
from datasets.datasets import build_HDF5_feat_dataset
from architecture.transformer import TransformWrapper, AttnMIL
from architecture.transMIL import TransMIL
from engine import train_one_epoch, evaluate
from architecture.dsmil import MILNet, FCLayer, BClassifier
from architecture.bmil import probabilistic_MIL_Bayes_spvis
from architecture.clam import CLAM_SB, CLAM_MB
from modules import mean_max
| 10,080 |
# !/usr/bin/env python
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"  # disable HDF5 file locking (avoids lock errors on shared/NFS storage)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # single global compute device for the script
def get_arguments():
    """Parse the command-line options for patch-classification training."""
    arg_parser = argparse.ArgumentParser('Patch classification training', add_help=False)
    # config / run mode
    arg_parser.add_argument('--config', dest='config', default='config/camelyon_medical_ssl_config.yml',
                            help='settings of Tip-Adapter in yaml format')
    arg_parser.add_argument("--eval-only", action="store_true", help="evaluation only")
    arg_parser.add_argument("--seed", type=int, default=1,
                            help="set the random seed to ensure reproducibility")
    arg_parser.add_argument('--wandb_mode', default='disabled',
                            choices=['offline', 'online', 'disabled'],
                            help='the model of wandb')
    # data / loss
    arg_parser.add_argument("--n_shot", type=int, default=-1, help="number of wsi images")
    arg_parser.add_argument("--w_loss", type=float, default=1.0, help="number of query token")
    # model
    arg_parser.add_argument("--arch", type=str, default='transmil',
                            choices=['transmil', 'clam_sb', 'clam_mb', 'attnmil',
                                     'selfattn', 'dsmil', 'bmil_spvis', 'meanmil', 'maxmil'],
                            help="number of query token")
    arg_parser.add_argument("--n_token", type=int, default=1, help="number of query token")
    arg_parser.add_argument("--n_masked_patch", type=int, default=0,
                            help="whether use adversarial mask")
    return arg_parser.parse_args()
def main():
# Load config file
args = get_arguments()
# get config
with open(args.config, "r") as ymlfile:
c = yaml.load(ymlfile, Loader=yaml.FullLoader)
c.update(vars(args))
conf = Struct(**c)
group_name = 'ds_%s_%s_arch_%s_%sepochs' % (conf.dataset, conf.pretrain, conf.arch, conf.train_epoch)
log_writer = Wandb_Writer(group_name=group_name, mode=args.wandb_mode, name=args.seed)
conf.ckpt_dir = log_writer.wandb.dir[:-5] + 'saved_models'
if conf.wandb_mode == 'disabled':
conf.ckpt_dir = os.path.join(conf.ckpt_dir, group_name, str(args.seed))
os.makedirs(conf.ckpt_dir, exist_ok=True)
print("Used config:");
pprint(vars(conf));
# Prepare dataset
set_seed(args.seed)
# define datasets and dataloaders
train_data, val_data, test_data = build_HDF5_feat_dataset(os.path.join(conf.data_dir, 'patch_feats_pretrain_%s.h5'%conf.pretrain), conf)
train_loader = DataLoader(train_data, batch_size=conf.B, shuffle=True,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=True)
val_loader = DataLoader(val_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
test_loader = DataLoader(test_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
# define network
if conf.arch == 'transmil':
net = TransMIL(conf)
elif conf.arch == 'selfattn':
net = TransformWrapper(conf)
elif conf.arch == 'clam_sb':
net = CLAM_SB(conf).to(device)
elif conf.arch == 'clam_mb':
net = CLAM_MB(conf).to(device)
elif conf.arch == 'dsmil':
|
# !/usr/bin/env python
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"  # disable HDF5 file locking (avoids lock errors on shared/NFS storage)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # single global compute device for the script
def get_arguments():
    """Parse the command-line options for patch-classification training."""
    arg_parser = argparse.ArgumentParser('Patch classification training', add_help=False)
    # config / run mode
    arg_parser.add_argument('--config', dest='config', default='config/camelyon_medical_ssl_config.yml',
                            help='settings of Tip-Adapter in yaml format')
    arg_parser.add_argument("--eval-only", action="store_true", help="evaluation only")
    arg_parser.add_argument("--seed", type=int, default=1,
                            help="set the random seed to ensure reproducibility")
    arg_parser.add_argument('--wandb_mode', default='disabled',
                            choices=['offline', 'online', 'disabled'],
                            help='the model of wandb')
    # data / loss
    arg_parser.add_argument("--n_shot", type=int, default=-1, help="number of wsi images")
    arg_parser.add_argument("--w_loss", type=float, default=1.0, help="number of query token")
    # model
    arg_parser.add_argument("--arch", type=str, default='transmil',
                            choices=['transmil', 'clam_sb', 'clam_mb', 'attnmil',
                                     'selfattn', 'dsmil', 'bmil_spvis', 'meanmil', 'maxmil'],
                            help="number of query token")
    arg_parser.add_argument("--n_token", type=int, default=1, help="number of query token")
    arg_parser.add_argument("--n_masked_patch", type=int, default=0,
                            help="whether use adversarial mask")
    return arg_parser.parse_args()
def main():
# Load config file
args = get_arguments()
# get config
with open(args.config, "r") as ymlfile:
c = yaml.load(ymlfile, Loader=yaml.FullLoader)
c.update(vars(args))
conf = Struct(**c)
group_name = 'ds_%s_%s_arch_%s_%sepochs' % (conf.dataset, conf.pretrain, conf.arch, conf.train_epoch)
log_writer = Wandb_Writer(group_name=group_name, mode=args.wandb_mode, name=args.seed)
conf.ckpt_dir = log_writer.wandb.dir[:-5] + 'saved_models'
if conf.wandb_mode == 'disabled':
conf.ckpt_dir = os.path.join(conf.ckpt_dir, group_name, str(args.seed))
os.makedirs(conf.ckpt_dir, exist_ok=True)
print("Used config:");
pprint(vars(conf));
# Prepare dataset
set_seed(args.seed)
# define datasets and dataloaders
train_data, val_data, test_data = build_HDF5_feat_dataset(os.path.join(conf.data_dir, 'patch_feats_pretrain_%s.h5'%conf.pretrain), conf)
train_loader = DataLoader(train_data, batch_size=conf.B, shuffle=True,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=True)
val_loader = DataLoader(val_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
test_loader = DataLoader(test_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
# define network
if conf.arch == 'transmil':
net = TransMIL(conf)
elif conf.arch == 'selfattn':
net = TransformWrapper(conf)
elif conf.arch == 'clam_sb':
net = CLAM_SB(conf).to(device)
elif conf.arch == 'clam_mb':
net = CLAM_MB(conf).to(device)
elif conf.arch == 'dsmil':
| i_classifier = FCLayer(conf.D_feat, conf.n_class)
| 11 | 2023-11-12 14:07:34+00:00 | 12k |
zhang-tao-whu/DVIS_Plus | dvis_Plus/ctvis.py | [
{
"identifier": "VideoSetCriterion",
"path": "mask2former_video/modeling/criterion.py",
"snippet": "class VideoSetCriterion(nn.Module):\n \"\"\"This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the ... | import logging
import einops
import random
import numpy as np
import torch
import torch.nn.functional as F
import torch.distributed as dist
from typing import Tuple
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from mask2former_video.modeling.criterion import VideoSetCriterion
from mask2former_video.modeling.matcher import VideoHungarianMatcher
from mask2former.modeling.matcher import HungarianMatcher
from mask2former_video.utils.memory import retry_if_cuda_oom
from scipy.optimize import linear_sum_assignment
from torch import nn
from detectron2.config import configurable
from detectron2.structures import BitMasks
from detectron2.utils.registry import Registry | 7,514 | losses = ["labels", "masks"]
criterion = VideoSetCriterion(
sem_seg_head.num_classes,
matcher=matcher,
weight_dict=weight_dict,
eos_coef=no_object_weight,
losses=losses,
num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,
importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
)
return {
"backbone": backbone,
"sem_seg_head": sem_seg_head,
"criterion": criterion,
"num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
"object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
"overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
"metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
"size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
"sem_seg_postprocess_before_inference": True,
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
"pixel_std": cfg.MODEL.PIXEL_STD,
# video
"num_frames": cfg.INPUT.SAMPLING_FRAME_NUM,
"window_inference": cfg.MODEL.MASK_FORMER.TEST.WINDOW_INFERENCE,
# ctvis
"image_matcher": image_matcher,
"cl_plugin": cl_plugin,
}
    @property
    def device(self):
        """Device the model lives on, inferred from the pixel-mean buffer."""
        return self.pixel_mean.device
    def prepare_for_cl_plugin(self, outputs, targets):
        """Reshape predictions/targets for the contrastive-learning plugin.

        Mutates both arguments in place: drops decoder extras from *outputs*,
        squeezes the singleton frame dim out of target masks/ids, and flattens
        the reid embeddings from (b, c, t, q) to ((b*t), q, c) so the plugin
        sees one entry per frame.
        """
        del outputs['aux_outputs'], outputs['pred_embds'], outputs['pred_embds_without_norm'], outputs['mask_features']
        for item in targets:
            item["masks"] = item["masks"].squeeze(1)
            item["ids"] = item["ids"].squeeze(1)
        outputs['pred_masks'] = outputs['pred_masks'].squeeze(2)
        outputs['pred_reid_embed'] = einops.rearrange(outputs['pred_reid_embed'], 'b c t q -> (b t) q c')
        return outputs, targets
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": per-region ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict has the results for one image. The dict contains the following keys:
* "sem_seg":
A Tensor that represents the
per-pixel segmentation prediced by the head.
The prediction has shape KxHxW that represents the logits of
each class for each pixel.
* "panoptic_seg":
A tuple that represent panoptic output
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
images = []
for video in batched_inputs:
for frame in video["image"]:
images.append(frame.to(self.device))
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
if not self.training and self.window_inference:
outputs = self.run_window_inference(images.tensor, window_size=3)
else:
features = self.backbone(images.tensor)
outputs = self.sem_seg_head(features)
if self.training:
# mask classification target
targets = self.prepare_targets(batched_inputs, images)
outputs, targets = self.frame_decoder_loss_reshape(outputs, targets)
# bipartite matching-based loss
losses = self.criterion(outputs, targets)
for k in list(losses.keys()):
if k in self.criterion.weight_dict:
losses[k] *= self.criterion.weight_dict[k]
else:
# remove this loss if not specified in `weight_dict`
losses.pop(k)
# for cl loss
det_outputs, gt_instances = self.prepare_for_cl_plugin(outputs, targets)
losses.update(self.cl_plugin.train_loss(
det_outputs, gt_instances, self.image_matcher))
return losses
else:
outputs = self.post_processing(outputs)
mask_cls_results = outputs["pred_logits"]
mask_pred_results = outputs["pred_masks"]
mask_cls_result = mask_cls_results[0]
mask_pred_result = mask_pred_results[0]
first_resize_size = (images.tensor.shape[-2], images.tensor.shape[-1])
input_per_image = batched_inputs[0]
image_size = images.image_sizes[0] # image size without padding after data augmentation
height = input_per_image.get("height", image_size[0]) # raw image size before data augmentation
width = input_per_image.get("width", image_size[1])
|
logger = logging.getLogger(__name__)
@META_ARCH_REGISTRY.register()
class CTMinVIS(nn.Module):
"""
Copied from "https://github.com/NVlabs/MinVIS".
"""
    @configurable
    def __init__(
        self,
        *,
        backbone: Backbone,
        sem_seg_head: nn.Module,
        criterion: nn.Module,
        num_queries: int,
        object_mask_threshold: float,
        overlap_threshold: float,
        metadata,
        size_divisibility: int,
        sem_seg_postprocess_before_inference: bool,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
        # video
        num_frames,
        window_inference,
        # ctvis
        image_matcher,
        cl_plugin,
    ):
        """
        Args:
            backbone: a backbone module, must follow detectron2's backbone interface
            sem_seg_head: a module that predicts semantic segmentation from backbone features
            criterion: a module that defines the loss
            num_queries: int, number of queries
            object_mask_threshold: float, threshold to filter query based on classification score
                for panoptic segmentation inference
            overlap_threshold: overlap threshold used in general inference for panoptic segmentation
            metadata: dataset meta, get `thing` and `stuff` category names for panoptic
                segmentation inference
            size_divisibility: Some backbones require the input height and width to be divisible by a
                specific integer. We can use this to override such requirement.
            sem_seg_postprocess_before_inference: whether to resize the prediction back
                to original input size before semantic segmentation inference or after.
                For high-resolution dataset like Mapillary, resizing predictions before
                inference will cause OOM error.
            pixel_mean, pixel_std: list or tuple with #channels element, representing
                the per-channel mean and std to be used to normalize the input image
            num_frames: number of frames sampled per clip during training
            window_inference: whether to run sliding-window inference over long videos
            image_matcher: per-frame Hungarian matcher used by the CL plugin
            cl_plugin: contrastive-learning plugin providing the reid training loss
        """
        super().__init__()
        self.backbone = backbone
        self.sem_seg_head = sem_seg_head
        self.criterion = criterion
        self.num_queries = num_queries
        self.overlap_threshold = overlap_threshold
        self.object_mask_threshold = object_mask_threshold
        self.metadata = metadata
        if size_divisibility < 0:
            # use backbone size_divisibility if not set
            size_divisibility = self.backbone.size_divisibility
        self.size_divisibility = size_divisibility
        self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
        # persistent=False: normalization stats are config, not learned state.
        self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
        self.num_frames = num_frames
        self.window_inference = window_inference
        self.image_matcher = image_matcher
        self.cl_plugin = cl_plugin
    @classmethod
    def from_config(cls, cfg):
        """Build the constructor kwargs for @configurable from a detectron2 cfg."""
        backbone = build_backbone(cfg)
        sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
        # Loss parameters:
        deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
        no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
        # loss weights
        class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT
        dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
        mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT
        # building criterion (clip-level matcher for the segmentation loss)
        matcher = VideoHungarianMatcher(
            cost_class=class_weight,
            cost_mask=mask_weight,
            cost_dice=dice_weight,
            num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
        )
        # for cl loss (per-frame matcher, same costs)
        image_matcher = HungarianMatcher(
            cost_class=class_weight,
            cost_mask=mask_weight,
            cost_dice=dice_weight,
            num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
        )
        cl_plugin = build_cl_plugin(cfg)  # train
        weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight}
        if deep_supervision:
            # Replicate the weights for every intermediate decoder layer.
            dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS
            aux_weight_dict = {}
            for i in range(dec_layers - 1):
                aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
            weight_dict.update(aux_weight_dict)
        losses = ["labels", "masks"]
        criterion = VideoSetCriterion(
            sem_seg_head.num_classes,
            matcher=matcher,
            weight_dict=weight_dict,
            eos_coef=no_object_weight,
            losses=losses,
            num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS,
            oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO,
            importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO,
        )
        return {
            "backbone": backbone,
            "sem_seg_head": sem_seg_head,
            "criterion": criterion,
            "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
            "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
            "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
            "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
            "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
            "sem_seg_postprocess_before_inference": True,
            "pixel_mean": cfg.MODEL.PIXEL_MEAN,
            "pixel_std": cfg.MODEL.PIXEL_STD,
            # video
            "num_frames": cfg.INPUT.SAMPLING_FRAME_NUM,
            "window_inference": cfg.MODEL.MASK_FORMER.TEST.WINDOW_INFERENCE,
            # ctvis
            "image_matcher": image_matcher,
            "cl_plugin": cl_plugin,
        }
    @property
    def device(self):
        """Device the model lives on, inferred from the pixel-mean buffer."""
        return self.pixel_mean.device
    def prepare_for_cl_plugin(self, outputs, targets):
        """Reshape predictions/targets for the contrastive-learning plugin.

        Mutates both arguments in place: drops decoder extras from *outputs*,
        squeezes the singleton frame dim out of target masks/ids, and flattens
        the reid embeddings from (b, c, t, q) to ((b*t), q, c) so the plugin
        sees one entry per frame.
        """
        del outputs['aux_outputs'], outputs['pred_embds'], outputs['pred_embds_without_norm'], outputs['mask_features']
        for item in targets:
            item["masks"] = item["masks"].squeeze(1)
            item["ids"] = item["ids"].squeeze(1)
        outputs['pred_masks'] = outputs['pred_masks'].squeeze(2)
        outputs['pred_reid_embed'] = einops.rearrange(outputs['pred_reid_embed'], 'b c t q -> (b t) q c')
        return outputs, targets
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in (C, H, W) format.
* "instances": per-region ground truth
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict has the results for one image. The dict contains the following keys:
* "sem_seg":
A Tensor that represents the
per-pixel segmentation prediced by the head.
The prediction has shape KxHxW that represents the logits of
each class for each pixel.
* "panoptic_seg":
A tuple that represent panoptic output
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
segments_info (list[dict]): Describe each segment in `panoptic_seg`.
Each dict contains keys "id", "category_id", "isthing".
"""
images = []
for video in batched_inputs:
for frame in video["image"]:
images.append(frame.to(self.device))
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
if not self.training and self.window_inference:
outputs = self.run_window_inference(images.tensor, window_size=3)
else:
features = self.backbone(images.tensor)
outputs = self.sem_seg_head(features)
if self.training:
# mask classification target
targets = self.prepare_targets(batched_inputs, images)
outputs, targets = self.frame_decoder_loss_reshape(outputs, targets)
# bipartite matching-based loss
losses = self.criterion(outputs, targets)
for k in list(losses.keys()):
if k in self.criterion.weight_dict:
losses[k] *= self.criterion.weight_dict[k]
else:
# remove this loss if not specified in `weight_dict`
losses.pop(k)
# for cl loss
det_outputs, gt_instances = self.prepare_for_cl_plugin(outputs, targets)
losses.update(self.cl_plugin.train_loss(
det_outputs, gt_instances, self.image_matcher))
return losses
else:
outputs = self.post_processing(outputs)
mask_cls_results = outputs["pred_logits"]
mask_pred_results = outputs["pred_masks"]
mask_cls_result = mask_cls_results[0]
mask_pred_result = mask_pred_results[0]
first_resize_size = (images.tensor.shape[-2], images.tensor.shape[-1])
input_per_image = batched_inputs[0]
image_size = images.image_sizes[0] # image size without padding after data augmentation
height = input_per_image.get("height", image_size[0]) # raw image size before data augmentation
width = input_per_image.get("width", image_size[1])
| return retry_if_cuda_oom(self.inference_video)( | 3 | 2023-11-14 10:55:11+00:00 | 12k |
ej0cl6/TextEE | TextEE/models/DyGIEpp/data.py | [
{
"identifier": "Graph",
"path": "TextEE/models/DyGIEpp/graph.py",
"snippet": "class Graph(object):\n def __init__(self, entities, triggers, relations, roles, vocabs, gold=True):\n \"\"\"\n :param entities (list): A list of entities represented as a tuple of\n (start_offset, end_... | import copy, json, logging
import numpy as np
import torch
import ipdb
from torch.utils.data import Dataset
from collections import Counter, namedtuple, defaultdict
from itertools import combinations
from .graph import Graph
from .util import enumerate_spans, graph_add_fake_entity, graph_add_fake_trigger | 8,911 | @property
def relation_type_set(self):
type_set = set()
for inst in self.data:
for relation in inst.get('relation_mentions', []):
type_set.add(relation['relation_type'])
return type_set
@property
def role_type_set(self):
type_set = set()
for inst in self.data:
for event in inst['event_mentions']:
for arg in event['arguments']:
type_set.add(arg['role'])
return type_set
def load_data(self):
overlength_num = 0
for inst in self.raw_data:
## added
pieces = [self.tokenizer.tokenize(t, is_split_into_words=True) for t in inst['tokens']]
token_lens = [len(x) for x in pieces]
if 0 in token_lens:
raise ValueError
pieces = [p for ps in pieces for p in ps]
inst['pieces'] = pieces
inst['token_lens'] = token_lens
inst['entity_mentions'] = inst['extra_info']['entity_mentions']
inst['relation_mentions'] = inst['extra_info']['relation_mentions']
inst['event_mentions'] = inst['extra_info']['event_mentions']
##
if not self.test:
if self.max_length != -1 and len(pieces) > self.max_length - 2:
overlength_num += 1
continue
else:
if len(pieces) > self.max_length - 2:
# add token_lens until over-length
piece_counter = 0
for max_token_include, token_len in enumerate(inst['token_lens']):
if piece_counter + token_len >= self.max_length - 2:
logger.info('overlength during testing...')
break
else:
piece_counter += token_len
inst['pieces'] = inst['pieces'][:piece_counter]
inst['token_lens'] = inst['token_lens'][:max_token_include]
inst['tokens'] = inst['tokens'][:max_token_include]
self.data.append(inst)
if overlength_num:
logger.info('Discarded {} overlength instances'.format(overlength_num))
logger.info('Loaded {} DyGIEpp instances from {} E2E instances'.format(len(self), len(self.raw_data)))
def numberize(self, vocabs):
"""Numberize word pieces, labels, etcs.
:param tokenizer: Bert tokenizer.
:param vocabs (dict): a dict of vocabularies.
"""
entity_type_stoi = vocabs.get('entity_type', None)
event_type_stoi = vocabs.get('event_type', None)
relation_type_stoi = vocabs.get('relation_type', None)
role_type_stoi = vocabs.get('role_type', None)
data = []
for inst in self.data:
doc_id = inst['doc_id']
tokens = inst['tokens']
pieces = inst['pieces']
wnd_id = inst['wnd_id']
token_num = len(tokens)
token_lens = inst['token_lens']
entities = inst['entity_mentions']
entities.sort(key=lambda x: x['start'])
events = inst['event_mentions']
# events = clean_events(events)
events.sort(key=lambda x: x['trigger']['start'])
# Pad word pieces with special tokens
piece_idxs = self.tokenizer.encode(pieces,
add_special_tokens=True,
max_length=self.max_length,
truncation=True)
if sum(token_lens) < self.max_length -2:
assert sum(token_lens) +2 == len(piece_idxs)
pad_num = self.max_length - len(piece_idxs)
attn_mask = [1] * len(piece_idxs) + [0] * pad_num
pad_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)
piece_idxs = piece_idxs + [pad_id] * pad_num
entity_list = [(e['start'], e['end'],
entity_type_stoi[e.get('entity_type', "UNK")])
for e in entities]
trigger_list = [(e['trigger']['start'], e['trigger']['end'],
event_type_stoi[e['event_type']])
for e in events]
# Argument role
role_list = get_role_list(entities, events, role_type_stoi)
# Relations
relation_list = get_relation_list(entities, inst.get('relation_mentions', []),relation_type_stoi)
# Graph
graph = Graph(
entities=entity_list,
triggers=trigger_list,
relations=relation_list,
roles=role_list,
vocabs=vocabs,
gold=True
)
# Add other span from span enumeration
|
logger = logging.getLogger(__name__)
instance_fields = [
'doc_id', 'wnd_id', 'tokens', 'pieces', 'piece_idxs',
'token_lens', 'attention_mask', 'graph', 'trigger_list'
]
batch_fields = [
'doc_ids', 'wnd_ids', 'tokens', 'pieces', 'piece_idxs',
'token_lens', 'attention_masks', 'graphs', 'token_nums',
]
Instance = namedtuple('Instance', field_names=instance_fields,
defaults=[None] * len(instance_fields))
Batch = namedtuple('Batch', field_names=batch_fields,
defaults=[None] * len(batch_fields))
def preprocess_entity(entities):
"""
We prevent the situation that there are more than 1 types for exactly same span
"""
span_map = []
entities_ = []
delete_num = 0
for ent in entities:
if (ent['start'], ent['end']) not in span_map:
entities_.append(ent)
span_map.append((ent['start'], ent['end']))
else:
delete_num += 1
if delete_num:
logger.info('remove {} entities due to span duplication'.format(delete_num))
return entities_
def get_relation_list(entities, relations, vocab,
directional=True, symmetric=None):
entity_idxs = {entity['id']: (i,entity) for i, entity in enumerate(entities)}
visited = [[0] * len(entities) for _ in range(len(entities))]
relation_list = []
for relation in relations:
arg_1 = arg_2 = None
for arg in relation['arguments']:
if arg['role'] == 'Arg-1':
arg_1 = entity_idxs[arg['entity_id']]
elif arg['role'] == 'Arg-2':
arg_2 = entity_idxs[arg['entity_id']]
if arg_1 is None or arg_2 is None:
continue
relation_type = relation['relation_type']
if (not directional and arg_1[0] > arg_2[0]) or \
(directional and symmetric and (relation_type in symmetric) and (arg_1[0] > arg_2[0])):
arg_1, arg_2 = arg_2, arg_1
if visited[arg_1[0]][arg_2[0]] == 0:
# TODO (I-Hung): This will automatically remove multi relation
# scenario, but we first stick to this setup
temp = ((arg_1[1]['start'], arg_1[1]['end'], arg_1[1].get('entity_type', 'UNK')),
(arg_2[1]['start'], arg_2[1]['end'], arg_2[1].get('entity_type', 'UNK')),
vocab[relation_type])
relation_list.append(temp)
if not directional:
temp = ((arg_2[1]['start'], arg_2[1]['end'], arg_2.get('entity_type', 'UNK')),
(arg_1[1]['start'], arg_1[1]['end'], arg_1.get('entity_type', 'UNK')),
vocab[relation_type])
relation_list.append(temp)
visited[arg_2[0]][arg_1[0]] = 1
visited[arg_1[0]][arg_2[0]] = 1
relation_list.sort(key=lambda x: (x[0][0], x[1][0]))
return relation_list
def get_role_list(entities, events, vocab):
entity_idxs = {entity['id']: (i,entity) for i, entity in enumerate(entities)}
visited = [[0] * len(entities) for _ in range(len(events))]
role_list = []
cnt = 0
for i, event in enumerate(events):
for arg in event['arguments']:
entity_idx = entity_idxs[arg['entity_id']]
if visited[i][entity_idx[0]] == 0 and arg['role'] in vocab:
# TODO (I-Hung): This will automatically remove multi role
# scenario, but we first stick to this setup
temp = ((event['trigger']['start'], event['trigger']['end'], event['event_type']),
(entity_idx[1]['start'], entity_idx[1]['end'], entity_idx[1].get('entity_type', 'UNK')),
vocab[arg['role']])
role_list.append(temp)
visited[i][entity_idx[0]] = 1
else:
cnt += 1
role_list.sort(key=lambda x: (x[0][0], x[1][0]))
if cnt:
logger.info('{} times of role are removed in gold because of span duplication'.format(cnt))
return role_list
def clean_events(events):
cleaned_map = {}
for event in events:
key = (event['trigger']['start'], event['trigger']['end'], event['event_type'], event['trigger']['text'])
if key in cleaned_map:
# do argument merging
cleaned_map[key]['arguments'].extend(event['arguments'])
else:
cleaned_map[key] = event
return list(cleaned_map.values())
class IEDataset(Dataset):
def __init__(self, raw_data, tokenizer, config, max_length=128, test=False):
self.raw_data = raw_data
self.tokenizer = tokenizer
self.data = []
self.max_length = max_length
self.test=test
self.max_entity_span = config.max_entity_span
self.min_entity_span = config.min_entity_span
self.max_trigger_span = config.max_trigger_span
self.min_trigger_span = config.min_trigger_span
self.load_data()
def __len__(self):
return len(self.data)
def __getitem__(self, item):
return self.data[item]
@property
def entity_type_set(self):
type_set = set()
for inst in self.data:
for entity in inst['entity_mentions']:
type_set.add(entity.get('entity_type', "UNK"))
return type_set
@property
def event_type_set(self):
type_set = set()
for inst in self.data:
for event in inst['event_mentions']:
type_set.add(event['event_type'])
return type_set
@property
def relation_type_set(self):
type_set = set()
for inst in self.data:
for relation in inst.get('relation_mentions', []):
type_set.add(relation['relation_type'])
return type_set
@property
def role_type_set(self):
type_set = set()
for inst in self.data:
for event in inst['event_mentions']:
for arg in event['arguments']:
type_set.add(arg['role'])
return type_set
def load_data(self):
overlength_num = 0
for inst in self.raw_data:
## added
pieces = [self.tokenizer.tokenize(t, is_split_into_words=True) for t in inst['tokens']]
token_lens = [len(x) for x in pieces]
if 0 in token_lens:
raise ValueError
pieces = [p for ps in pieces for p in ps]
inst['pieces'] = pieces
inst['token_lens'] = token_lens
inst['entity_mentions'] = inst['extra_info']['entity_mentions']
inst['relation_mentions'] = inst['extra_info']['relation_mentions']
inst['event_mentions'] = inst['extra_info']['event_mentions']
##
if not self.test:
if self.max_length != -1 and len(pieces) > self.max_length - 2:
overlength_num += 1
continue
else:
if len(pieces) > self.max_length - 2:
# add token_lens until over-length
piece_counter = 0
for max_token_include, token_len in enumerate(inst['token_lens']):
if piece_counter + token_len >= self.max_length - 2:
logger.info('overlength during testing...')
break
else:
piece_counter += token_len
inst['pieces'] = inst['pieces'][:piece_counter]
inst['token_lens'] = inst['token_lens'][:max_token_include]
inst['tokens'] = inst['tokens'][:max_token_include]
self.data.append(inst)
if overlength_num:
logger.info('Discarded {} overlength instances'.format(overlength_num))
logger.info('Loaded {} DyGIEpp instances from {} E2E instances'.format(len(self), len(self.raw_data)))
def numberize(self, vocabs):
"""Numberize word pieces, labels, etcs.
:param tokenizer: Bert tokenizer.
:param vocabs (dict): a dict of vocabularies.
"""
entity_type_stoi = vocabs.get('entity_type', None)
event_type_stoi = vocabs.get('event_type', None)
relation_type_stoi = vocabs.get('relation_type', None)
role_type_stoi = vocabs.get('role_type', None)
data = []
for inst in self.data:
doc_id = inst['doc_id']
tokens = inst['tokens']
pieces = inst['pieces']
wnd_id = inst['wnd_id']
token_num = len(tokens)
token_lens = inst['token_lens']
entities = inst['entity_mentions']
entities.sort(key=lambda x: x['start'])
events = inst['event_mentions']
# events = clean_events(events)
events.sort(key=lambda x: x['trigger']['start'])
# Pad word pieces with special tokens
piece_idxs = self.tokenizer.encode(pieces,
add_special_tokens=True,
max_length=self.max_length,
truncation=True)
if sum(token_lens) < self.max_length -2:
assert sum(token_lens) +2 == len(piece_idxs)
pad_num = self.max_length - len(piece_idxs)
attn_mask = [1] * len(piece_idxs) + [0] * pad_num
pad_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)
piece_idxs = piece_idxs + [pad_id] * pad_num
entity_list = [(e['start'], e['end'],
entity_type_stoi[e.get('entity_type', "UNK")])
for e in entities]
trigger_list = [(e['trigger']['start'], e['trigger']['end'],
event_type_stoi[e['event_type']])
for e in events]
# Argument role
role_list = get_role_list(entities, events, role_type_stoi)
# Relations
relation_list = get_relation_list(entities, inst.get('relation_mentions', []),relation_type_stoi)
# Graph
graph = Graph(
entities=entity_list,
triggers=trigger_list,
relations=relation_list,
roles=role_list,
vocabs=vocabs,
gold=True
)
# Add other span from span enumeration | entity_spans = enumerate_spans(tokens, offset=0, | 1 | 2023-11-15 21:32:56+00:00 | 12k |
maagic6/SDIMV | SDIMV.py | [
{
"identifier": "imageProcess",
"path": "image.py",
"snippet": "class imageProcess:\n def __init__(self, fn):\n ft = filetype.guess(fn)\n self.data = {\"prompt\": \"\", \n \"negative_prompt\": \"\", \n \"steps\": \"\", \n \"sample... | import sys, subprocess, qdarkstyle
from PyQt6.QtWidgets import (
QApplication,
QFrame,
QGraphicsPixmapItem,
QGraphicsScene,
QGraphicsView,
QGridLayout,
QLabel,
QLineEdit,
QMenu,
QToolBar,
QVBoxLayout,
QHBoxLayout,
QWidget,
QPushButton,
QScrollArea,
QDockWidget,
QMessageBox,
)
from PyQt6.QtGui import QIcon, QAction, QFont, QPainter, QMovie, QPixmap, QDesktopServices
from PyQt6.QtCore import Qt, QRectF, QEvent, QUrl, QSettings, QSystemSemaphore, QSharedMemory
from PyQt6.QtMultimedia import QMediaPlayer
from PyQt6.QtMultimediaWidgets import QGraphicsVideoItem
from pathlib import Path
from qframelesswindow import FramelessMainWindow
from image import imageProcess
from file_handler import FileHandler
from custom_widgets import CustomDockWidget, CustomLineEdit, CustomTextEdit, CustomListWidget, CustomTitleBar, ZoomableGraphicsView
from icon import resource_path
from about_dialog import AboutDialog | 7,447 | self.imageScene.removeItem(self.video_item)
self.media_player.deleteLater()
self.video_item.deleteLater()
self.isMediaPlayerDeleted = True
#del self.media_player
#del self.video_item
except Exception as e:
print(f"Exception when disconnecting media player: {e}")
self.imageScene.clear()
def dragEnterEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
for url in mime_data.urls():
if url.isLocalFile():
file_path = url.toLocalFile()
if Path(file_path).is_dir() or Path(file_path).suffix.lower() in ['.png', '.gif', '.webp', '.mp4', '.jpg']:
# accept local files
event.acceptProposedAction()
return
elif url.scheme() in ('http', 'https'):
# accept image links
event.acceptProposedAction()
return
def dropEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
new_files = []
for url in mime_data.urls():
if url.isLocalFile():
file_path = url.toLocalFile()
if Path(file_path).is_dir():
new_files.extend(self.fileHandler.getFilesFromFolder(file_path))
elif 'Temp' in Path(file_path).parts:
copied_path = self.fileHandler.copyTempImage(file_path)
new_files.append(copied_path)
else:
new_files.append(file_path)
elif url.scheme() == 'http' or url.scheme() == 'https':
downloaded_path = self.fileHandler.downloadImage(url)
if downloaded_path and not self.fileHandler.isFileInList(downloaded_path):
new_files.append(downloaded_path)
new_files = [file_path for file_path in new_files if not self.fileHandler.isFileInList(file_path)]
self.fileHandler.updateFileList(new_files)
event.acceptProposedAction()
def handleItemSelectionChanged(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
self.viewMetadata(selectedItem)
#if 0 <= selectedIndex < len(self.selectedFiles):
#self.viewMetadata(selectedItem)
def updateImageView(self):
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
def updateVideoView(self):
self.imageView.resetTransform()
self.imageScene.setSceneRect(self.video_item.boundingRect())
self.imageView.setScene(self.imageScene)
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
def saveSettings(self):
file_paths = self.fileHandler.getFileList()
self.settings.setValue("selectedFiles", file_paths)
self.settings.setValue("main_window_state", self.saveState())
self.settings.setValue("main_window_geometry", self.saveGeometry())
def loadSettings(self):
file_paths = self.settings.value("selectedFiles", [])
self.fileHandler.updateFileList(file_paths)
if self.settings.value("main_window_state"):
self.restoreState(self.settings.value("main_window_state"))
def closeEvent(self, event):
self.saveSettings()
event.accept()
def eventFilter(self, obj, event):
if obj == self:
if event.type() == QEvent.Type.Resize:
self.updateImageView()
if obj in (self.fileListWidget, self.imageViewWidget):
if event.type() == QEvent.Type.Move:
self.updateImageView()
return super(MainWindow, self).eventFilter(obj, event)
def showContextMenu(self, event):
menu = QMenu(self)
view_action = QAction("View", self)
view_action.triggered.connect(self.openImage)
openfolder_action = QAction("Open folder", self)
openfolder_action.triggered.connect(self.openFolder)
remove_action = QAction("Remove", self)
remove_action.triggered.connect(self.fileHandler.removeSelectedItem)
menu.addAction(view_action)
menu.addAction(openfolder_action)
menu.addAction(remove_action)
menu.exec(self.fileList.mapToGlobal(event))
def openFolder(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
selectedFile = selectedItem.text()
folder_path = Path(selectedFile).parent
QDesktopServices.openUrl(QUrl.fromLocalFile(str(folder_path)))
def openImage(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
selectedFile = selectedItem.text()
subprocess.run(['start', '', selectedFile], shell=True)
def showAboutDialog(self):
self.setEnabled(False)
|
class MainWindow(FramelessMainWindow):
def __init__(self):
super().__init__()
self.fileHandler = FileHandler(self)
#window size
self.setTitleBar(CustomTitleBar(self))
self.setWindowTitle('SDIMV')
self.titleBar.raise_()
self.settings = QSettings("maagic6", "SDIMV")
savedGeometry = self.settings.value("main_window_geometry")
if savedGeometry is not None:
self.restoreGeometry(savedGeometry)
else:
self.resize(720,720)
qr = self.frameGeometry()
cp = self.screen().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
iconPath = resource_path("icon/icon.ico")
self.setWindowIcon(QIcon(iconPath))
toolbar = QToolBar("Toolbar")
toolbar.setStyleSheet("QToolBar {background: transparent;}"
"QToolButton {background: transparent; border: none;}"
"QToolButton:hover {background: rgba(195, 195, 255, 50);}")
iconPath2 = resource_path("icon/add.png")
iconPath3 = resource_path("icon/remove.png")
iconPath4 = resource_path("icon/clear.png")
iconPath5 = resource_path("icon/about.png")
addAction = QAction(QIcon(iconPath2), "Add", self)
addAction.triggered.connect(self.fileHandler.openFileDialog)
removeAction = QAction(QIcon(iconPath3), "Remove", self)
removeAction.triggered.connect(self.fileHandler.removeSelectedItem)
clearAction = QAction(QIcon(iconPath4), "Clear", self)
clearAction.triggered.connect(self.fileHandler.clearFileList)
aboutAction = QAction(QIcon(iconPath5), "About", self)
aboutAction.triggered.connect(self.showAboutDialog)
toolbar.addAction(addAction)
toolbar.addAction(removeAction)
toolbar.addAction(clearAction)
toolbar.addAction(aboutAction)
toolbar.setObjectName("Toolbar")
self.addToolBar(toolbar)
self.imagePreviewFrame = QFrame()
self.imagePreviewFrame.setFrameShape(QFrame.Shape.Box)
self.imagePreviewFrame.setLineWidth(1)
self.imagePreviewFrame.setFocusPolicy(Qt.FocusPolicy.NoFocus)
self.imageFrame = QVBoxLayout()
self.imagePreviewFrame.setLayout(self.imageFrame)
self.imageScene = QGraphicsScene()
self.imageView = ZoomableGraphicsView(self.imageScene)
self.imageView.setRenderHint(QPainter.RenderHint.Antialiasing, True)
self.imageView.setDragMode(QGraphicsView.DragMode.ScrollHandDrag)
self.imageView.setFocusPolicy(Qt.FocusPolicy.NoFocus)
self.imageFrame.addWidget(self.imageView)
self.fileList = CustomListWidget()
self.fileList.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)
self.fileList.customContextMenuRequested.connect(self.showContextMenu)
self.fileList.itemSelectionChanged.connect(self.handleItemSelectionChanged)
self.selectedFile = QLineEdit()
self.browseButton = QPushButton('Browse')
self.browseButton.clicked.connect(self.fileHandler.openFileDialog)
self.browseButton.setFocusPolicy(Qt.FocusPolicy.NoFocus)
self.clearButton = QPushButton('Clear')
self.clearButton.clicked.connect(self.fileHandler.clearFileList)
self.clearButton.setFocusPolicy(Qt.FocusPolicy.NoFocus)
bottomHalf = QScrollArea(self)
bottomHalf.setWidgetResizable(True)
scrollContent = QWidget()
self.gridLayout = QGridLayout(scrollContent)
bottomHalf.setWidget(scrollContent)
self.gridLayout.addWidget(QLabel('Selected file:'), 3, 0)
self.gridLayout.addWidget(self.selectedFile, 4, 0, 1, 5)
self.widgetInfo = [
('Positive prompt:', CustomTextEdit(), 'prompt'),
('Negative prompt:', CustomTextEdit(), 'negative_prompt'),
('Steps:', CustomLineEdit(), 'steps'),
('Sampler:', CustomLineEdit(), 'sampler'),
('CFG scale:', CustomLineEdit(), 'cfg_scale'),
('Seed:', CustomLineEdit(), 'seed'),
('Size:', CustomLineEdit(), 'size'),
('Model hash:', CustomLineEdit(), 'model_hash'),
('Model:', CustomLineEdit(), 'model'),
('LoRA:', CustomLineEdit(), 'lora'),
('Raw:', CustomTextEdit(), 'raw')
]
for row, (label_text, widget, widget_name) in enumerate(self.widgetInfo):
label = QLabel(label_text)
setattr(self, widget_name + "_label", label)
setattr(self, widget_name, widget)
self.gridLayout.addWidget(label, 2*row+5, 0, 1, 5)
self.gridLayout.addWidget(widget, 2*row+5+1, 0, 1, 5)
# set stretch factors
self.gridLayout.setColumnStretch(0, 1)
self.gridLayout.setColumnStretch(1, 1)
self.gridLayout.setColumnStretch(2, 1)
self.gridLayout.setColumnStretch(3, 1)
self.gridLayout.setColumnStretch(4, 1)
bottomHalf.setMinimumHeight(1)
# set alignments
self.gridLayout.setAlignment(Qt.AlignmentFlag.AlignTop)
self.gridLayout.setAlignment(Qt.AlignmentFlag.AlignLeft)
self.fileListWidget = CustomDockWidget(self)
self.fileListWidget.setObjectName("FileListWidget")
titleBarWidget = QWidget(self)
titleBarLayout = QHBoxLayout(titleBarWidget)
titleLabel = QLabel("File list")
titleBarLayout.addWidget(titleLabel, alignment=Qt.AlignmentFlag.AlignHCenter)
titleBarLayout.addStretch()
titleBarLayout.addWidget(self.browseButton, alignment=Qt.AlignmentFlag.AlignHCenter)
titleBarLayout.addWidget(self.clearButton, alignment=Qt.AlignmentFlag.AlignHCenter)
titleBarWidget.setMaximumHeight(10)
self.fileListWidget.setWidget(self.fileList)
self.fileListWidget.setWindowTitle("File list")
self.fileList.setAcceptDrops(True)
self.fileListWidget.setTitleBarWidget(titleLabel)
self.fileListWidget.setAcceptDrops(True)
self.addDockWidget(Qt.DockWidgetArea.LeftDockWidgetArea, self.fileListWidget)
self.imageViewWidget = QDockWidget()
self.imageViewWidget.setObjectName("ImageViewWidget")
self.imageViewWidget.setWidget(self.imagePreviewFrame)
self.imageViewWidget.setFeatures(QDockWidget.DockWidgetFeature.NoDockWidgetFeatures)
self.imagePreviewFrame.setAcceptDrops(True)
self.imageView.setAcceptDrops(True)
self.imageViewWidget.setTitleBarWidget(QLabel("Image view"))
self.imageViewWidget.setAllowedAreas(Qt.DockWidgetArea.NoDockWidgetArea)
self.imageViewWidget.setAcceptDrops(True)
self.setCentralWidget(self.imageViewWidget)
self.metadataWidget = QDockWidget()
self.metadataWidget.setObjectName("MetadataWidget")
self.metadataWidget.setWidget(bottomHalf)
self.metadataWidget.setTitleBarWidget(QLabel("Metadata"))
self.metadataWidget.setWindowTitle("Metadata")
self.addDockWidget(Qt.DockWidgetArea.LeftDockWidgetArea, self.metadataWidget)
self.setContentsMargins(0,30,0,0)
self.isMediaPlayerDeleted = False
self.isMovieDeleted = False
self.fileListWidget.dockLocationChanged.connect(self.updateImageView)
self.metadataWidget.dockLocationChanged.connect(self.updateImageView)
self.fileListWidget.installEventFilter(self)
self.imageViewWidget.installEventFilter(self)
self.metadataWidget.installEventFilter(self)
self.installEventFilter(self)
# load settings
self.loadSettings()
# enable drop events
self.setAcceptDrops(True)
self.show()
if len(sys.argv) > 1:
new_files = []
for arg in sys.argv[1:]:
file_path = Path(arg)
if file_path.is_dir():
new_files.extend(self.fileHandler.getFilesFromFolder(file_path))
elif not self.fileHandler.isFileInList(str(file_path)):
new_files.append(str(file_path).replace('\\', '/'))
self.fileHandler.updateFileList(new_files)
def viewMetadata(self, item):
if item:
selectedFile = item.text()
self.selectedFile.setText(item.text())
if Path(selectedFile).exists():
if selectedFile.lower().endswith(('.gif','.webp')):
self.cleanup()
self.movie = QMovie(selectedFile)
#self.imageScene.clear()
self.pixmap_item = QGraphicsPixmapItem()
self.imageScene.addItem(self.pixmap_item)
self.isMovieDeleted = False
self.imageView.resetTransform()
self.movie.start()
self.movie.frameChanged.connect(lambda: self.pixmap_item.setPixmap(self.movie.currentPixmap()))
self.imageScene.setSceneRect(QRectF(self.movie.currentPixmap().rect()))
self.imageView.setScene(self.imageScene)
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
elif selectedFile.lower().endswith(('.png', '.jpg', '.jpeg','.bmp')):
self.cleanup()
pixmap = QPixmap(selectedFile)
#self.imageScene.clear()
self.imageScene.addPixmap(pixmap)
self.imageView.setScene(self.imageScene)
self.imageView.resetTransform()
self.imageScene.setSceneRect(QRectF(pixmap.rect()))
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
elif selectedFile.lower().endswith(('.mp4', '.mpeg4', '.avi')):
self.cleanup()
#self.imageScene.clear()
self.imageView.resetTransform()
self.media_player = QMediaPlayer()
self.video_item = QGraphicsVideoItem()
self.imageScene.addItem(self.video_item)
self.isMediaPlayerDeleted = False
self.media_player.setVideoOutput(self.video_item)
self.media_player.setSource(QUrl.fromLocalFile(selectedFile))
self.media_player.play()
self.media_player.mediaStatusChanged.connect(self.loopVideo)
self.video_item.nativeSizeChanged.connect(self.updateVideoView)
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio) #workaround
self.imageView.resetZoom()
with open(selectedFile, 'rb') as file:
image = imageProcess(file)
prompt = image.positivePrompt()
if prompt == -1:
for _, widget, _ in self.widgetInfo:
widget.setText('')
else:
data = image.getInfo()
for _, widget, key in self.widgetInfo:
if key == 'raw':
widget.setText(str(image.getRaw()))
else:
widget.setText(str(data[key]))
else:
self.cleanup()
#self.imageScene.clear()
self.selectedFile.clear()
for _, widget, _ in self.widgetInfo:
widget.clear()
self.fileHandler.removeSelectedItem()
else:
self.cleanup()
self.imageScene.clear()
self.selectedFile.clear()
for _, widget, _ in self.widgetInfo:
widget.clear()
def loopVideo(self, status):
if status == QMediaPlayer.MediaStatus.EndOfMedia:
self.media_player.setPosition(0)
self.media_player.play()
else:
pass
def cleanup(self):
if hasattr(self, 'movie') and self.movie is not None and self.isMovieDeleted == False:
try:
self.movie.frameChanged.disconnect()
self.movie.stop()
self.imageScene.removeItem(self.pixmap_item)
self.movie.deleteLater()
del self.movie
del self.pixmap_item
self.isMovieDeleted = True
except TypeError as e:
print(f"Exception when disconnecting movie: {e}")
if hasattr(self, 'media_player') and self.media_player is not None and self.isMediaPlayerDeleted == False:
try:
#self.media_player.setSource(QUrl())
self.media_player.mediaStatusChanged.disconnect()
self.media_player.stop()
self.imageScene.removeItem(self.video_item)
self.media_player.deleteLater()
self.video_item.deleteLater()
self.isMediaPlayerDeleted = True
#del self.media_player
#del self.video_item
except Exception as e:
print(f"Exception when disconnecting media player: {e}")
self.imageScene.clear()
def dragEnterEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
for url in mime_data.urls():
if url.isLocalFile():
file_path = url.toLocalFile()
if Path(file_path).is_dir() or Path(file_path).suffix.lower() in ['.png', '.gif', '.webp', '.mp4', '.jpg']:
# accept local files
event.acceptProposedAction()
return
elif url.scheme() in ('http', 'https'):
# accept image links
event.acceptProposedAction()
return
def dropEvent(self, event):
mime_data = event.mimeData()
if mime_data.hasUrls():
new_files = []
for url in mime_data.urls():
if url.isLocalFile():
file_path = url.toLocalFile()
if Path(file_path).is_dir():
new_files.extend(self.fileHandler.getFilesFromFolder(file_path))
elif 'Temp' in Path(file_path).parts:
copied_path = self.fileHandler.copyTempImage(file_path)
new_files.append(copied_path)
else:
new_files.append(file_path)
elif url.scheme() == 'http' or url.scheme() == 'https':
downloaded_path = self.fileHandler.downloadImage(url)
if downloaded_path and not self.fileHandler.isFileInList(downloaded_path):
new_files.append(downloaded_path)
new_files = [file_path for file_path in new_files if not self.fileHandler.isFileInList(file_path)]
self.fileHandler.updateFileList(new_files)
event.acceptProposedAction()
def handleItemSelectionChanged(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
self.viewMetadata(selectedItem)
#if 0 <= selectedIndex < len(self.selectedFiles):
#self.viewMetadata(selectedItem)
def updateImageView(self):
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
def updateVideoView(self):
self.imageView.resetTransform()
self.imageScene.setSceneRect(self.video_item.boundingRect())
self.imageView.setScene(self.imageScene)
self.imageView.fitInView(self.imageScene.sceneRect(), Qt.AspectRatioMode.KeepAspectRatio)
self.imageView.resetZoom()
def saveSettings(self):
file_paths = self.fileHandler.getFileList()
self.settings.setValue("selectedFiles", file_paths)
self.settings.setValue("main_window_state", self.saveState())
self.settings.setValue("main_window_geometry", self.saveGeometry())
def loadSettings(self):
file_paths = self.settings.value("selectedFiles", [])
self.fileHandler.updateFileList(file_paths)
if self.settings.value("main_window_state"):
self.restoreState(self.settings.value("main_window_state"))
def closeEvent(self, event):
self.saveSettings()
event.accept()
def eventFilter(self, obj, event):
if obj == self:
if event.type() == QEvent.Type.Resize:
self.updateImageView()
if obj in (self.fileListWidget, self.imageViewWidget):
if event.type() == QEvent.Type.Move:
self.updateImageView()
return super(MainWindow, self).eventFilter(obj, event)
def showContextMenu(self, event):
menu = QMenu(self)
view_action = QAction("View", self)
view_action.triggered.connect(self.openImage)
openfolder_action = QAction("Open folder", self)
openfolder_action.triggered.connect(self.openFolder)
remove_action = QAction("Remove", self)
remove_action.triggered.connect(self.fileHandler.removeSelectedItem)
menu.addAction(view_action)
menu.addAction(openfolder_action)
menu.addAction(remove_action)
menu.exec(self.fileList.mapToGlobal(event))
def openFolder(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
selectedFile = selectedItem.text()
folder_path = Path(selectedFile).parent
QDesktopServices.openUrl(QUrl.fromLocalFile(str(folder_path)))
def openImage(self):
selectedItem = self.fileList.currentItem()
if selectedItem:
selectedFile = selectedItem.text()
subprocess.run(['start', '', selectedFile], shell=True)
def showAboutDialog(self):
self.setEnabled(False) | about_dialog = AboutDialog(self) | 9 | 2023-11-15 19:51:29+00:00 | 12k |
chaiNNer-org/spandrel | src/spandrel/architectures/GRLIR/arch/grl.py | [
{
"identifier": "to_2tuple",
"path": "src/spandrel/architectures/__arch_helpers/timm/helpers.py",
"snippet": "def to_2tuple(x: T | Iterable[T]) -> tuple[T, T]:\n if isinstance(x, str):\n return x, x # type: ignore\n if isinstance(x, collections.abc.Iterable):\n return tuple(x) # ty... | from typing import Literal
from ...__arch_helpers.timm.helpers import to_2tuple
from ...__arch_helpers.timm.weight_init import trunc_normal_
from .config import GRLConfig
from .mixed_attn_block_efficient import (
EfficientMixAttnTransformerBlock,
get_stripe_info,
)
from .ops import (
bchw_to_blc,
blc_to_bchw,
calculate_mask,
calculate_mask_all,
get_relative_coords_table_all,
get_relative_position_index_simple,
)
from .swin_v1_block import (
build_last_conv,
)
from .upsample import Upsample, UpsampleOneStep
from fairscale.nn import checkpoint_wrapper # type: ignore
import torch
import torch.nn as nn
import torch.nn.functional as F | 8,473 | mlp_ratio=4.0,
qkv_bias=True,
qkv_proj_type="linear",
anchor_proj_type="avgpool",
anchor_one_stage=True,
anchor_window_down_factor=1,
out_proj_type: Literal["linear", "conv2d"] = "linear",
local_connection=False,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
pretrained_window_size: list[int] = [0, 0],
pretrained_stripe_size: list[int] = [0, 0],
conv_type="1conv",
init_method="n", # initialization method of the weight parameters used to train large scale models.
fairscale_checkpoint=False, # fairscale activation checkpointing
offload_to_cpu=False,
euclidean_dist=False,
):
super().__init__()
# Process the input arguments
out_channels = out_channels or in_channels
self.in_channels = in_channels
self.out_channels = out_channels
num_out_feats = 64
self.embed_dim = embed_dim
self.upscale = upscale
self.upsampler = upsampler
self.img_range = img_range
if in_channels == 3:
rgb_mean = (0.4488, 0.4371, 0.4040)
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
else:
self.mean = torch.zeros(1, 1, 1, 1)
max_stripe_size = max([0 if s is None else s for s in stripe_size]) # type: ignore
max_stripe_groups = max([0 if s is None else s for s in stripe_groups])
max_stripe_groups *= anchor_window_down_factor
self.pad_size = max(window_size, max_stripe_size, max_stripe_groups)
# if max_stripe_size >= window_size:
# self.pad_size *= anchor_window_down_factor
# if stripe_groups[0] is None and stripe_groups[1] is None:
# self.pad_size = max(stripe_size)
# else:
# self.pad_size = window_size
self.input_resolution = to_2tuple(img_size)
self.window_size = to_2tuple(window_size)
self.shift_size = [w // 2 for w in self.window_size]
self.stripe_size = stripe_size
self.stripe_groups = stripe_groups
self.pretrained_window_size = pretrained_window_size
self.pretrained_stripe_size = pretrained_stripe_size
self.anchor_window_down_factor = anchor_window_down_factor
# Head of the network. First convolution.
self.conv_first = nn.Conv2d(in_channels, embed_dim, 3, 1, 1)
# Body of the network
self.norm_start = norm_layer(embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# stochastic depth decay rule
args = GRLConfig(
out_proj_type=out_proj_type,
local_connection=local_connection,
euclidean_dist=euclidean_dist,
)
for k, v in self.set_table_index_mask(self.input_resolution).items():
self.register_buffer(k, v, persistent=False)
self.layers = nn.ModuleList()
for i in range(len(depths)):
layer = TransformerStage(
dim=embed_dim,
input_resolution=self.input_resolution,
depth=depths[i],
num_heads_window=num_heads_window[i],
num_heads_stripe=num_heads_stripe[i],
window_size=self.window_size,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_shift=stripe_shift,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(depths[:i]) : sum(
depths[: i + 1]
) # type: ignore
], # no impact on SR results
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
conv_type=conv_type,
init_method=init_method,
fairscale_checkpoint=fairscale_checkpoint,
offload_to_cpu=offload_to_cpu,
args=args,
)
self.layers.append(layer)
self.norm_end = norm_layer(embed_dim)
# Tail of the network
self.conv_after_body = build_last_conv(conv_type, embed_dim)
#####################################################################################################
################################ 3, high quality image reconstruction ################################
if self.upsampler == "pixelshuffle":
# for classical SR
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
| """
Efficient and Explicit Modelling of Image Hierarchies for Image Restoration
Image restoration transformers with global, regional, and local modelling
A clean version of the.
Shared buffers are used for relative_coords_table, relative_position_index, and attn_mask.
"""
from __future__ import annotations
class TransformerStage(nn.Module):
"""Transformer stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads_window (list[int]): Number of window attention heads in different layers.
num_heads_stripe (list[int]): Number of stripe attention heads in different layers.
stripe_size (list[int]): Stripe size. Default: [8, 8]
stripe_groups (list[int]): Number of stripe groups. Default: [None, None].
stripe_shift (bool): whether to shift the stripes. This is used as an ablation study.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qkv_proj_type (str): QKV projection type. Default: linear. Choices: linear, separable_conv.
anchor_proj_type (str): Anchor projection type. Default: avgpool. Choices: avgpool, maxpool, conv2d, separable_conv, patchmerging.
anchor_one_stage (bool): Whether to use one operator or multiple progressive operators to reduce feature map resolution. Default: True.
anchor_window_down_factor (int): The downscale factor used to get the anchors.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
pretrained_window_size (list[int]): pretrained window size. This is actually not used. Default: [0, 0].
pretrained_stripe_size (list[int]): pretrained stripe size. This is actually not used. Default: [0, 0].
conv_type: The convolutional block before residual connection.
init_method: initialization method of the weight parameters used to train large scale models.
Choices: n, normal -- Swin V1 init method.
l, layernorm -- Swin V2 init method. Zero the weight and bias in the post layer normalization layer.
r, res_rescale -- EDSR rescale method. Rescale the residual blocks with a scaling factor 0.1
w, weight_rescale -- MSRResNet rescale method. Rescale the weight parameter in residual blocks with a scaling factor 0.1
t, trunc_normal_ -- nn.Linear, trunc_normal; nn.Conv2d, weight_rescale
fairscale_checkpoint (bool): Whether to use fairscale checkpoint.
offload_to_cpu (bool): used by fairscale_checkpoint
args:
out_proj_type (str): Type of the output projection in the self-attention modules. Default: linear. Choices: linear, conv2d.
local_connection (bool): Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used. "local_connection": local_connection,
euclidean_dist (bool): use Euclidean distance or inner product as the similarity metric. An ablation study.
"""
def __init__(
self,
dim: int,
input_resolution: tuple[int, int],
depth: int,
num_heads_window: int,
num_heads_stripe: int,
window_size: tuple[int, int],
stripe_size,
stripe_groups,
stripe_shift,
mlp_ratio=4.0,
qkv_bias=True,
qkv_proj_type="linear",
anchor_proj_type="avgpool",
anchor_one_stage=True,
anchor_window_down_factor=1,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
norm_layer=nn.LayerNorm,
pretrained_window_size=[0, 0],
pretrained_stripe_size=[0, 0],
conv_type="1conv",
init_method="",
fairscale_checkpoint=False,
offload_to_cpu=False,
args: GRLConfig = None, # type: ignore
):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.init_method = init_method
self.blocks = nn.ModuleList()
for i in range(depth):
block = EfficientMixAttnTransformerBlock(
dim=dim,
input_resolution=input_resolution,
num_heads_w=num_heads_window,
num_heads_s=num_heads_stripe,
window_size=window_size,
window_shift=i % 2 == 0,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_type="H" if i % 2 == 0 else "W",
stripe_shift=i % 4 in [2, 3] if stripe_shift else False,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
res_scale=0.1 if init_method == "r" else 1.0,
args=args,
)
# print(fairscale_checkpoint, offload_to_cpu)
if fairscale_checkpoint:
block = checkpoint_wrapper(block, offload_to_cpu=offload_to_cpu)
self.blocks.append(block)
self.conv = build_last_conv(conv_type, dim)
def _init_weights(self):
for n, m in self.named_modules():
if self.init_method == "w":
if isinstance(m, (nn.Linear, nn.Conv2d)) and n.find("cpb_mlp") < 0:
print("nn.Linear and nn.Conv2d weight initilization")
m.weight.data *= 0.1
elif self.init_method == "l":
if isinstance(m, nn.LayerNorm):
print("nn.LayerNorm initialization")
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 0)
elif self.init_method.find("t") >= 0:
scale = 0.1 ** (len(self.init_method) - 1) * int(self.init_method[-1])
if isinstance(m, nn.Linear) and n.find("cpb_mlp") < 0:
trunc_normal_(m.weight, std=scale)
elif isinstance(m, nn.Conv2d):
m.weight.data *= 0.1
print(
"Initialization nn.Linear - trunc_normal; nn.Conv2d - weight rescale."
)
else:
raise NotImplementedError(
f"Parameter initialization method {self.init_method} not implemented in TransformerStage."
)
def forward(self, x, x_size, table_index_mask):
res = x
for blk in self.blocks:
res = blk(res, x_size, table_index_mask)
res = bchw_to_blc(self.conv(blc_to_bchw(res, x_size)))
return res + x
def flops(self):
pass
class GRL(nn.Module):
r"""Image restoration transformer with global, non-local, and local connections
Args:
img_size (int | list[int]): Input image size. Default 64
in_channels (int): Number of input image channels. Default: 3
out_channels (int): Number of output image channels. Default: None
embed_dim (int): Patch embedding dimension. Default: 96
upscale (int): Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
img_range (float): Image range. 1. or 255.
upsampler (str): The reconstruction reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
depths (list[int]): Depth of each Swin Transformer layer.
num_heads_window (list[int]): Number of window attention heads in different layers.
num_heads_stripe (list[int]): Number of stripe attention heads in different layers.
window_size (int): Window size. Default: 8.
stripe_size (list[int]): Stripe size. Default: [8, 8]
stripe_groups (list[int]): Number of stripe groups. Default: [None, None].
stripe_shift (bool): whether to shift the stripes. This is used as an ablation study.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qkv_proj_type (str): QKV projection type. Default: linear. Choices: linear, separable_conv.
anchor_proj_type (str): Anchor projection type. Default: avgpool. Choices: avgpool, maxpool, conv2d, separable_conv, patchmerging.
anchor_one_stage (bool): Whether to use one operator or multiple progressive operators to reduce feature map resolution. Default: True.
anchor_window_down_factor (int): The downscale factor used to get the anchors.
out_proj_type (str): Type of the output projection in the self-attention modules. Default: linear. Choices: linear, conv2d.
local_connection (bool): Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used.
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
pretrained_window_size (list[int]): pretrained window size. This is actually not used. Default: [0, 0].
pretrained_stripe_size (list[int]): pretrained stripe size. This is actually not used. Default: [0, 0].
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
conv_type (str): The convolutional block before residual connection. Default: 1conv. Choices: 1conv, 3conv, 1conv1x1, linear
init_method: initialization method of the weight parameters used to train large scale models.
Choices: n, normal -- Swin V1 init method.
l, layernorm -- Swin V2 init method. Zero the weight and bias in the post layer normalization layer.
r, res_rescale -- EDSR rescale method. Rescale the residual blocks with a scaling factor 0.1
w, weight_rescale -- MSRResNet rescale method. Rescale the weight parameter in residual blocks with a scaling factor 0.1
t, trunc_normal_ -- nn.Linear, trunc_normal; nn.Conv2d, weight_rescale
fairscale_checkpoint (bool): Whether to use fairscale checkpoint.
offload_to_cpu (bool): used by fairscale_checkpoint
euclidean_dist (bool): use Euclidean distance or inner product as the similarity metric. An ablation study.
"""
def __init__(
self,
img_size=64,
in_channels: int = 3,
out_channels: int | None = None,
embed_dim=96,
upscale=2,
img_range=1.0,
upsampler="",
depths: list[int] = [6, 6, 6, 6, 6, 6],
num_heads_window: list[int] = [3, 3, 3, 3, 3, 3],
num_heads_stripe: list[int] = [3, 3, 3, 3, 3, 3],
window_size=8,
stripe_size: list[int] = [8, 8], # used for stripe window attention
stripe_groups: list[int | None] = [None, None],
stripe_shift=False,
mlp_ratio=4.0,
qkv_bias=True,
qkv_proj_type="linear",
anchor_proj_type="avgpool",
anchor_one_stage=True,
anchor_window_down_factor=1,
out_proj_type: Literal["linear", "conv2d"] = "linear",
local_connection=False,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
pretrained_window_size: list[int] = [0, 0],
pretrained_stripe_size: list[int] = [0, 0],
conv_type="1conv",
init_method="n", # initialization method of the weight parameters used to train large scale models.
fairscale_checkpoint=False, # fairscale activation checkpointing
offload_to_cpu=False,
euclidean_dist=False,
):
super().__init__()
# Process the input arguments
out_channels = out_channels or in_channels
self.in_channels = in_channels
self.out_channels = out_channels
num_out_feats = 64
self.embed_dim = embed_dim
self.upscale = upscale
self.upsampler = upsampler
self.img_range = img_range
if in_channels == 3:
rgb_mean = (0.4488, 0.4371, 0.4040)
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
else:
self.mean = torch.zeros(1, 1, 1, 1)
max_stripe_size = max([0 if s is None else s for s in stripe_size]) # type: ignore
max_stripe_groups = max([0 if s is None else s for s in stripe_groups])
max_stripe_groups *= anchor_window_down_factor
self.pad_size = max(window_size, max_stripe_size, max_stripe_groups)
# if max_stripe_size >= window_size:
# self.pad_size *= anchor_window_down_factor
# if stripe_groups[0] is None and stripe_groups[1] is None:
# self.pad_size = max(stripe_size)
# else:
# self.pad_size = window_size
self.input_resolution = to_2tuple(img_size)
self.window_size = to_2tuple(window_size)
self.shift_size = [w // 2 for w in self.window_size]
self.stripe_size = stripe_size
self.stripe_groups = stripe_groups
self.pretrained_window_size = pretrained_window_size
self.pretrained_stripe_size = pretrained_stripe_size
self.anchor_window_down_factor = anchor_window_down_factor
# Head of the network. First convolution.
self.conv_first = nn.Conv2d(in_channels, embed_dim, 3, 1, 1)
# Body of the network
self.norm_start = norm_layer(embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# stochastic depth decay rule
args = GRLConfig(
out_proj_type=out_proj_type,
local_connection=local_connection,
euclidean_dist=euclidean_dist,
)
for k, v in self.set_table_index_mask(self.input_resolution).items():
self.register_buffer(k, v, persistent=False)
self.layers = nn.ModuleList()
for i in range(len(depths)):
layer = TransformerStage(
dim=embed_dim,
input_resolution=self.input_resolution,
depth=depths[i],
num_heads_window=num_heads_window[i],
num_heads_stripe=num_heads_stripe[i],
window_size=self.window_size,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_shift=stripe_shift,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(depths[:i]) : sum(
depths[: i + 1]
) # type: ignore
], # no impact on SR results
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
conv_type=conv_type,
init_method=init_method,
fairscale_checkpoint=fairscale_checkpoint,
offload_to_cpu=offload_to_cpu,
args=args,
)
self.layers.append(layer)
self.norm_end = norm_layer(embed_dim)
# Tail of the network
self.conv_after_body = build_last_conv(conv_type, embed_dim)
#####################################################################################################
################################ 3, high quality image reconstruction ################################
if self.upsampler == "pixelshuffle":
# for classical SR
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
) | self.upsample = Upsample(upscale, num_out_feats) | 12 | 2023-11-17 01:11:47+00:00 | 12k |
motexture/VSeq2VSeq | models/unet.py | [
{
"identifier": "TransformerTemporalModel",
"path": "models/transformers.py",
"snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channel... | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from safetensors.torch import load_file
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import WEIGHTS_NAME
from .transformers import TransformerTemporalModel
from .resnet import Conditioner
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block
)
import torch
import torch.nn as nn
import torch.utils.checkpoint
import os | 9,319 | The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, it will skip the normalization and activation layers in post-processing
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1024,
attention_head_dim: Union[int, Tuple[int]] = 64,
):
super().__init__()
self.sample_size = sample_size
self.gradient_checkpointing = False
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = Conditioner(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.hidden_time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.conditioning_time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.transformer_in = TransformerTemporalModel(
num_attention_heads=8,
attention_head_dim=attention_head_dim,
in_channels=block_out_channels[0],
num_layers=1,
)
# class embedding
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding
)
self.down_blocks.append(down_block)
# mid
| # Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved.
# Copyright 2023 The ModelScope Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
r"""
UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep
and returns sample shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`):
The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, it will skip the normalization and activation layers in post-processing
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
"""
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1024,
attention_head_dim: Union[int, Tuple[int]] = 64,
):
super().__init__()
self.sample_size = sample_size
self.gradient_checkpointing = False
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = Conditioner(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.hidden_time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.conditioning_time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
self.transformer_in = TransformerTemporalModel(
num_attention_heads=8,
attention_head_dim=attention_head_dim,
in_channels=block_out_channels[0],
num_layers=1,
)
# class embedding
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding
)
self.down_blocks.append(down_block)
# mid | self.mid_block = UNetMidBlock3DCrossAttn( | 5 | 2023-11-14 09:09:09+00:00 | 12k |
TCLResearchEurope/torch-dag | torch_dag_algorithms/pruning/orbitalize_model.py | [
{
"identifier": "DagModule",
"path": "torch_dag/core/dag_module.py",
"snippet": "class DagModule(torch.nn.Module):\n MAX_LEN_REPR = None\n\n def __init__(\n self,\n name: str,\n vertices: Optional[List[Vertex]] = None,\n output_vertex: Optional[InnerVert... | import argparse
import logging
import os
from torch_dag.core.dag_module import DagModule
from torch_dag.commons.flops_computation import log_dag_characteristics
from torch_dag_algorithms.pruning import dag_orbitalizer
from torch_dag_algorithms.pruning import constants
from torch_dag.visualization.visualize_dag import DagVisualizer | 7,264 | #
# Copyright © TCL Research Europe. All rights reserved.
#
logger = logging.getLogger(__name__)
PRUNING_MODES = [
constants.PRUNING_DEFAULT_MODE_NAME,
constants.PRUNING_BLOCK_SNPE_MODE_NAME,
constants.PRUNING_WHOLE_BLOCK_MODE_NAME,
]
def parse_args():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"--model_path",
type=str,
)
arg_parser.add_argument(
"--saving_path",
type=str,
)
arg_parser.add_argument(
"--block_size",
default=8,
type=int,
)
arg_parser.add_argument(
"--pruning_mode",
type=str,
default=constants.PRUNING_BLOCK_SNPE_MODE_NAME,
choices=PRUNING_MODES,
)
arg_parser.add_argument(
"--input_shape",
type=int,
nargs="+",
help="Input shape to the orbitalized model (including batch dimension).",
)
args = arg_parser.parse_args()
return args
def orbitalize_model(
model_path,
input_shape,
pruning_mode,
block_size,
saving_path,
):
path = model_path
dag = DagModule.load(path)
dag.eval()
input_shape = tuple(input_shape)
log_dag_characteristics(dag, input_shape_without_batch=input_shape[1:])
orbitalizer = dag_orbitalizer.GeneralOrbitalizer(
pruning_mode=pruning_mode,
block_size=block_size,
)
dag, found_final_orbits = orbitalizer.orbitalize(
dag=dag,
prune_stem=True,
input_shape=input_shape,
skip_orbits_with_channels_less_than_block_size=True,
)
if not saving_path:
saving_path = os.path.join(path, "orbitalized")
else:
saving_path = saving_path
dag.save(os.path.join(saving_path))
| #
# Copyright © TCL Research Europe. All rights reserved.
#
logger = logging.getLogger(__name__)
PRUNING_MODES = [
constants.PRUNING_DEFAULT_MODE_NAME,
constants.PRUNING_BLOCK_SNPE_MODE_NAME,
constants.PRUNING_WHOLE_BLOCK_MODE_NAME,
]
def parse_args():
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"--model_path",
type=str,
)
arg_parser.add_argument(
"--saving_path",
type=str,
)
arg_parser.add_argument(
"--block_size",
default=8,
type=int,
)
arg_parser.add_argument(
"--pruning_mode",
type=str,
default=constants.PRUNING_BLOCK_SNPE_MODE_NAME,
choices=PRUNING_MODES,
)
arg_parser.add_argument(
"--input_shape",
type=int,
nargs="+",
help="Input shape to the orbitalized model (including batch dimension).",
)
args = arg_parser.parse_args()
return args
def orbitalize_model(
model_path,
input_shape,
pruning_mode,
block_size,
saving_path,
):
path = model_path
dag = DagModule.load(path)
dag.eval()
input_shape = tuple(input_shape)
log_dag_characteristics(dag, input_shape_without_batch=input_shape[1:])
orbitalizer = dag_orbitalizer.GeneralOrbitalizer(
pruning_mode=pruning_mode,
block_size=block_size,
)
dag, found_final_orbits = orbitalizer.orbitalize(
dag=dag,
prune_stem=True,
input_shape=input_shape,
skip_orbits_with_channels_less_than_block_size=True,
)
if not saving_path:
saving_path = os.path.join(path, "orbitalized")
else:
saving_path = saving_path
dag.save(os.path.join(saving_path)) | visualizer = DagVisualizer(dag=dag) | 4 | 2023-11-17 15:36:44+00:00 | 12k |
joyn-gg/discord.http | discord_http/backend.py | [
{
"identifier": "Command",
"path": "discord_http/commands.py",
"snippet": "class Command:\n def __init__(\n self,\n command: Callable,\n name: str,\n description: Optional[str] = None,\n guild_ids: Optional[list[Union[utils.Snowflake, int]]] = None,\n type: A... | import asyncio
import logging
import signal
from datetime import datetime
from hypercorn.asyncio import serve
from hypercorn.config import Config as HyperConfig
from nacl.exceptions import BadSignatureError
from nacl.signing import VerifyKey
from quart import Quart, request, abort
from quart import Response as QuartResponse
from quart.logging import default_handler
from quart.utils import MustReloadError, restart
from typing import Optional, Any, Union, TYPE_CHECKING
from .commands import Command, Interaction, Listener, SubGroup
from .enums import InteractionType
from .errors import CheckFailed
from .response import BaseResponse, Ping, MessageResponse
from .client import Client
from .context import Context | 7,291 | def error_messages(
self,
ctx: "Context",
e: Exception
) -> Optional[MessageResponse]:
"""
Used to return error messages to Discord
Parameters
----------
ctx: `Context`
The context of the command
e: `Exception`
The exception that was raised
Returns
-------
`Optional[MessageResponse]`
The message response provided by the library error handler
"""
if isinstance(e, CheckFailed):
return ctx.response.send_message(
content=str(e),
ephemeral=True
)
def _dig_subcommand(
self,
cmd: Union[Command, SubGroup],
data: dict
) -> tuple[Optional[Command], list[dict]]:
""" Used to dig through subcommands to execute correct command/autocomplete """
data_options: list[dict] = data["data"].get("options", [])
while isinstance(cmd, SubGroup):
find_next_step = next((
g for g in data_options
if g.get("name", None) and not g.get("value", None)
), None)
if not find_next_step:
return abort(400, "invalid command")
cmd = cmd.subcommands.get(find_next_step["name"], None) # type: ignore
if not cmd:
_log.warn(
f"Unhandled subcommand: {find_next_step['name']} "
"(not found in local command list)"
)
return abort(404, "command not found")
data_options = find_next_step.get("options", [])
return cmd, data_options
async def _index_interaction(self) -> Union[BaseResponse, QuartResponse, dict]:
"""
The main function to handle all HTTP requests sent by Discord
Please do not touch this function, unless you know what you're doing
"""
await self.validate_request()
data = await request.json
if self.debug_events:
self.bot.dispatch("raw_interaction", data)
context = self.bot._context(self.bot, data)
data_type = data.get("type", -1)
match data_type:
case InteractionType.ping:
_ping = Ping(state=self.bot.state, data=data)
if self.bot.has_any_dispatch("ping"):
self.bot.dispatch("ping", _ping)
else:
_log.info(f"Discord HTTP Ping | {_ping}")
return context.response.pong()
case InteractionType.application_command:
_log.debug("Received slash command, processing...")
command_name = data["data"]["name"]
cmd = self.bot.commands.get(command_name)
if not cmd:
_log.warn(
f"Unhandeled command: {command_name} "
"(not found in local command list)"
)
return QuartResponse(
"command not found",
status=404
)
cmd, data_options = self._dig_subcommand(cmd, data)
try:
payload = await cmd._make_context_and_run(
context=context
)
return QuartResponse(
payload.to_multipart(),
content_type=payload.content_type
)
except Exception as e:
if self.bot.has_any_dispatch("interaction_error"):
self.bot.dispatch("interaction_error", context, e)
else:
_log.error(
f"Error while running command {cmd.name}",
exc_info=e
)
_send_error = self.error_messages(context, e)
if _send_error and isinstance(_send_error, BaseResponse):
return _send_error.to_dict()
return abort(500)
case x if x in (
|
if TYPE_CHECKING:
_log = logging.getLogger(__name__)
__all__ = (
"DiscordHTTP",
)
def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
""" Used by Quart to cancel all tasks on shutdown. """
tasks = [
task for task in asyncio.all_tasks(loop)
if not task.done()
]
if not tasks:
return
for task in list(tasks):
task.cancel()
if task.get_coro().__name__ == "_windows_signal_support":
tasks.remove(task)
loop.run_until_complete(
asyncio.gather(*tasks, return_exceptions=True)
)
for task in tasks:
if not task.cancelled() and task.exception() is not None:
loop.call_exception_handler({
"message": "unhandled exception during shutdown",
"exception": task.exception(),
"task": task
})
class DiscordHTTP(Quart):
def __init__(self, *, client: "Client"):
"""
This serves as the fundemental HTTP server for Discord Interactions
We recommend to not touch this class, unless you know what you're doing
"""
self.uptime: datetime = datetime.now()
self.bot: "Client" = client
self.loop = self.bot.loop
self.debug_events = self.bot.debug_events
self._cog_commands: dict[str, Command] = {}
self._cog_interactions: dict[str, Interaction] = {}
self._cog_listeners: list[Listener] = []
super().__init__(__name__)
# Remove Quart's default logging handler
_quart_log = logging.getLogger("quart.app")
_quart_log.removeHandler(default_handler)
_quart_log.setLevel(logging.CRITICAL)
async def validate_request(self) -> None:
""" Used to validate requests sent by Discord Webhooks """
if not self.bot.public_key:
return abort(401, "invalid public key")
verify_key = VerifyKey(bytes.fromhex(self.bot.public_key))
signature: str = request.headers.get("X-Signature-Ed25519", "")
timestamp: str = request.headers.get("X-Signature-Timestamp", "")
try:
data = await request.data
body = data.decode("utf-8")
verify_key.verify(
f"{timestamp}{body}".encode(),
bytes.fromhex(signature)
)
except BadSignatureError:
abort(401, "invalid request signature")
except Exception:
abort(400, "invalid request body")
def error_messages(
self,
ctx: "Context",
e: Exception
) -> Optional[MessageResponse]:
"""
Used to return error messages to Discord
Parameters
----------
ctx: `Context`
The context of the command
e: `Exception`
The exception that was raised
Returns
-------
`Optional[MessageResponse]`
The message response provided by the library error handler
"""
if isinstance(e, CheckFailed):
return ctx.response.send_message(
content=str(e),
ephemeral=True
)
def _dig_subcommand(
self,
cmd: Union[Command, SubGroup],
data: dict
) -> tuple[Optional[Command], list[dict]]:
""" Used to dig through subcommands to execute correct command/autocomplete """
data_options: list[dict] = data["data"].get("options", [])
while isinstance(cmd, SubGroup):
find_next_step = next((
g for g in data_options
if g.get("name", None) and not g.get("value", None)
), None)
if not find_next_step:
return abort(400, "invalid command")
cmd = cmd.subcommands.get(find_next_step["name"], None) # type: ignore
if not cmd:
_log.warn(
f"Unhandled subcommand: {find_next_step['name']} "
"(not found in local command list)"
)
return abort(404, "command not found")
data_options = find_next_step.get("options", [])
return cmd, data_options
async def _index_interaction(self) -> Union[BaseResponse, QuartResponse, dict]:
"""
The main function to handle all HTTP requests sent by Discord
Please do not touch this function, unless you know what you're doing
"""
await self.validate_request()
data = await request.json
if self.debug_events:
self.bot.dispatch("raw_interaction", data)
context = self.bot._context(self.bot, data)
data_type = data.get("type", -1)
match data_type:
case InteractionType.ping:
_ping = Ping(state=self.bot.state, data=data)
if self.bot.has_any_dispatch("ping"):
self.bot.dispatch("ping", _ping)
else:
_log.info(f"Discord HTTP Ping | {_ping}")
return context.response.pong()
case InteractionType.application_command:
_log.debug("Received slash command, processing...")
command_name = data["data"]["name"]
cmd = self.bot.commands.get(command_name)
if not cmd:
_log.warn(
f"Unhandeled command: {command_name} "
"(not found in local command list)"
)
return QuartResponse(
"command not found",
status=404
)
cmd, data_options = self._dig_subcommand(cmd, data)
try:
payload = await cmd._make_context_and_run(
context=context
)
return QuartResponse(
payload.to_multipart(),
content_type=payload.content_type
)
except Exception as e:
if self.bot.has_any_dispatch("interaction_error"):
self.bot.dispatch("interaction_error", context, e)
else:
_log.error(
f"Error while running command {cmd.name}",
exc_info=e
)
_send_error = self.error_messages(context, e)
if _send_error and isinstance(_send_error, BaseResponse):
return _send_error.to_dict()
return abort(500)
case x if x in ( | InteractionType.message_component, | 4 | 2023-11-14 12:50:42+00:00 | 12k |
newcastleuniversity/DISPEL | dispel/providers/mobilized/io.py | [
{
"identifier": "Evaluation",
"path": "dispel/data/core.py",
"snippet": "class Evaluation(Epoch):\n \"\"\"Evaluation information for a :class:`Reading`.\n\n The evaluation corresponds to the json related task, whereas the session corresponds\n to the group of tasks that the evaluation finds its... | from collections.abc import MutableMapping
from typing import Dict, Tuple
from dispel.data.core import Evaluation, Reading
from dispel.data.epochs import EpochDefinition
from dispel.data.levels import Context, Level
from dispel.data.raw import (
RawDataSet,
RawDataSetDefinition,
RawDataSetSource,
RawDataValueDefinition,
)
from dispel.data.values import Value, ValueDefinition
import numpy as np
import pandas as pd
import scipy.io | 8,519 | data_to_cat = [
*[sub_dict[sensor_location][sensor_uni] for sensor_uni in set_sensors_uni],
*[sub_dict[sensor_location][sensor_xyz] for sensor_xyz in set_sensors_xyz],
]
columns = [
*[sensor_uni for sensor_uni in set_sensors_uni],
*[
f"{sensor_xyz}_{axis}"
for sensor_xyz in set_sensors_xyz
for axis in ["x", "y", "z"]
],
]
for remaining_measures in SET_REMAINING_MEASURES:
incl_remaining = remaining_measures in set_sensors
if incl_remaining:
data_to_cat.append(sub_dict[sensor_location][remaining_measures])
columns += [
f"{remaining_measures}_{n}"
for n in range(sub_dict[sensor_location][remaining_measures].shape[1])
]
df = pd.DataFrame(np.concatenate(data_to_cat, axis=1), columns=columns)
return df
def pre_formatting_yar(dict_mat: Dict) -> Tuple[str, Dict]:
"""Pre-format a YAR files."""
# Instantiate the data dictionary to use to create the reading
data_t1 = dict_mat["data"]["TimeMeasure1"]
# Give a name to the source here we choose YAR
source = "YAR"
return source, data_t1
def parse_mobilized_yar(path: str, verbose: bool = True) -> Reading:
"""Create a reading from mobilize-d .mat yar file."""
# Read the .mat file
dict_mat = read_matlab_file(path)
# Instantiate the reading start and end, they will be updated with recording min
# and max timestamps
reading_start = np.nan
reading_end = np.nan
# Instantiate the data dictionary and source
source, data_t1 = pre_formatting_yar(dict_mat)
# Instantiate an empty list of levels
list_level = []
# Go through the recordings
for it_level, (level_name, recording) in enumerate(data_t1.items()):
# Instantiate required variables
start = np.nan
end = np.nan
context = {}
raw_data_sets = []
if verbose:
print("___________")
print(f"Reading Level {level_name}")
# Go through assessments in the recording
for assessment, item in recording.items():
# If variable are contextual add them to context
if assessment in RECORDING_CONTEXT_KEYS:
context[assessment] = item.squeeze()
continue
# Else create a new level in the context to store information linked to
# the assessment
context[assessment] = {}
if verbose:
print("- - - - -")
print(f"{level_name}: assessment {assessment}")
# Specific case of Standards (here it is not about Acc, Gyr, Mag but
# pressure)
if assessment == "Standards":
# Go through the sources
for source in item.keys():
if verbose:
print(
f"{level_name}: assessment {assessment} - source {source}"
)
# Create a sub_dict at the level of the source
sub_dict = data_t1[level_name][assessment][source]
# create a new level in the context to store information linked
# to the source
context[assessment][source] = {}
# Usual case
if source != "INDIP":
# Go through sensor locations
for sensor_location in sub_dict.keys():
if verbose:
print(
f"{level_name}: assessment {assessment} - "
f"source {source} - "
f"sensor_location {sensor_location}"
)
# Storing contextual sensor frequency information
context[assessment][source][sensor_location] = {}
context[assessment][source][sensor_location][
"Fs"
] = sub_dict[sensor_location]["Fs"]
# Create a dataframe out of the source and sensor location
df = df_from_source(sub_dict, sensor_location)
# Create an identifier for the dataset
dataset_id = f"{assessment}-{source}-{sensor_location}"
# Create the definitions
definitions = [
| """Functionality to read Mobilize-D YAR files."""
# Define required CONSTANTS
RECORDING_CONTEXT_KEYS = {"StartDateTime", "TimeZone"}
SET_META_INFO = {"Fs", "Presence"}
SET_UNICOLUMN = {"Timestamp", "Bar"}
SET_REMAINING_MEASURES = {"Distance", "NormalizedPressure"}
SET_XYZ = {"Acc", "Gyr", "Mag"}
# Functions to read and unwrap matlab yar files
def unwrap_mat(data: np.ndarray):
"""Unwrap array generated by scipy load mat."""
if data.dtype.names is None:
return data
return {n: unwrap_mat(data[n][0, 0]) for n in data.dtype.names}
def read_matlab_file(path: str) -> dict:
"""Format matlab file to a dictionary."""
if not path.endswith(".mat"):
raise NotImplementedError("Only .mat files are supported.", path)
mat = scipy.io.loadmat(path)
res = {
"__header__": mat["__header__"],
"__version__": mat["__version__"],
"__globals__": mat["__globals__"],
"data": unwrap_mat(mat["data"]),
}
return res
# Function to create a Context from dictionary format
def flatten(dictionary, parent_key="", separator="_"):
"""Flatten a dictionary."""
items = []
for key, value in dictionary.items():
new_key = parent_key + separator + key if parent_key else key
if isinstance(value, MutableMapping):
items.extend(flatten(value, new_key, separator=separator).items())
else:
items.append((new_key, value))
return dict(items)
def parse_context(context: Dict) -> Context:
"""Parse the context information available.
Parameters
----------
context
A dictionary extracted from a mobilize-D .mat file
Returns
-------
Context
The context representation of the passed ``data``.
"""
values = [Value(ValueDefinition(item, item), context[item]) for item in context]
return Context(values)
def context_from_dict(dictionary: Dict) -> Context:
"""Flatten and cast to dictionary."""
return parse_context(flatten(dictionary, separator="."))
def df_from_source(sub_dict, sensor_location):
"""Create a dataframe from a nested dictionary and a sensor location."""
# Define sensor with multiple columns
set_sensors = set(sub_dict[sensor_location].keys()) - SET_META_INFO
set_sensors_uni = set_sensors & SET_UNICOLUMN
set_sensors_xyz = set_sensors & SET_XYZ
data_to_cat = [
*[sub_dict[sensor_location][sensor_uni] for sensor_uni in set_sensors_uni],
*[sub_dict[sensor_location][sensor_xyz] for sensor_xyz in set_sensors_xyz],
]
columns = [
*[sensor_uni for sensor_uni in set_sensors_uni],
*[
f"{sensor_xyz}_{axis}"
for sensor_xyz in set_sensors_xyz
for axis in ["x", "y", "z"]
],
]
for remaining_measures in SET_REMAINING_MEASURES:
incl_remaining = remaining_measures in set_sensors
if incl_remaining:
data_to_cat.append(sub_dict[sensor_location][remaining_measures])
columns += [
f"{remaining_measures}_{n}"
for n in range(sub_dict[sensor_location][remaining_measures].shape[1])
]
df = pd.DataFrame(np.concatenate(data_to_cat, axis=1), columns=columns)
return df
def pre_formatting_yar(dict_mat: Dict) -> Tuple[str, Dict]:
"""Pre-format a YAR files."""
# Instantiate the data dictionary to use to create the reading
data_t1 = dict_mat["data"]["TimeMeasure1"]
# Give a name to the source here we choose YAR
source = "YAR"
return source, data_t1
def parse_mobilized_yar(path: str, verbose: bool = True) -> Reading:
"""Create a reading from mobilize-d .mat yar file."""
# Read the .mat file
dict_mat = read_matlab_file(path)
# Instantiate the reading start and end, they will be updated with recording min
# and max timestamps
reading_start = np.nan
reading_end = np.nan
# Instantiate the data dictionary and source
source, data_t1 = pre_formatting_yar(dict_mat)
# Instantiate an empty list of levels
list_level = []
# Go through the recordings
for it_level, (level_name, recording) in enumerate(data_t1.items()):
# Instantiate required variables
start = np.nan
end = np.nan
context = {}
raw_data_sets = []
if verbose:
print("___________")
print(f"Reading Level {level_name}")
# Go through assessments in the recording
for assessment, item in recording.items():
# If variable are contextual add them to context
if assessment in RECORDING_CONTEXT_KEYS:
context[assessment] = item.squeeze()
continue
# Else create a new level in the context to store information linked to
# the assessment
context[assessment] = {}
if verbose:
print("- - - - -")
print(f"{level_name}: assessment {assessment}")
# Specific case of Standards (here it is not about Acc, Gyr, Mag but
# pressure)
if assessment == "Standards":
# Go through the sources
for source in item.keys():
if verbose:
print(
f"{level_name}: assessment {assessment} - source {source}"
)
# Create a sub_dict at the level of the source
sub_dict = data_t1[level_name][assessment][source]
# create a new level in the context to store information linked
# to the source
context[assessment][source] = {}
# Usual case
if source != "INDIP":
# Go through sensor locations
for sensor_location in sub_dict.keys():
if verbose:
print(
f"{level_name}: assessment {assessment} - "
f"source {source} - "
f"sensor_location {sensor_location}"
)
# Storing contextual sensor frequency information
context[assessment][source][sensor_location] = {}
context[assessment][source][sensor_location][
"Fs"
] = sub_dict[sensor_location]["Fs"]
# Create a dataframe out of the source and sensor location
df = df_from_source(sub_dict, sensor_location)
# Create an identifier for the dataset
dataset_id = f"{assessment}-{source}-{sensor_location}"
# Create the definitions
definitions = [ | RawDataValueDefinition(column, column.upper()) | 8 | 2023-11-14 10:06:46+00:00 | 12k |
NevermindNilas/TheAnimeScripter | src/segment/train.py | [
{
"identifier": "InSPyReNet",
"path": "src/segment/model/inspyrenet.py",
"snippet": "class InSPyReNet(nn.Module):\n def __init__(\n self,\n backbone,\n in_channels,\n depth=64,\n base_size=(384, 384),\n threshold: Optional[int] = 512,\n **kwargs,\n ... | import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torch.optim as optim
from pytorch_lightning import Trainer
from .model import ISNetDIS, ISNetGTEncoder, U2NET, U2NET_full2, U2NET_lite2, MODNet \
, InSPyReNet, InSPyReNet_Res2Net50, InSPyReNet_SwinB | 7,792 |
# warnings.filterwarnings("ignore")
net_names = ["isnet_is", "isnet", "isnet_gt", "u2net", "u2netl", "modnet", "inspyrnet_res", "inspyrnet_swin"]
def get_net(net_name, img_size):
if net_name == "isnet":
return ISNetDIS()
elif net_name == "isnet_is":
return ISNetDIS()
elif net_name == "isnet_gt":
return ISNetGTEncoder()
elif net_name == "u2net":
|
# warnings.filterwarnings("ignore")
net_names = ["isnet_is", "isnet", "isnet_gt", "u2net", "u2netl", "modnet", "inspyrnet_res", "inspyrnet_swin"]
def get_net(net_name, img_size):
if net_name == "isnet":
return ISNetDIS()
elif net_name == "isnet_is":
return ISNetDIS()
elif net_name == "isnet_gt":
return ISNetGTEncoder()
elif net_name == "u2net": | return U2NET_full2() | 7 | 2023-11-14 22:10:11+00:00 | 12k |
chuzhumin98/LLM_Eval | PRE/process.py | [
{
"identifier": "DataLoader",
"path": "PRE/data.py",
"snippet": "class DataLoader:\n '''\n The loader to load for evaluated task, with given prompt template to generate a series of prompts feeding for each LLM\n '''\n def __init__(self, args):\n self.path_data = args['path_data'] # th... | import os
import yaml
import json, csv
import copy
import sys
from PRE.data import DataLoader
from PRE.api import Auto_API
from PRE.exam import EXAM
from PRE.eval import PRE | 7,721 | '''
The procedure of the whole peer review framework
'''
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class Process:
'''
The control of the whole peer review process
'''
@staticmethod
def run(args): # the API used for automatic evaluation
Process.collect_task_response(args)
qualified_apis, scores_qualified = Process.conduct_qualified_exam(args)
args['config_evaluators'] = qualified_apis
args['scores_evaluators'] = scores_qualified
# print(scores_qualified)
Process.peer_review_and_evaluate(args)
return None
@staticmethod
def collect_task_response(args):
path_config_api_evaluatee = args['config_api_evaluatee']
path_config_task_data = args['config_task_data']
task_name = args['task_name']
save_dir = args['save_dir'] # the task result save dir, the task save filename = [save_dir] / task_responses / [task_name]_[model_name].json, each line is one result with json {response: str}
os.makedirs(os.path.join(save_dir, "task_responses"), exist_ok=True)
if not os.path.exists(path_config_api_evaluatee):
raise FileExistsError("Load api_evaluatee config failed: file not exist!")
if not os.path.exists(path_config_task_data):
raise FileExistsError("Load task_data config failed: file not exist!")
config_apis = yaml.load_all(open(path_config_api_evaluatee, 'r'), Loader=yaml.FullLoader) # series of APIs
config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config
process_num = args['process_num'] # multi-process or not
data_loader = DataLoader(config_task) # a task data loader
| '''
The procedure of the whole peer review framework
'''
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class Process:
'''
The control of the whole peer review process
'''
@staticmethod
def run(args): # the API used for automatic evaluation
Process.collect_task_response(args)
qualified_apis, scores_qualified = Process.conduct_qualified_exam(args)
args['config_evaluators'] = qualified_apis
args['scores_evaluators'] = scores_qualified
# print(scores_qualified)
Process.peer_review_and_evaluate(args)
return None
@staticmethod
def collect_task_response(args):
path_config_api_evaluatee = args['config_api_evaluatee']
path_config_task_data = args['config_task_data']
task_name = args['task_name']
save_dir = args['save_dir'] # the task result save dir, the task save filename = [save_dir] / task_responses / [task_name]_[model_name].json, each line is one result with json {response: str}
os.makedirs(os.path.join(save_dir, "task_responses"), exist_ok=True)
if not os.path.exists(path_config_api_evaluatee):
raise FileExistsError("Load api_evaluatee config failed: file not exist!")
if not os.path.exists(path_config_task_data):
raise FileExistsError("Load task_data config failed: file not exist!")
config_apis = yaml.load_all(open(path_config_api_evaluatee, 'r'), Loader=yaml.FullLoader) # series of APIs
config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config
process_num = args['process_num'] # multi-process or not
data_loader = DataLoader(config_task) # a task data loader | apis = [Auto_API.instantiate_api(config_api['api_type'], config_api) for config_api in config_apis] # store for all valid apis | 1 | 2023-11-16 18:40:23+00:00 | 12k |
believethehype/nostrdvm | nostr_dvm/dvm.py | [
{
"identifier": "EventDefinitions",
"path": "nostr_dvm/utils/definitions.py",
"snippet": "class EventDefinitions:\n KIND_DM = 4\n KIND_ZAP = 9735\n KIND_ANNOUNCEMENT = 31990\n KIND_NIP94_METADATA = 1063\n KIND_FEEDBACK = 7000\n KIND_NIP90_EXTRACT_TEXT = 5000\n KIND_NIP90_RESULT_EXTR... | import json
import os
import subprocess
import time
from datetime import timedelta
from sys import platform
from nostr_sdk import PublicKey, Keys, Client, Tag, Event, EventBuilder, Filter, HandleNotification, Timestamp, \
init_logger, LogLevel, Options, nip04_encrypt, ClientSigner
from nostr_dvm.utils.definitions import EventDefinitions, RequiredJobToWatch, JobToWatch
from nostr_dvm.utils.dvmconfig import DVMConfig
from nostr_dvm.utils.admin_utils import admin_make_database_updates, AdminConfig
from nostr_dvm.utils.backend_utils import get_amount_per_task, check_task_is_supported, get_task
from nostr_dvm.utils.database_utils import create_sql_table, get_or_add_user, update_user_balance, update_sql_table
from nostr_dvm.utils.mediasource_utils import input_data_file_duration
from nostr_dvm.utils.nostr_utils import get_event_by_id, get_referenced_event_by_id, send_event, check_and_decrypt_tags
from nostr_dvm.utils.output_utils import build_status_reaction
from nostr_dvm.utils.zap_utils import check_bolt11_ln_bits_is_paid, create_bolt11_ln_bits, parse_zap_event_tags, \
parse_amount_from_bolt11_invoice, zaprequest, pay_bolt11_ln_bits, create_bolt11_lud16
from nostr_dvm.utils.cashu_utils import redeem_cashu | 10,396 |
use_logger = False
if use_logger:
init_logger(LogLevel.DEBUG)
class DVM:
dvm_config: DVMConfig
admin_config: AdminConfig
keys: Keys
client: Client
job_list: list
jobs_on_hold_list: list
def __init__(self, dvm_config, admin_config=None):
self.dvm_config = dvm_config
self.admin_config = admin_config
self.keys = Keys.from_sk_str(dvm_config.PRIVATE_KEY)
wait_for_send = True
skip_disconnected_relays = True
opts = (Options().wait_for_send(wait_for_send).send_timeout(timedelta(seconds=self.dvm_config.RELAY_TIMEOUT))
.skip_disconnected_relays(skip_disconnected_relays))
signer = ClientSigner.keys(self.keys)
self.client = Client.with_opts(signer,opts)
self.job_list = []
self.jobs_on_hold_list = []
pk = self.keys.public_key()
print("Nostr DVM public key: " + str(pk.to_bech32()) + " Hex: " + str(pk.to_hex()) + " Supported DVM tasks: " +
', '.join(p.NAME + ":" + p.TASK for p in self.dvm_config.SUPPORTED_DVMS) + "\n")
for relay in self.dvm_config.RELAY_LIST:
self.client.add_relay(relay)
self.client.connect()
zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_ZAP]).since(Timestamp.now())
kinds = [EventDefinitions.KIND_NIP90_GENERIC]
for dvm in self.dvm_config.SUPPORTED_DVMS:
if dvm.KIND not in kinds:
kinds.append(dvm.KIND)
dvm_filter = (Filter().kinds(kinds).since(Timestamp.now()))
self.client.subscribe([dvm_filter, zap_filter])
create_sql_table(self.dvm_config.DB)
admin_make_database_updates(adminconfig=self.admin_config, dvmconfig=self.dvm_config, client=self.client)
class NotificationHandler(HandleNotification):
client = self.client
dvm_config = self.dvm_config
keys = self.keys
def handle(self, relay_url, nostr_event):
if EventDefinitions.KIND_NIP90_EXTRACT_TEXT <= nostr_event.kind() <= EventDefinitions.KIND_NIP90_GENERIC:
handle_nip90_job_event(nostr_event)
elif nostr_event.kind() == EventDefinitions.KIND_ZAP:
handle_zap(nostr_event)
def handle_msg(self, relay_url, msg):
return
def handle_nip90_job_event(nip90_event):
nip90_event = check_and_decrypt_tags(nip90_event, self.dvm_config)
if nip90_event is None:
return
user = get_or_add_user(self.dvm_config.DB, nip90_event.pubkey().to_hex(), client=self.client,
config=self.dvm_config)
cashu = ""
p_tag_str = ""
for tag in nip90_event.tags():
if tag.as_vec()[0] == "cashu":
cashu = tag.as_vec()[1]
elif tag.as_vec()[0] == "p":
p_tag_str = tag.as_vec()[1]
|
use_logger = False
if use_logger:
init_logger(LogLevel.DEBUG)
class DVM:
dvm_config: DVMConfig
admin_config: AdminConfig
keys: Keys
client: Client
job_list: list
jobs_on_hold_list: list
def __init__(self, dvm_config, admin_config=None):
self.dvm_config = dvm_config
self.admin_config = admin_config
self.keys = Keys.from_sk_str(dvm_config.PRIVATE_KEY)
wait_for_send = True
skip_disconnected_relays = True
opts = (Options().wait_for_send(wait_for_send).send_timeout(timedelta(seconds=self.dvm_config.RELAY_TIMEOUT))
.skip_disconnected_relays(skip_disconnected_relays))
signer = ClientSigner.keys(self.keys)
self.client = Client.with_opts(signer,opts)
self.job_list = []
self.jobs_on_hold_list = []
pk = self.keys.public_key()
print("Nostr DVM public key: " + str(pk.to_bech32()) + " Hex: " + str(pk.to_hex()) + " Supported DVM tasks: " +
', '.join(p.NAME + ":" + p.TASK for p in self.dvm_config.SUPPORTED_DVMS) + "\n")
for relay in self.dvm_config.RELAY_LIST:
self.client.add_relay(relay)
self.client.connect()
zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_ZAP]).since(Timestamp.now())
kinds = [EventDefinitions.KIND_NIP90_GENERIC]
for dvm in self.dvm_config.SUPPORTED_DVMS:
if dvm.KIND not in kinds:
kinds.append(dvm.KIND)
dvm_filter = (Filter().kinds(kinds).since(Timestamp.now()))
self.client.subscribe([dvm_filter, zap_filter])
create_sql_table(self.dvm_config.DB)
admin_make_database_updates(adminconfig=self.admin_config, dvmconfig=self.dvm_config, client=self.client)
class NotificationHandler(HandleNotification):
client = self.client
dvm_config = self.dvm_config
keys = self.keys
def handle(self, relay_url, nostr_event):
if EventDefinitions.KIND_NIP90_EXTRACT_TEXT <= nostr_event.kind() <= EventDefinitions.KIND_NIP90_GENERIC:
handle_nip90_job_event(nostr_event)
elif nostr_event.kind() == EventDefinitions.KIND_ZAP:
handle_zap(nostr_event)
def handle_msg(self, relay_url, msg):
return
def handle_nip90_job_event(nip90_event):
nip90_event = check_and_decrypt_tags(nip90_event, self.dvm_config)
if nip90_event is None:
return
user = get_or_add_user(self.dvm_config.DB, nip90_event.pubkey().to_hex(), client=self.client,
config=self.dvm_config)
cashu = ""
p_tag_str = ""
for tag in nip90_event.tags():
if tag.as_vec()[0] == "cashu":
cashu = tag.as_vec()[1]
elif tag.as_vec()[0] == "p":
p_tag_str = tag.as_vec()[1]
| task_supported, task = check_task_is_supported(nip90_event, client=self.client, | 7 | 2023-11-17 18:32:56+00:00 | 12k |
embrake/Aquilify | aquilify/wrappers/request.py | [
{
"identifier": "AwaitableOrContextManager",
"path": "aquilify/utils/_utils.py",
"snippet": "class AwaitableOrContextManager(Protocol[T_co]):\n def __await__(self) -> typing.Generator[typing.Any, None, T_co]:\n ... # pragma: no cover\n\n async def __aenter__(self) -> T_co:\n ... # ... | import json
import typing
import anyio
from http import cookies as http_cookies
from urllib.parse import parse_qs
from ..utils._utils import AwaitableOrContextManager, AwaitableOrContextManagerWrapper
from ..datastructure.core import URL, Address, FormData, Headers, State
from ..exception.http_exception import HTTPException
from ..datastructure.formparser import FormParser, MultiPartException, MultiPartParser
from ..datastructure.user_agent import UserAgentParser
from ..types import Message, Receive, Scope, Send
from multipart.multipart import parse_options_header | 7,281 |
try:
except ModuleNotFoundError:
parse_options_header = None
SERVER_PUSH_HEADERS_TO_COPY = {
"accept",
"accept-encoding",
"accept-language",
"cache-control",
"user-agent",
}
def cookie_parser(cookie_string: str) -> typing.Dict[str, str]:
cookie_dict: typing.Dict[str, str] = {}
for chunk in cookie_string.split(";"):
if "=" in chunk:
key, val = chunk.split("=", 1)
else:
key, val = "", chunk
key, val = key.strip(), val.strip()
if key or val:
cookie_dict[key] = http_cookies._unquote(val)
return cookie_dict
class ClientDisconnect(Exception):
pass
class HTTPConnection(typing.Mapping[str, typing.Any]):
def __init__(self, scope: Scope, receive: typing.Optional[Receive] = None) -> None:
assert scope["type"] in ("http", "websocket")
self.scope = scope
def __getitem__(self, key: str) -> typing.Any:
return self.scope[key]
def __iter__(self) -> typing.Iterator[str]:
return iter(self.scope)
def __len__(self) -> int:
return len(self.scope)
__eq__ = object.__eq__
__hash__ = object.__hash__
@property
def app(self) -> typing.Any:
return self.scope["app"]
@property
|
try:
except ModuleNotFoundError:
parse_options_header = None
SERVER_PUSH_HEADERS_TO_COPY = {
"accept",
"accept-encoding",
"accept-language",
"cache-control",
"user-agent",
}
def cookie_parser(cookie_string: str) -> typing.Dict[str, str]:
cookie_dict: typing.Dict[str, str] = {}
for chunk in cookie_string.split(";"):
if "=" in chunk:
key, val = chunk.split("=", 1)
else:
key, val = "", chunk
key, val = key.strip(), val.strip()
if key or val:
cookie_dict[key] = http_cookies._unquote(val)
return cookie_dict
class ClientDisconnect(Exception):
pass
class HTTPConnection(typing.Mapping[str, typing.Any]):
def __init__(self, scope: Scope, receive: typing.Optional[Receive] = None) -> None:
assert scope["type"] in ("http", "websocket")
self.scope = scope
def __getitem__(self, key: str) -> typing.Any:
return self.scope[key]
def __iter__(self) -> typing.Iterator[str]:
return iter(self.scope)
def __len__(self) -> int:
return len(self.scope)
__eq__ = object.__eq__
__hash__ = object.__hash__
@property
def app(self) -> typing.Any:
return self.scope["app"]
@property | def url(self) -> URL: | 2 | 2023-11-16 08:26:02+00:00 | 12k |
IBM/oper8 | oper8/x/utils/deps_annotation.py | [
{
"identifier": "DEPS_ANNOTATION",
"path": "oper8/x/utils/constants.py",
"snippet": "DEPS_ANNOTATION = \"oper8.org/dependency-hash\""
},
{
"identifier": "Component",
"path": "oper8/component.py",
"snippet": "class Component(Node, abc.ABC):\n \"\"\"\n This file defines the top-level... | from typing import List, Tuple, Union
from .constants import DEPS_ANNOTATION
from oper8 import Component, Session
from oper8.utils import merge_configs
import hashlib
import json
import alog | 9,053 | """
This module holds shared functionality for adding dependency annotations to all
resources that need them.
A dependency annotation on a Pod encodes a unique hash of the set of
data-resources that the Pod depends on. For example, if a Pod mounds a Secret
and a ConfigMap, the dependency annotation will hold a unique hash of the data
content of these secrets. The role of the dependency annotation is to force a
rollover when upstream data-resources change their content so that the content
is guaranteed to be picked up by the consuming Pod.
"""
# Standard
# First Party
# Local
log = alog.use_channel("DEPS")
## Common Functions ############################################################
@alog.logged_function(log.debug)
def add_deps_annotation(
component: Component,
session: Session,
resource_definition: dict,
) -> dict:
"""Add the dependency hash annotation to any pods found in the given object
Args:
component: Component
The component that this resource belongs to
session: Session
The session for this deploy
resource_definition: dict
The dict representation of the resource to modify
Returns:
resource_definition: dict
The dict representation of the resource with any modifications
applied
"""
resource_name = "{}/{}".format(
resource_definition.get("kind"),
resource_definition.get("metadata", {}).get("name"),
)
# Look for any/all pod annotations
pod = _find_pod(resource_definition)
if pod is not None:
log.debug2("Found Pod for [%s]", resource_name)
log.debug4(pod)
# Traverse through and look for anything that looks like a secret or
# configmap reference
deps_map = _find_pod_data_deps(pod)
log.debug3("Deps Map: %s", deps_map)
if deps_map:
# Go through each dependency and determine if it needs to be fetched
# of if it's part of the owning component
deps_list = []
for dep_kind, dep_names in deps_map.items():
for dep_name in dep_names:
# Look for this object in the objects managed by this
# component.
#
# NOTE: This will only be the components which have been
# declared earlier in the chart or have explicitly been
# marked as upstreams of this object.
found_in_component = False
for obj in component.managed_objects:
log.debug4("Checking %s/%s", obj.kind, obj.name)
if obj.kind == dep_kind and obj.name == dep_name:
log.debug3(
"Found intra-chart dependency of %s: %s",
resource_name,
obj,
)
deps_list.append(obj.definition)
found_in_component = True
break
# If not found in the component, add it as a lookup
if not found_in_component:
log.debug3(
"Found extra-chart dependency of %s: %s/%s",
resource_name,
dep_kind,
dep_name,
)
deps_list.append((dep_kind, dep_name))
# Add the annotation with the full list
md = pod.setdefault("metadata", {})
annos = md.setdefault("annotations", {})
| """
This module holds shared functionality for adding dependency annotations to all
resources that need them.
A dependency annotation on a Pod encodes a unique hash of the set of
data-resources that the Pod depends on. For example, if a Pod mounds a Secret
and a ConfigMap, the dependency annotation will hold a unique hash of the data
content of these secrets. The role of the dependency annotation is to force a
rollover when upstream data-resources change their content so that the content
is guaranteed to be picked up by the consuming Pod.
"""
# Standard
# First Party
# Local
log = alog.use_channel("DEPS")
## Common Functions ############################################################
@alog.logged_function(log.debug)
def add_deps_annotation(
component: Component,
session: Session,
resource_definition: dict,
) -> dict:
"""Add the dependency hash annotation to any pods found in the given object
Args:
component: Component
The component that this resource belongs to
session: Session
The session for this deploy
resource_definition: dict
The dict representation of the resource to modify
Returns:
resource_definition: dict
The dict representation of the resource with any modifications
applied
"""
resource_name = "{}/{}".format(
resource_definition.get("kind"),
resource_definition.get("metadata", {}).get("name"),
)
# Look for any/all pod annotations
pod = _find_pod(resource_definition)
if pod is not None:
log.debug2("Found Pod for [%s]", resource_name)
log.debug4(pod)
# Traverse through and look for anything that looks like a secret or
# configmap reference
deps_map = _find_pod_data_deps(pod)
log.debug3("Deps Map: %s", deps_map)
if deps_map:
# Go through each dependency and determine if it needs to be fetched
# of if it's part of the owning component
deps_list = []
for dep_kind, dep_names in deps_map.items():
for dep_name in dep_names:
# Look for this object in the objects managed by this
# component.
#
# NOTE: This will only be the components which have been
# declared earlier in the chart or have explicitly been
# marked as upstreams of this object.
found_in_component = False
for obj in component.managed_objects:
log.debug4("Checking %s/%s", obj.kind, obj.name)
if obj.kind == dep_kind and obj.name == dep_name:
log.debug3(
"Found intra-chart dependency of %s: %s",
resource_name,
obj,
)
deps_list.append(obj.definition)
found_in_component = True
break
# If not found in the component, add it as a lookup
if not found_in_component:
log.debug3(
"Found extra-chart dependency of %s: %s/%s",
resource_name,
dep_kind,
dep_name,
)
deps_list.append((dep_kind, dep_name))
# Add the annotation with the full list
md = pod.setdefault("metadata", {})
annos = md.setdefault("annotations", {}) | md["annotations"] = merge_configs( | 3 | 2023-11-15 16:43:29+00:00 | 12k |
smrfeld/tsmixer-pytorch | main.py | [
{
"identifier": "plot_preds",
"path": "utils/plotting.py",
"snippet": "def plot_preds(preds: List[List[float]], preds_gt: List[List[float]], no_feats_plot: int, fname_save: Optional[str] = None, inputs: Optional[List[List[float]]] = None, show: bool = True):\n \"\"\"Plot predictions\n\n Args:\n ... | from utils import TSMixer, plot_preds, plot_loss, TSMixerConf, TSMixerGridSearch
import argparse
import yaml
import os | 7,246 |
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--command", type=str, required=True, choices=["train", "predict", "loss", "grid-search"], help="Command to run")
parser.add_argument("--conf", type=str, required=False, help="Path to the configuration file")
parser.add_argument("--no-feats-plot", type=int, required=False, default=6, help="Number of features to plot")
parser.add_argument("--show", action="store_true", required=False, help="Show plots")
args = parser.parse_args()
if args.command == "train":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
tsmixer = TSMixer(conf)
# Train
tsmixer.train()
elif args.command == "predict":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
# Load best checkpoint
conf.initialize = TSMixerConf.Initialize.FROM_BEST_CHECKPOINT
tsmixer = TSMixer(conf)
# Predict on validation dataset
data = tsmixer.predict_val_dataset(max_samples=10, save_inputs=False)
# Plot predictions
data_plt = data[0]
assert args.no_feats_plot is not None, "Must provide number of features to plot"
plot_preds(
preds=data_plt.pred,
preds_gt=data_plt.pred_gt,
no_feats_plot=args.no_feats_plot,
show=args.show,
fname_save=os.path.join(conf.image_dir, "preds.png")
)
elif args.command == "loss":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
train_data = conf.load_training_metadata_or_new()
|
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--command", type=str, required=True, choices=["train", "predict", "loss", "grid-search"], help="Command to run")
parser.add_argument("--conf", type=str, required=False, help="Path to the configuration file")
parser.add_argument("--no-feats-plot", type=int, required=False, default=6, help="Number of features to plot")
parser.add_argument("--show", action="store_true", required=False, help="Show plots")
args = parser.parse_args()
if args.command == "train":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
tsmixer = TSMixer(conf)
# Train
tsmixer.train()
elif args.command == "predict":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
# Load best checkpoint
conf.initialize = TSMixerConf.Initialize.FROM_BEST_CHECKPOINT
tsmixer = TSMixer(conf)
# Predict on validation dataset
data = tsmixer.predict_val_dataset(max_samples=10, save_inputs=False)
# Plot predictions
data_plt = data[0]
assert args.no_feats_plot is not None, "Must provide number of features to plot"
plot_preds(
preds=data_plt.pred,
preds_gt=data_plt.pred_gt,
no_feats_plot=args.no_feats_plot,
show=args.show,
fname_save=os.path.join(conf.image_dir, "preds.png")
)
elif args.command == "loss":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
train_data = conf.load_training_metadata_or_new() | plot_loss( | 1 | 2023-11-18 19:56:18+00:00 | 12k |
Jisencc/yolov5_dual_weighting | utils/dataloaders.py | [
{
"identifier": "Albumentations",
"path": "utils/augmentations.py",
"snippet": "class Albumentations:\n # YOLOv5 Albumentations class (optional, only used if package is installed)\n def __init__(self, size=640):\n self.transform = None\n prefix = colorstr('albumentations: ')\n ... | import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import yaml
import mss
import pafy
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
letterbox, mixup, random_perspective)
from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements,
check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy,
xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first | 8,344 | im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
self.frame += 1
return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line
path = Path(path).read_text().rsplit()
files = []
for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
p = str(Path(p).resolve())
if '*' in p:
files.extend(sorted(glob.glob(p, recursive=True))) # glob
elif os.path.isdir(p):
files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir
elif os.path.isfile(p):
files.append(p) # files
else:
raise FileNotFoundError(f'{p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
self.transforms = transforms # optional
self.vid_stride = vid_stride # video frame-rate stride
if any(videos):
self._new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
for _ in range(self.vid_stride):
self.cap.grab()
ret_val, im0 = self.cap.retrieve()
while not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
path = self.files[self.count]
self._new_video(path)
ret_val, im0 = self.cap.read()
self.frame += 1
# im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
im0 = cv2.imread(path) # BGR
assert im0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
if self.transforms:
im = self.transforms(im0) # transforms
else:
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
return path, im, im0, self.cap, s
def _new_video(self, path):
# Create a new video capture object
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees
# self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493
def _cv2_rotate(self, im):
# Rotate a cv2 video manually
if self.orientation == 0:
return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
elif self.orientation == 180:
return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
elif self.orientation == 90:
return cv2.rotate(im, cv2.ROTATE_180)
return im
def __len__(self):
return self.nf # number of files
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
torch.backends.cudnn.benchmark = True # faster for fixed-size inference
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
self.vid_stride = vid_stride # video frame-rate stride
sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
n = len(sources)
| # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Dataloaders and dataset utils
"""
# Parameters
HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data'
IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
# Returns a single hash value of a list of paths (files or dirs)
size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes
h = hashlib.sha256(str(size).encode()) # hash sizes
h.update(''.join(paths).encode()) # hash paths
return h.hexdigest() # return hash
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
with contextlib.suppress(Exception):
rotation = dict(img._getexif().items())[orientation]
if rotation in [6, 8]: # rotation 270 or 90
s = (s[1], s[0])
return s
def exif_transpose(image):
"""
Transpose a PIL image accordingly if it has an EXIF Orientation tag.
Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose()
:param image: The image to transpose.
:return: An image.
"""
exif = image.getexif()
orientation = exif.get(0x0112, 1) # default 1
if orientation > 1:
method = {
2: Image.FLIP_LEFT_RIGHT,
3: Image.ROTATE_180,
4: Image.FLIP_TOP_BOTTOM,
5: Image.TRANSPOSE,
6: Image.ROTATE_270,
7: Image.TRANSVERSE,
8: Image.ROTATE_90}.get(orientation)
if method is not None:
image = image.transpose(method)
del exif[0x0112]
image.info['exif'] = exif.tobytes()
return image
def seed_worker(worker_id):
# Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader
worker_seed = torch.initial_seed() % 2 ** 32
np.random.seed(worker_seed)
random.seed(worker_seed)
def create_dataloader(path,
imgsz,
batch_size,
stride,
single_cls=False,
hyp=None,
augment=False,
cache=False,
pad=0.0,
rect=False,
rank=-1,
workers=8,
image_weights=False,
quad=False,
prefix='',
shuffle=False,
seed=0):
if rect and shuffle:
LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
shuffle = False
with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
dataset = LoadImagesAndLabels(
path,
imgsz,
batch_size,
augment=augment, # augmentation
hyp=hyp, # hyperparameters
rect=rect, # rectangular batches
cache_images=cache,
single_cls=single_cls,
stride=int(stride),
pad=pad,
image_weights=image_weights,
prefix=prefix)
batch_size = min(batch_size, len(dataset))
nd = torch.cuda.device_count() # number of CUDA devices
nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates
generator = torch.Generator()
generator.manual_seed(6148914691236517205 + seed + RANK)
return loader(dataset,
batch_size=batch_size,
shuffle=shuffle and sampler is None,
num_workers=nw,
sampler=sampler,
pin_memory=PIN_MEMORY,
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,
worker_init_fn=seed_worker,
generator=generator), dataset
class InfiniteDataLoader(dataloader.DataLoader):
""" Dataloader that reuses workers
Uses same syntax as vanilla DataLoader
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
self.iterator = super().__iter__()
def __len__(self):
return len(self.batch_sampler.sampler)
def __iter__(self):
for _ in range(len(self)):
yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadScreenshots:
# YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`
def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):
# source = [screen_number left top width height] (pixels)
check_requirements('mss')
source, *params = source.split()
self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0
if len(params) == 1:
self.screen = int(params[0])
elif len(params) == 4:
left, top, width, height = (int(x) for x in params)
elif len(params) == 5:
self.screen, left, top, width, height = (int(x) for x in params)
self.img_size = img_size
self.stride = stride
self.transforms = transforms
self.auto = auto
self.mode = 'stream'
self.frame = 0
self.sct = mss.mss()
# Parse monitor shape
monitor = self.sct.monitors[self.screen]
self.top = monitor['top'] if top is None else (monitor['top'] + top)
self.left = monitor['left'] if left is None else (monitor['left'] + left)
self.width = width or monitor['width']
self.height = height or monitor['height']
self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}
def __iter__(self):
return self
def __next__(self):
# mss screen capture: get raw pixels from the screen as np array
im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR
s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: '
if self.transforms:
im = self.transforms(im0) # transforms
else:
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
self.frame += 1
return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s
class LoadImages:
# YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`
def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line
path = Path(path).read_text().rsplit()
files = []
for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
p = str(Path(p).resolve())
if '*' in p:
files.extend(sorted(glob.glob(p, recursive=True))) # glob
elif os.path.isdir(p):
files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir
elif os.path.isfile(p):
files.append(p) # files
else:
raise FileNotFoundError(f'{p} does not exist')
images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'image'
self.auto = auto
self.transforms = transforms # optional
self.vid_stride = vid_stride # video frame-rate stride
if any(videos):
self._new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, f'No images or videos found in {p}. ' \
f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
for _ in range(self.vid_stride):
self.cap.grab()
ret_val, im0 = self.cap.retrieve()
while not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
path = self.files[self.count]
self._new_video(path)
ret_val, im0 = self.cap.read()
self.frame += 1
# im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False
s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '
else:
# Read image
self.count += 1
im0 = cv2.imread(path) # BGR
assert im0 is not None, f'Image Not Found {path}'
s = f'image {self.count}/{self.nf} {path}: '
if self.transforms:
im = self.transforms(im0) # transforms
else:
im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize
im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB
im = np.ascontiguousarray(im) # contiguous
return path, im, im0, self.cap, s
def _new_video(self, path):
# Create a new video capture object
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees
# self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493
def _cv2_rotate(self, im):
# Rotate a cv2 video manually
if self.orientation == 0:
return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
elif self.orientation == 180:
return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
elif self.orientation == 90:
return cv2.rotate(im, cv2.ROTATE_180)
return im
def __len__(self):
return self.nf # number of files
class LoadStreams:
# YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`
def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
torch.backends.cudnn.benchmark = True # faster for fixed-size inference
self.mode = 'stream'
self.img_size = img_size
self.stride = stride
self.vid_stride = vid_stride # video frame-rate stride
sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
n = len(sources) | self.sources = [clean_str(x) for x in sources] # clean source names for later | 8 | 2023-11-12 13:28:26+00:00 | 12k |
BSoD123456/ffta_us_cn | ffta_modifier.py | [
{
"identifier": "load_rom",
"path": "ffta_sect.py",
"snippet": "def report(*args):\ndef readval_le(raw, offset, size, signed):\ndef writeval_le(val, dst, offset, size):\ndef rvs_endian(src, size, dst_signed):\n def __init__(self, raw, offset):\n def raw(self):\n def mod(self):\n def buf_offs... | import json, re
import os, os.path, shutil
import pdb
from ffta_sect import load_rom
from ffta_charset import c_ffta_charset_ocr, c_ffta_charset_dynamic
from ffta_font_generator import make_ffta_font_gen
from ffta_parser import make_script_parser, collect_text_cmds
from hexdump import hexdump as hd
from pprint import pprint | 7,522 | assert vd > vs
return sl
sl += 1
def _next_sinfo(self, si, sinfo):
itr, idxp, (vidxp, val) = sinfo
try:
nvidxp, nval = next(itr)
except StopIteration:
infi = (INF,)
sinfo[1] = infi
sinfo[2] = (infi, None)
return
sinfo[2] = (nvidxp, nval)
if si >= len(self.trmpg):
sinfo[1] = nvidxp
return
tpgs = self.trmpg[si]
cpg = None
for i in range(len(nvidxp), -1, -1):
pg = nvidxp[:i]
if pg in tpgs:
cpg = pg
break
if cpg is None:
sinfo[1] = nvidxp
return
sl = self._sublen_idx(nvidxp, vidxp)
if sl < len(cpg):
sinfo[1] = cpg
return
ridxp = []
for i in range(len(nvidxp)):
v = self._getidxv(idxp, i)
if i > sl:
v = 0
elif i == sl:
v += 1
ridxp.append(v)
sinfo[1] = tuple(ridxp)
def _next(self):
mnidxp = None
cidxps = []
for si, (itr, idxp, _) in enumerate(self.stats):
cidxp = self._calc_cidx(idxp, si)
cidxps.append(cidxp)
if mnidxp is None or self._cmp_idx(cidxp, mnidxp) < 0:
mnidxp = cidxp
if mnidxp and mnidxp[0] == INF:
return None, True
rs = []
for si, (sinfo, cidxp) in enumerate(zip(self.stats, cidxps)):
itr, idxp, (vidxp, val) = sinfo
if self._cmp_idx(cidxp, mnidxp) == 0:
rs.append((vidxp, val))
self._next_sinfo(si, sinfo)
else:
rs.append((vidxp, None))
return rs, False
def iter(self):
self.reset()
while True:
rs, is_done = self._next()
if is_done:
return
yield tuple(rs)
class c_ffta_modifier:
def __init__(self, conf):
self.conf = conf
def load(self):
self.srom = {}
self.chst = {}
for nm, rconf in self.conf['roms']['src'].items():
rom, chst = self.load_rom(rconf)
self.srom[nm] = rom
self.chst[nm] = chst
self.fntgen, self.chst['font'] = self.load_font()
self.txts = self.load_texts()
def export(self):
rmk = None
sben = self.conf.get('sandbox', {}).get('enable', False)
sbon = self.conf.get('sandbox', {}).get('only', False)
if not sben or not sbon:
rmk = self.export_rom(self.conf['roms']['dst']['rels'])
if sben:
sbrmk = self.export_rom(self.conf['roms']['dst']['sndbx'], as_sndbx = True)
if rmk is None:
rmk = sbrmk
return rmk
def export_rom(self, rom_conf, *args, **kargs):
rmk = self.repack(*args, **kargs)
if not rmk:
report('warning', f'something wrong while repacking')
return
self.save_rom(rom_conf['path'], rmk)
return rmk
def load_rom(self, rom_conf):
lfunc = load_rom[rom_conf['type']]
rom = lfunc(rom_conf['path'])
if 'charset' in rom_conf:
if 'charset_ocr' in rom_conf and rom_conf['charset_ocr']:
chstrom = rom
else:
chstrom = None
chst = c_ffta_charset_ocr(rom_conf['charset'], chstrom)
chst.load()
else:
chst = None
return rom, chst
def load_font(self):
conf = self.conf['font']
| #! python3
# coding: utf-8
CONF = {
'roms': {
'src': {
'base': {
'path': r'roms\fftaus.gba',
'type': 'us',
'charset': 'charset_us.json',
},
'text': {
'path': r'roms\fftacnfx.gba',
'type': 'cn',
'charset': 'charset_cn.json',
'charset_ocr': True,
},
},
'dst': {
'rels': {
'path': r'roms\fftauscn.gba',
},
'sndbx': {
'path': r'roms\fftauscn_sndbx.gba',
},
},
},
'work': {
'text': {
'raw': {
# comparison
'comp': 'raw_txt_comp_wk.json',
# uncovered
'uncv': 'raw_txt_uncv_wk.json',
},
'src': {
# base rom
'base': 'src_txt_base_wk.json',
# text rom
'text': 'src_txt_text_wk.json',
},
'mod': {
# translate
'trans': 'trans_txt.json',
},
'fix': {
# fix cn text
'fcomp': 'trans_fix_txt.json',
},
},
},
'font': {
# 精品点阵体9×9 BoutiqueBitmap9x9
# from https://github.com/scott0107000/BoutiqueBitmap9x9
'src': 'font/BoutiqueBitmap9x9_1.7.ttf',
'size': 10,
'offset': (0, 1),
'charset': 'charset_uscn_wk.json',
'charset_nosave': True,
# only hanzi
'dybase': 0x122,
},
'text': {
'skip': {
'@[40]@[42]',
'@[42]',
'dummy@[40]@[42]',
'dummy@[42]',
'dummy',
'Dummy',
},
'skipf': [],
'skipf_defer': [],
'modf': [],
'align': {
's_text': [
((36,), (35,)),
((60,), (60,)),
],
'pages:battle': [
((54,), (51,)),
],
'pages:quest/': [
((1, 0), (0, 200)),
],
'fx_text': [
((8, 60), (8, 58)),
((8, 61), (8, 60)),
((8, 62), (8, 62)),
((25,), (24,)),
],
'words:refer': [
((107,), (104,)),
],
'words:rumor': [
((62,), (61,)),
((63,), (63,)),
],
'words:battle': [
((179,), (176,)),
((543,), (531,)),
],
},
'trim': {
's_text': [{
(61,),
}, {
(61,),
}],
},
},
'sandbox': {
'enable': False,
'only': True,
'scene': {
'boot': 2,
},
'script': {
'__arg_scene_text_start': None,
'scene': lambda txts: (lambda c, f: {
2: {
0: [
#0x19, 0xdc, 0xa,
*c('''
<C: 05>
<48: 00>
<1F: 0F 43 00 20 00>
<29: 0F 00>
<27: 0F 07 00 00>
#<1F: 11 87 02 20 00>
#<24: 11>
#<2E: 11 00 00 08 00 00 00>
#<1F: 10 87 02 20 00>
#<24: 10>
#<2E: 10 00 00 08 00 00 00>
<4B: 14>
<4D: 64 01 00 01>
<47: 90 00 E8 00 01 00>
<3F: 87 77 00>
<3F: 43 53 01>
<3F: 53 54 02>
<3F: 77 57 03>
<3F: 57 56 04>
<3F: 87 67 05>
<3F: 67 66 06>
<3F: 87 57 07>
<3F: 56 46 08>
<3F: 46 45 09>
<3F: 57 47 0A>
<3F: 47 46 0B>
<3F: 66 65 0C>
'''),
*f['fade'](False),
*f['wait'](60),
0x19, 'start',
*c('''
flip:
<27: 0F 06 00 00>
<02: >
'''),
'start:',
*f['move'](0xf, 5, 4, 3),
*f['face'](0xf, 0),
0x27, 0xF, 0x16, 0, 0,
0x12, 0xaf,
0x71, 0x3, 0x0,
0x19, 'skip2',
*c('''
<1F: 4E 56 00 20 00>
<27: 4E 05 00 00>
'''),
*[
v
for tab in txts
for v in [
*c('''
<27: 0F 06 00 00>
<27: 0F 06 00 00>
'''),
*[
v
for targs in tab
for v in [
0x1, 'flip',
*(
f['text_full'](*targs) if targs[1] == 0xf else
f['text_full'](*targs, chlp=0x63, chld=3)
),
]
],
0x12, 0xb0,
0x71, 0x3, 0x0,
0x19, 'skip',
]
],
'skip:',
0x27, 0x4E, 6, 0, 0,
'skip2:',
*f['move'](0xf, 6, 4, 3),
*f['move'](0xf, 7, 8, 0),
*f['wait'](30),
*f['text_full'](171, 0xf, 0x80, 2),
*f['face'](0xf, 3),
*f['text_full'](172, 0xf, 0x80),
*f['text_full'](173, 0xf, 0x80),
*f['face'](0xf, 0),
*f['fade'](True),
*f['setflag'](0x301),
*f['done'](5),
],
},
})(lambda s: [
int(v, 16) if len(v) <= 2 else v
for r in [rr.split('#')[0].strip() for rr in s.splitlines()] if r
for v in (r[1:-1].replace(':', '').split() if r.startswith('<') else [r])
], {
'wait': lambda frms: [
0x15, frms,
],
'move': lambda ch, x, y, d, spd=1: [
0x3e, ch, x, y, d, 0, spd, 0,
],
'warp': lambda ch, x, y: [
0x2f, ch, x, y,
],
'face': lambda ch, d, spd=1: [
0x25, ch, d, spd,
],
'fade': lambda is_out, frms=60: [
0x6, 0x31, frms, 0x0,
] if is_out else [
0x6, 0x13, frms, 0x64,
],
'load': lambda sc: [
# load scene
0x1c, sc, 0x0,
],
'done': lambda typ=2: [
# safe return
0x17, typ,
],
'setflag': lambda fidx, val=1: [
0x1a, fidx & 0xff, fidx >> 8, val,
],
'text_full': lambda tidx, prt, flg, sc=0, sub=0, chlp=0, chld=0: (
lambda rsub, rtidx: [
*([
# set sc_idx at 0x2002192 = 0x162 + 0x2002030
0x1b, 0x62, 0x1, sc,
] if sc > 0 else []),
*([
# set sub_idx at 0x2003c2a = 0x1bfa + 0x2002030
0x1b, 0xfa, 0x1b, rsub,
] if sub > 0 else []),
*([
# load char at chlp
0x1f, prt, chlp, chld, 0x20, 0x0,
] if prt > 1 and chlp > 0 else []),
*([
0xf, rtidx, prt, flg,
] if sub > 0 else [
0xf, tidx, prt, flg,
]),
*([
# unload char
0x24, prt,
0x22, prt,
] if prt > 1 and chlp > 0 else []),
#*([], prt == 0x17 and breakpoint())[0],
]
)(
tidx // 24 + 1 + 10 * sub, tidx % 24
),
}),
},
'text': {
's_text': {
'1/171': '对了,@[4D]还有一件事。@[40]@[42]',
'1/172': '从这里出去后,@[4D]所有流言都将被解锁。@[4F]@[42]读过第1条流言后,@[4D]新增的20个任务@[4D]会解锁前10个。@[4F]@[42]读过第2条后,@[4D]会解锁后10个。@[40]@[42]',
'1/173': '但是因为任务@[4D]最多只能有15个,@[4D]请存档后分别解锁。@[40]@[42]',
},
'fx_text': {
'1/47': '要偷看书的后面吗?@[4D]@[3210]是/@[ ]否@[ ]@[42]',
'1/48': '要继续吗?@[4D]@[3210]是/@[ ]否@[ ]@[42]',
},
},
'direct': {
'rumor_data': {
(0, 6): {
'flag1': 0x301,
'val1': 0,
'flag2': 0,
'val2': 0,
},
(6, 0x7f): {
'flag1': 0x301,
'val1': 1,
'flag2': 0,
'val2': 0,
},
},
'quest_data': {
(377, 397): {
'flag1': 0x301,
'val1': 1,
'flag2': 0x507,
'val2': 1,
'flag3': 0,
'val3': 0,
'nest': 0,
},
(387, 397): {
'flag2': 0x508,
'val2': 1,
},
381: {
'_uk3': 161,
},
(384, 386): {
'_uk3': 161,
},
387: {
'_uk3': 161,
},
(393, 395): {
'_uk2': 0,
},
},
},
},
}
def chk_has_japanese(txt, *_):
    """Return True if txt contains any hiragana or katakana character.

    Extra positional args (table name etc. from the skipf callback
    protocol) are accepted and ignored.
    """
    # Hiragana block (U+3041..U+3093) and katakana block (U+30A1..U+30FA).
    return any(
        0x3040 < ord(ch) < 0x3094 or 0x30a0 < ord(ch) < 0x30fb
        for ch in txt
    )
def chk_invalid_words(txt, tname, *_):
    """Return True for entries that should be skipped as invalid words.

    Only the rumor word table is filtered: purely numeric entries there
    are considered invalid.  All other tables pass through.
    """
    if tname != 'words:rumor':
        return False
    return txt.isdigit()
# Register skip filters: a text entry matching any of these callbacks is
# excluded from translation processing.
CONF['text']['skipf'].extend([
    chk_has_japanese,
    chk_invalid_words,
])
def mod_static_refer(bt, tt, tname, bidxp, tidxp, btxts, ttxts):
    """Remap static ``@[51xx]`` word references inside a translated text.

    References whose index is below REF_TOP point into the base rom's
    'words:refer' table and are kept unchanged; references at or above
    REF_TOP are replaced inline by the corresponding translated word.

    Only ``tt`` (the translated text) is inspected here; the remaining
    parameters are unused but belong to the common modf callback
    signature.

    NOTE(review): relies on a module-level ``import re`` (not visible in
    this excerpt) — confirm it exists.
    """
    REF_TOP = 104
    if '@[51' not in tt:
        return tt
    bwt = btxts['words:refer']
    twt = ttxts['words:refer']
    def _rplc(m):
        refv = int(m.group(1), 16)
        refi = (refv,)
        if refv < REF_TOP:
            # Validate the base table knows this reference (raises
            # KeyError otherwise, matching the original behaviour), then
            # keep the marker untouched.
            _ = bwt[refi]
            #if not sv.startswith('CRN_'):
            return m.group(0)
        assert refi in twt
        return twt[refi]
    return re.sub(r'\@\[51([0-9a-fA-F]{2})\]', _rplc, tt)
# Register text modifiers applied to each translated entry.
CONF['text']['modf'].extend([
    mod_static_refer,
])
def codejumper(cd):
    """Resolve symbolic jump labels in a script byte list.

    ``cd`` is a mixed list: ints are copied through verbatim; a string
    ending in ':' defines a label at the current output position; any
    other string references a label and is replaced by two bytes holding
    the 16-bit (two's-complement, little-endian) offset from the position
    just past the operand to the label.

    Returns the resolved list of ints.

    Raises:
        ValueError: if a reference names an undefined label.
    """
    labels = {}
    out = []
    # First pass: record label positions; reserve two bytes per reference.
    for item in cd:
        if isinstance(item, str):
            if item.endswith(':'):
                labels[item[:-1]] = len(out)
            else:
                out.append(item)
                out.append(None)  # placeholder for the offset high byte
        else:
            out.append(item)
    # Second pass: patch each reference with its relative offset.
    for i, item in enumerate(out):
        if not isinstance(item, str):
            continue
        if item not in labels:
            raise ValueError(f'unknown label: {item}')
        assert i < len(out) - 1 and out[i + 1] is None
        delta = labels[item] - i - 2
        if delta < 0:
            delta += 0x10000  # wrap to unsigned 16-bit
        out[i] = delta & 0xff
        out[i + 1] = delta >> 8
    return out
# Post-process sandbox scene scripts with codejumper to resolve labels.
CONF['sandbox']['script']['__mod_scene'] = codejumper
def report(*args):
    """Join the truthy args with single spaces, print and return the line."""
    line = ' '.join(filter(None, args))
    print(line)
    return line
# Sentinel index component used by c_tab_align_iter to mark an exhausted
# table iterator (compares greater than any real index).
INF = float('inf')
class c_tab_align_iter:
    """Iterate several index-keyed tables in lockstep.

    Each table maps an index path (a tuple of ints, compared element-wise
    with missing trailing components treated as 0) to a value.  iter()
    yields, for every index position present in any table, one
    (index_path, value) pair per table; a table with no entry at the
    current position reports (its_pending_index, None) instead.

    align_map is a list of tuples of index paths, one path per table,
    marking positions that should line up; later indices of the lagging
    tables are shifted accordingly.  trim_page gives, per table, a set of
    page prefixes whose entries are re-numbered locally within the page.
    """

    # Sentinel index component marking an exhausted table iterator.
    _INF = float('inf')

    def __init__(self, *tabs, align_map=None, trim_page=None):
        # FIX: the defaults were mutable lists ([]); use None sentinels so
        # a shared default object can never leak between instances.
        self.tabs = tabs
        self.amap = self._hndl_amap([] if align_map is None else align_map)
        self.trmpg = [] if trim_page is None else trim_page

    def _hndl_amap(self, align_map):
        # Convert alignment points into per-table lists of (base, dest)
        # shifts; _calc_cidx applies them cumulatively.
        add_lsts = []
        for amap_itm in align_map:
            mxidxp = None
            cidxps = []
            for i, idxp in enumerate(amap_itm):
                while i >= len(add_lsts):
                    add_lsts.append([])
                add_lst = add_lsts[i]
                cidxp = idxp
                # Apply previously recorded shifts for this table first.
                for abas, adst in add_lst:
                    cidxp, _ = self._add_idx(cidxp, abas, adst)
                cidxps.append(cidxp)
                if mxidxp is None or self._cmp_idx(cidxp, mxidxp) > 0:
                    mxidxp = cidxp
            # Tables behind the maximum get an extra shift up to it.
            for i, cidxp in enumerate(cidxps):
                add_lst = add_lsts[i]
                if self._cmp_idx(cidxp, mxidxp) == 0:
                    continue
                add_lst.append((cidxp, mxidxp))
        return add_lsts

    def _iter_tab(self, idx):
        # Yield (index_path, value) pairs of table idx; nothing if the
        # table slot is falsy (e.g. None).
        tab = self.tabs[idx]
        if tab:
            yield from tab.items()

    def reset(self):
        """(Re)initialise per-table iteration state."""
        self.stats = []
        for i in range(len(self.tabs)):
            itr = self._iter_tab(i)
            zidx = tuple()
            # sinfo layout: [iterator, comparison index, (pending index, value)]
            sinfo = [itr, zidx, (zidx, None)]
            self._next_sinfo(i, sinfo)
            self.stats.append(sinfo)

    @staticmethod
    def _getidxv(idxpath, i):
        # Missing trailing components read as 0.
        if i < len(idxpath):
            return idxpath[i]
        else:
            return 0

    def _cmp_idx(self, idxp1, idxp2):
        # Element-wise comparison; returns -1/0/1 like a classic cmp().
        for i in range(max(len(idxp1), len(idxp2))):
            v1 = self._getidxv(idxp1, i)
            v2 = self._getidxv(idxp2, i)
            if v1 > v2:
                return 1
            elif v1 < v2:
                return -1
        return 0

    def _trim_idx(self, idxp):
        # Drop trailing zero components (they compare equal anyway).
        for i in range(len(idxp) - 1, -1, -1):
            if idxp[i] != 0:
                break
        else:
            return tuple()
        return tuple(idxp[:i+1])

    def _add_idx(self, src, abas, adst):
        # Shift src by (adst - abas) when src >= abas; components after the
        # first place where src diverges from abas are left unchanged.
        # Returns (shifted index, whether the shift applied).
        if self._cmp_idx(src, abas) < 0:
            return src, False
        r = []
        do_add = True
        for i in range(max(len(src), len(abas), len(adst))):
            vs = self._getidxv(src, i)
            vb = self._getidxv(abas, i)
            vd = self._getidxv(adst, i)
            vr = vs
            if do_add:
                vr += vd - vb
            if vs != vb:
                do_add = False
            r.append(vr)
        return self._trim_idx(r), True

    def _calc_cidx(self, idxp, si):
        # Apply the accumulated alignment shifts of table si to idxp.
        if si >= len(self.amap):
            return idxp
        cidxp = idxp
        for abas, adst in self.amap[si]:
            cidxp, is_done = self._add_idx(cidxp, abas, adst)
            if not is_done:
                break
        return cidxp

    def _sublen_idx(self, dst, src):
        # Length of the common prefix of dst and src (dst must compare
        # greater at the first differing component).
        sl = 0
        for i in range(max(len(dst), len(src))):
            vd = self._getidxv(dst, i)
            vs = self._getidxv(src, i)
            if vd != vs:
                assert vd > vs
                return sl
            sl += 1
        # FIX: the original fell through returning None when dst == src,
        # which would crash the `sl < len(cpg)` comparison in _next_sinfo;
        # return the full prefix length instead.
        return sl

    def _next_sinfo(self, si, sinfo):
        # Advance table si: fetch the next (index, value) pair and derive
        # the comparison index, applying trim_page re-numbering if set.
        itr, idxp, (vidxp, val) = sinfo
        try:
            nvidxp, nval = next(itr)
        except StopIteration:
            infi = (self._INF,)
            sinfo[1] = infi
            sinfo[2] = (infi, None)
            return
        sinfo[2] = (nvidxp, nval)
        if si >= len(self.trmpg):
            sinfo[1] = nvidxp
            return
        tpgs = self.trmpg[si]
        cpg = None
        # Find the longest configured page prefix of the new index.
        for i in range(len(nvidxp), -1, -1):
            pg = nvidxp[:i]
            if pg in tpgs:
                cpg = pg
                break
        if cpg is None:
            sinfo[1] = nvidxp
            return
        sl = self._sublen_idx(nvidxp, vidxp)
        if sl < len(cpg):
            # Entered a new page: restart local numbering at the page index.
            sinfo[1] = cpg
            return
        # Same page: bump the previous comparison index just past the
        # shared prefix, zeroing everything below it.
        ridxp = []
        for i in range(len(nvidxp)):
            v = self._getidxv(idxp, i)
            if i > sl:
                v = 0
            elif i == sl:
                v += 1
            ridxp.append(v)
        sinfo[1] = tuple(ridxp)

    def _next(self):
        # One lockstep step: find the minimal aligned index over all
        # tables, emit values for the tables sitting on it, advance them.
        mnidxp = None
        cidxps = []
        for si, (itr, idxp, _) in enumerate(self.stats):
            cidxp = self._calc_cidx(idxp, si)
            cidxps.append(cidxp)
            if mnidxp is None or self._cmp_idx(cidxp, mnidxp) < 0:
                mnidxp = cidxp
        if mnidxp is None:
            # FIX: with no tables at all the original looped forever;
            # report completion immediately.
            return None, True
        if mnidxp and mnidxp[0] == self._INF:
            return None, True
        rs = []
        for si, (sinfo, cidxp) in enumerate(zip(self.stats, cidxps)):
            itr, idxp, (vidxp, val) = sinfo
            if self._cmp_idx(cidxp, mnidxp) == 0:
                rs.append((vidxp, val))
                self._next_sinfo(si, sinfo)
            else:
                rs.append((vidxp, None))
        return rs, False

    def iter(self):
        """Yield tuples of per-table (index_path, value) pairs in order."""
        self.reset()
        while True:
            rs, is_done = self._next()
            if is_done:
                return
            yield tuple(rs)
class c_ffta_modifier:
    def __init__(self, conf):
        # conf: top-level configuration dict (see module-level CONF).
        self.conf = conf
    def load(self):
        """Load all source roms with their charsets, the font, and texts."""
        # srom: name -> loaded rom; chst: name -> charset (or None).
        self.srom = {}
        self.chst = {}
        for nm, rconf in self.conf['roms']['src'].items():
            rom, chst = self.load_rom(rconf)
            self.srom[nm] = rom
            self.chst[nm] = chst
        # Font generator plus the dynamic charset it renders into.
        self.fntgen, self.chst['font'] = self.load_font()
        # NOTE(review): load_texts is defined outside this excerpt —
        # presumably returns the parsed text tables; confirm.
        self.txts = self.load_texts()
    def export(self):
        """Repack and write the destination rom(s).

        Writes the release rom unless sandbox mode is both enabled and
        marked 'only'; writes the sandbox rom whenever sandbox is enabled.
        Returns the release repack result when produced, otherwise the
        sandbox one (None if repacking failed).
        """
        rmk = None
        sben = self.conf.get('sandbox', {}).get('enable', False)
        sbon = self.conf.get('sandbox', {}).get('only', False)
        if not sben or not sbon:
            rmk = self.export_rom(self.conf['roms']['dst']['rels'])
        if sben:
            sbrmk = self.export_rom(self.conf['roms']['dst']['sndbx'], as_sndbx = True)
            if rmk is None:
                rmk = sbrmk
        return rmk
    def export_rom(self, rom_conf, *args, **kargs):
        """Repack with the given args and save to rom_conf['path'].

        Returns the repacked rom maker, or None when repacking failed.
        """
        rmk = self.repack(*args, **kargs)
        if not rmk:
            report('warning', f'something wrong while repacking')
            return
        self.save_rom(rom_conf['path'], rmk)
        return rmk
def load_rom(self, rom_conf):
lfunc = load_rom[rom_conf['type']]
rom = lfunc(rom_conf['path'])
if 'charset' in rom_conf:
if 'charset_ocr' in rom_conf and rom_conf['charset_ocr']:
chstrom = rom
else:
chstrom = None
chst = c_ffta_charset_ocr(rom_conf['charset'], chstrom)
chst.load()
else:
chst = None
return rom, chst
def load_font(self):
conf = self.conf['font'] | chst = c_ffta_charset_dynamic( | 2 | 2023-11-12 18:43:53+00:00 | 12k |
bytedance/LapNet | lapnet/networks/orig_ferminet.py | [
{
"identifier": "envelopes",
"path": "lapnet/envelopes.py",
"snippet": "_MAX_POLY_ORDER = 5 # highest polynomial used in envelopes\n PRE_ORBITAL = enum.auto()\n PRE_DETERMINANT = enum.auto()\n POST_DETERMINANT = enum.auto()\n ISOTROPIC = enum.auto()\n ABS_ISOTROPIC = enum.auto()\n DIAGONAL = enum... | import functools
import attr
import chex
import jax
import lapjax.numpy as jnp
from typing import Any, Iterable, Mapping, Optional, Sequence, Tuple, Union
from lapnet import envelopes
from lapnet import sto
from lapnet.utils import scf
from .protocol import *
from .utils import construct_input_features
from lapnet.networks import network_blocks | 7,429 | output_dims = dims_orbital_in
elif options.envelope.apply_type == envelopes.EnvelopeType.PRE_DETERMINANT:
# Applied to orbitals.
output_dims = nspin_orbitals
elif options.envelope.apply_type == envelopes.EnvelopeType.POST_DETERMINANT:
# Applied to all determinants.
output_dims = 1
else:
raise ValueError('Unknown envelope type')
params['envelope'] = options.envelope.init(
natom=natom, output_dims=output_dims, hf=hf_solution, ndim=ndim)
# orbital shaping
key, subkey = jax.random.split(key, num=2)
params['orbital'] = init_orbital_shaping(
key=subkey,
input_dim=dims_orbital_in,
nspin_orbitals=nspin_orbitals,
bias_orbitals=options.bias_orbitals)
if hf_solution is not None:
params['single'], params['orbital'] = init_to_hf_solution(
hf_solution=hf_solution,
single_layers=params['single'],
orbital_layer=params['orbital'],
determinants=options.determinants,
active_spin_channels=active_spin_channels,
eps=eps)
return params
## Network layers ##
def make_ferminet_features(charges: Optional[jnp.ndarray] = None,
                           nspins: Optional[Tuple[int, ...]] = None,
                           ndim: int = 3) -> FeatureLayer:
  """Creates the standard FermiNet input-feature layer.

  Per electron the features are the electron-atom distances followed by the
  displacement vectors (ndim + 1 values per atom, flattened over atoms);
  per electron pair, the electron-electron distance plus displacement.
  charges and nspins are accepted for interface compatibility but unused.
  """
  del charges, nspins  # unused by the standard features

  def init() -> Tuple[Tuple[int, int], Param]:
    # (one-electron dims per atom, two-electron dims per pair); no params.
    return (ndim + 1, ndim + 1), {}

  def apply(ae, r_ae, ee, r_ee) -> Tuple[jnp.ndarray, jnp.ndarray]:
    feats_ae = jnp.concatenate((r_ae, ae), axis=2)
    feats_ae = jnp.reshape(feats_ae, [jnp.shape(feats_ae)[0], -1])
    feats_ee = jnp.concatenate((r_ee, ee), axis=2)
    return feats_ae, feats_ee

  return FeatureLayer(init=init, apply=apply)
def construct_symmetric_features(h_one: jnp.ndarray, h_two: jnp.ndarray,
                                 nspins: Tuple[int, int]) -> jnp.ndarray:
  """Builds permutation-equivariant features from the two streams.

  Concatenates, per electron: its own one-electron features, the
  per-spin-channel means of the one-electron features (broadcast back to
  all electrons), and the per-spin-channel means of its two-electron
  features.  Unoccupied spin channels (empty splits) are skipped, so the
  output width is 3*n1 + 2*n2 when both spins are occupied and
  2*n1 + n2 for a spin-polarised system.

  Args:
    h_one: one-electron activations, shape (nelectrons, n1).
    h_two: two-electron activations, shape (nelectrons, nelectrons, n2).
    nspins: number of spin-up and spin-down electrons.

  Returns:
    Array of shape (nelectrons, output width) as described above.
  """
  partitions = network_blocks.array_partitions(nspins)
  one_split = jnp.split(h_one, partitions, axis=0)
  two_split = jnp.split(h_two, partitions, axis=0)
  nelec = h_one.shape[0]
  # h.size == 0 marks an unoccupied spin channel and is skipped.
  g_one = [
      jnp.tile(jnp.mean(h, axis=0, keepdims=True), [nelec, 1])
      for h in one_split if h.size > 0
  ]
  g_two = [jnp.mean(h, axis=0) for h in two_split if h.size > 0]
  return jnp.concatenate([h_one] + g_one + g_two, axis=1)
def fermi_net_orbitals(
params,
pos: jnp.ndarray,
atoms: jnp.ndarray,
nspins: Tuple[int, ...],
options: FermiNetOptions = FermiNetOptions(),
):
"""Forward evaluation of the Fermionic Neural Network up to the orbitals.
Args:
params: A dictionary of parameters, containing fields:
`atoms`: atomic positions, used to construct input features.
`single`: a list of dictionaries with params 'w' and 'b', weights for the
one-electron stream of the network.
`double`: a list of dictionaries with params 'w' and 'b', weights for the
two-electron stream of the network.
`orbital`: a list of two weight matrices, for spin up and spin down (no
bias is necessary as it only adds a constant to each row, which does not
change the determinant).
`dets`: weight on the linear combination of determinants
`envelope`: a dictionary with fields `sigma` and `pi`, weights for the
multiplicative envelope.
pos: The electron positions, a 3N dimensional vector.
atoms: Array with positions of atoms.
nspins: Tuple with number of spin up and spin down electrons.
options: Network configuration.
Returns:
One matrix (two matrices if options.full_det is False) that exchange columns
under the exchange of inputs of shape (ndet, nalpha+nbeta, nalpha+nbeta) (or
(ndet, nalpha, nalpha) and (ndet, nbeta, nbeta)) and a tuple of (ae, r_ae,
r_ee), the atom-electron vectors, distances and electron-electron distances.
"""
| # Copyright 2020 DeepMind Technologies Limited.
# Copyright 2023 Bytedance Ltd. and/or its affiliate
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Fermionic Neural Network in JAX."""
# import jax.numpy as jnp
## Network settings ##
@attr.s(auto_attribs=True, kw_only=True)
class FermiNetOptions:
  """Options controlling the FermiNet architecture.
  Attributes:
    ndim: dimension of system. Change only with caution.
    hidden_dims: Tuple of pairs, where each pair contains the number of hidden
      units in the one-electron and two-electron stream in the corresponding
      layer of the FermiNet. The number of layers is given by the length of the
      tuple.
    use_last_layer: If true, the outputs of the one- and two-electron streams
      are combined into permutation-equivariant features and passed into the
      final orbital-shaping layer. Otherwise, just the output of the
      one-electron stream is passed into the orbital-shaping layer.
    determinants: Number of determinants to use.
    full_det: If true, evaluate determinants over all electrons. Otherwise,
      block-diagonalise determinants into spin channels.
    bias_orbitals: If true, include a bias in the final linear layer to shape
      the outputs into orbitals.
    envelope_label: Envelope to use to impose orbitals go to zero at infinity.
      See envelopes module.
    envelope: Envelope object to create and apply the multiplicative envelope.
    feature_layer: Feature object to create and apply the input features for the
      one- and two-electron layers.
  """
  ndim: int = 3
  hidden_dims: LayerArgs = ((256, 32), (256, 32), (256, 32), (256, 32))
  use_last_layer: bool = False
  determinants: int = 16
  full_det: bool = True
  bias_orbitals: bool = False
  envelope_label: envelopes.EnvelopeLabel = envelopes.EnvelopeLabel.ABS_ISOTROPIC
  # attr.Factory(takes_self=True) lets the default read the already-set
  # envelope_label of this instance.
  envelope: envelopes.Envelope = attr.ib(
      default=attr.Factory(
          lambda self: envelopes.get_envelope(self.envelope_label),
          takes_self=True))
  # Default feature layer is the standard FermiNet one, matching ndim.
  feature_layer: FeatureLayer = attr.ib(
      default=attr.Factory(
          lambda self: make_ferminet_features(ndim=self.ndim), takes_self=True))
## Network initialisation ##
def init_layers(
    key: chex.PRNGKey, dims_one_in: Sequence[int], dims_one_out: Sequence[int],
    dims_two_in: Sequence[int],
    dims_two_out: Sequence[int]) -> Tuple[Sequence[Param], Sequence[Param]]:
  """Creates parameters for the one- and two-electron streams.

  The two-electron stream may be as deep as the one-electron stream or
  exactly one layer shallower (when FermiNetOptions.use_last_layer is
  False the final two-electron layer is omitted).

  Args:
    key: JAX RNG state.
    dims_one_in: input width of each one-electron layer.
    dims_one_out: output width (hidden units) of each one-electron layer.
    dims_two_in: input width of each two-electron layer.
    dims_two_out: output width of each two-electron layer.

  Returns:
    (single, double): lists of linear-layer parameters for the one- and
    two-electron streams respectively.

  Raises:
    ValueError: if an in/out dimension list pair differs in length, or the
    two-electron depth is incompatible with the one-electron depth.
  """
  if len(dims_one_in) != len(dims_one_out):
    raise ValueError(
        'Length of one-electron stream inputs and outputs not identical.')
  if len(dims_two_in) != len(dims_two_out):
    raise ValueError(
        'Length of two-electron stream inputs and outputs not identical.')
  if len(dims_two_in) not in (len(dims_one_out), len(dims_one_out) - 1):
    raise ValueError('Number of layers in two electron stream must match or be '
                     'one fewer than the number of layers in the one-electron '
                     'stream')
  single = []
  double = []
  n_double = len(dims_two_in)
  for layer, (d_in, d_out) in enumerate(zip(dims_one_in, dims_one_out)):
    key, subkey = jax.random.split(key)
    single.append(
        network_blocks.init_linear_layer(
            subkey, in_dim=d_in, out_dim=d_out, include_bias=True))
    if layer < n_double:
      key, subkey = jax.random.split(key)
      double.append(
          network_blocks.init_linear_layer(
              subkey,
              in_dim=dims_two_in[layer],
              out_dim=dims_two_out[layer],
              include_bias=True))
  return single, double
def init_orbital_shaping(
    key: chex.PRNGKey,
    input_dim: int,
    nspin_orbitals: Sequence[int],
    bias_orbitals: bool,
) -> Sequence[Param]:
  """Creates the final linear layer mapping activations to orbitals.

  Args:
    key: JAX RNG state.
    input_dim: width of the activations fed into orbital shaping.
    nspin_orbitals: number of orbitals required in each spin channel.
    bias_orbitals: whether the layer carries a bias term.

  Returns:
    One set of linear-layer parameters per spin channel.
  """
  # Split keys sequentially so the RNG stream matches a per-channel loop.
  subkeys = []
  for _ in nspin_orbitals:
    key, subkey = jax.random.split(key)
    subkeys.append(subkey)
  return [
      network_blocks.init_linear_layer(
          subkey, in_dim=input_dim, out_dim=n_orb, include_bias=bias_orbitals)
      for subkey, n_orb in zip(subkeys, nspin_orbitals)
  ]
def init_to_hf_solution(
    hf_solution: scf.Scf,
    single_layers: Sequence[Param],
    orbital_layer: Sequence[Param],
    determinants: int,
    active_spin_channels: Sequence[int],
    eps: float = 0.01) -> Tuple[Sequence[Param], Sequence[Param]]:
  """Sets initial parameters to match Hartree-Fock.
  NOTE: this does not handle the envelope parameters, which are done in the
  appropriate envelope initialisation functions. Not all envelopes support HF
  initialisation.
  Args:
    hf_solution: Hartree-Fock state to match.
    single_layers: parameters (weights and biases) for the one-electron stream,
      with length: number of layers in the one-electron stream.
    orbital_layer: parameters for the orbital-shaping layer, length: number of
      spin-channels in the system.
    determinants: Number of determinants used in the final wavefunction.
    active_spin_channels: Number of particles in each spin channel containing at
      least one particle.
    eps: scaling factor for all weights and biases such that they are
      initialised close to zero unless otherwise required to match Hartree-Fock.
  Returns:
    Tuple of parameters for the one-electron stream and the orbital shaping
    layer respectively.
  """
  # Scale all params in one-electron stream to be near zero.
  single_layers = jax.tree_map(lambda param: param * eps, single_layers)
  # Initialize first layer of Fermi Net to match s- or p-type orbitals.
  # The sto and sto-poly envelopes can exactly capture the s-type orbital,
  # so the effect of the neural network part is constant, while the p-type
  # orbital also has a term multiplied by x, y or z.
  # j indexes the hidden unit assigned to the current atomic orbital.
  j = 0
  for ia, atom in enumerate(hf_solution.molecule):
    coeffs = sto.STO_6G_COEFFS[atom.symbol]
    for orb in coeffs.keys():
      if orb[1] == 's':
        single_layers[0]['b'] = single_layers[0]['b'].at[j].set(1.0)
        j += 1
      elif orb[1] == 'p':
        w = single_layers[0]['w']
        # With the default features each atom contributes 4 inputs ordered
        # (|r_ae|, x, y, z); rows ia*4+1..ia*4+3 pick out the vector part.
        w = w.at[ia * 4 + 1:(ia + 1) * 4, j:j + 3].set(jnp.eye(3))
        single_layers[0]['w'] = w
        j += 3
      else:
        raise NotImplementedError('HF Initialization not implemented for '
                                  '%s orbitals' % orb[1])
  # Scale all params in orbital shaping to be near zero.
  orbital_layer = jax.tree_map(lambda param: param * eps, orbital_layer)
  for i, spin in enumerate(active_spin_channels):
    # Initialize last layer to match Hartree-Fock weights on basis set.
    norb = hf_solution.mean_field.mo_coeff[i].shape[0]
    mat = hf_solution.mean_field.mo_coeff[i][:, :spin]
    w = orbital_layer[i]['w']
    # Every determinant gets the same HF coefficient block (j reuses the
    # earlier loop variable; harmless as the first loop has finished).
    for j in range(determinants):
      w = w.at[:norb, j * spin:(j + 1) * spin].set(mat)
    orbital_layer[i]['w'] = w
  return single_layers, orbital_layer
def init_fermi_net_params(
    key: chex.PRNGKey,
    atoms: jnp.ndarray,
    nspins: Tuple[int, ...],
    options: FermiNetOptions,
    hf_solution: Optional[scf.Scf] = None,
    eps: float = 0.01,
) -> ParamTree:
  """Initializes parameters for the Fermionic Neural Network.

  Args:
    key: JAX RNG state.
    atoms: (natom, ndim) array of atom positions.
    nspins: A tuple with either the number of spin-up and spin-down electrons,
      or the total number of electrons. If the latter, the spins are instead
      given as an input to the network.
    options: network options.
    hf_solution: If present, initialise the parameters to match the Hartree-Fock
      solution. Otherwise a random initialisation is use.
    eps: If hf_solution is present, scale all weights and biases except the
      first layer by this factor such that they are initialised close to zero.

  Returns:
    PyTree of network parameters. Spin-dependent parameters are only created for
    spin channels containing at least one particle.
  """
  if options.envelope_label in (envelopes.EnvelopeLabel.STO,
                                envelopes.EnvelopeLabel.STO_POLY):
    if options.bias_orbitals:
      raise ValueError('Cannot bias orbitals w/STO envelope.')
  if hf_solution is not None:
    if options.use_last_layer:
      raise ValueError('Cannot use last layer w/HF init')
    # BUG FIX: the original compared options.envelope.apply_type (an
    # EnvelopeType enum) against the strings 'sto'/'sto-poly', which can
    # never be equal, so HF init always raised.  Check the envelope label
    # against the STO labels instead, mirroring the check above.
    if options.envelope_label not in (envelopes.EnvelopeLabel.STO,
                                      envelopes.EnvelopeLabel.STO_POLY):
      raise ValueError('When using HF init, '
                       'envelope_type must be `sto` or `sto-poly`.')
  active_spin_channels = [spin for spin in nspins if spin > 0]
  nchannels = len(active_spin_channels)
  if nchannels == 0:
    raise ValueError('No electrons present!')
  params = {}
  (num_one_features, num_two_features), params['input'] = (
      options.feature_layer.init())
  # The input to layer L of the one-electron stream is from
  # construct_symmetric_features and shape (nelectrons, nfeatures), where
  # nfeatures is i) output from the previous one-electron layer; ii) the mean
  # for each spin channel from each layer; iii) the mean for each spin channel
  # from each two-electron layer. We don't create features for spin channels
  # which contain no electrons (i.e. spin-polarised systems).
  nfeatures = lambda out1, out2: (nchannels + 1) * out1 + nchannels * out2
  natom, ndim = atoms.shape
  # one-electron stream, per electron:
  # - one-electron features per atom (default: electron-atom vectors
  # (ndim/atom) and distances (1/atom)),
  # two-electron stream, per pair of electrons:
  # - two-electron features per electron pair (default: electron-electron
  # vector (dim) and distance (1))
  feature_one_dims = natom * num_one_features
  feature_two_dims = num_two_features
  dims_one_in = (
      [nfeatures(feature_one_dims, feature_two_dims)] +
      [nfeatures(hdim[0], hdim[1]) for hdim in options.hidden_dims[:-1]])
  dims_one_out = [hdim[0] for hdim in options.hidden_dims]
  if options.use_last_layer:
    dims_two_in = ([feature_two_dims] +
                   [hdim[1] for hdim in options.hidden_dims[:-1]])
    dims_two_out = [hdim[1] for hdim in options.hidden_dims]
  else:
    dims_two_in = ([feature_two_dims] +
                   [hdim[1] for hdim in options.hidden_dims[:-2]])
    dims_two_out = [hdim[1] for hdim in options.hidden_dims[:-1]]
  if not options.use_last_layer:
    # Just pass the activations from the final layer of the one-electron stream
    # directly to orbital shaping.
    dims_orbital_in = options.hidden_dims[-1][0]
  else:
    dims_orbital_in = nfeatures(options.hidden_dims[-1][0],
                                options.hidden_dims[-1][1])
  # How many spin-orbitals do we need to create per spin channel?
  nspin_orbitals = []
  for nspin in active_spin_channels:
    if options.full_det:
      # Dense determinant. Need N orbitals per electron per determinant.
      norbitals = sum(nspins) * options.determinants
    else:
      # Spin-factored block-diagonal determinant. Need nspin orbitals per
      # electron per determinant.
      norbitals = nspin * options.determinants
    nspin_orbitals.append(norbitals)
  # Layer initialisation
  key, subkey = jax.random.split(key, num=2)
  params['single'], params['double'] = init_layers(
      key=subkey,
      dims_one_in=dims_one_in,
      dims_one_out=dims_one_out,
      dims_two_in=dims_two_in,
      dims_two_out=dims_two_out)
  # create envelope params
  if options.envelope.apply_type == envelopes.EnvelopeType.PRE_ORBITAL:
    # Applied to output from final layer of 1e stream.
    output_dims = dims_orbital_in
  elif options.envelope.apply_type == envelopes.EnvelopeType.PRE_DETERMINANT:
    # Applied to orbitals.
    output_dims = nspin_orbitals
  elif options.envelope.apply_type == envelopes.EnvelopeType.POST_DETERMINANT:
    # Applied to all determinants.
    output_dims = 1
  else:
    raise ValueError('Unknown envelope type')
  params['envelope'] = options.envelope.init(
      natom=natom, output_dims=output_dims, hf=hf_solution, ndim=ndim)
  # orbital shaping
  key, subkey = jax.random.split(key, num=2)
  params['orbital'] = init_orbital_shaping(
      key=subkey,
      input_dim=dims_orbital_in,
      nspin_orbitals=nspin_orbitals,
      bias_orbitals=options.bias_orbitals)
  if hf_solution is not None:
    params['single'], params['orbital'] = init_to_hf_solution(
        hf_solution=hf_solution,
        single_layers=params['single'],
        orbital_layer=params['orbital'],
        determinants=options.determinants,
        active_spin_channels=active_spin_channels,
        eps=eps)
  return params
## Network layers ##
def make_ferminet_features(charges: Optional[jnp.ndarray] = None,
                           nspins: Optional[Tuple[int, ...]] = None,
                           ndim: int = 3) -> FeatureLayer:
  """Creates the standard FermiNet input-feature layer.

  Per electron the features are the electron-atom distances followed by the
  displacement vectors (ndim + 1 values per atom, flattened over atoms);
  per electron pair, the electron-electron distance plus displacement.
  charges and nspins are accepted for interface compatibility but unused.
  """
  del charges, nspins  # unused by the standard features

  def init() -> Tuple[Tuple[int, int], Param]:
    # (one-electron dims per atom, two-electron dims per pair); no params.
    return (ndim + 1, ndim + 1), {}

  def apply(ae, r_ae, ee, r_ee) -> Tuple[jnp.ndarray, jnp.ndarray]:
    feats_ae = jnp.concatenate((r_ae, ae), axis=2)
    feats_ae = jnp.reshape(feats_ae, [jnp.shape(feats_ae)[0], -1])
    feats_ee = jnp.concatenate((r_ee, ee), axis=2)
    return feats_ae, feats_ee

  return FeatureLayer(init=init, apply=apply)
def construct_symmetric_features(h_one: jnp.ndarray, h_two: jnp.ndarray,
                                 nspins: Tuple[int, int]) -> jnp.ndarray:
  """Builds permutation-equivariant features from the two streams.

  Concatenates, per electron: its own one-electron features, the
  per-spin-channel means of the one-electron features (broadcast back to
  all electrons), and the per-spin-channel means of its two-electron
  features.  Unoccupied spin channels (empty splits) are skipped, so the
  output width is 3*n1 + 2*n2 when both spins are occupied and
  2*n1 + n2 for a spin-polarised system.

  Args:
    h_one: one-electron activations, shape (nelectrons, n1).
    h_two: two-electron activations, shape (nelectrons, nelectrons, n2).
    nspins: number of spin-up and spin-down electrons.

  Returns:
    Array of shape (nelectrons, output width) as described above.
  """
  partitions = network_blocks.array_partitions(nspins)
  one_split = jnp.split(h_one, partitions, axis=0)
  two_split = jnp.split(h_two, partitions, axis=0)
  nelec = h_one.shape[0]
  # h.size == 0 marks an unoccupied spin channel and is skipped.
  g_one = [
      jnp.tile(jnp.mean(h, axis=0, keepdims=True), [nelec, 1])
      for h in one_split if h.size > 0
  ]
  g_two = [jnp.mean(h, axis=0) for h in two_split if h.size > 0]
  return jnp.concatenate([h_one] + g_one + g_two, axis=1)
def fermi_net_orbitals(
params,
pos: jnp.ndarray,
atoms: jnp.ndarray,
nspins: Tuple[int, ...],
options: FermiNetOptions = FermiNetOptions(),
):
"""Forward evaluation of the Fermionic Neural Network up to the orbitals.
Args:
params: A dictionary of parameters, containing fields:
`atoms`: atomic positions, used to construct input features.
`single`: a list of dictionaries with params 'w' and 'b', weights for the
one-electron stream of the network.
`double`: a list of dictionaries with params 'w' and 'b', weights for the
two-electron stream of the network.
`orbital`: a list of two weight matrices, for spin up and spin down (no
bias is necessary as it only adds a constant to each row, which does not
change the determinant).
`dets`: weight on the linear combination of determinants
`envelope`: a dictionary with fields `sigma` and `pi`, weights for the
multiplicative envelope.
pos: The electron positions, a 3N dimensional vector.
atoms: Array with positions of atoms.
nspins: Tuple with number of spin up and spin down electrons.
options: Network configuration.
Returns:
One matrix (two matrices if options.full_det is False) that exchange columns
under the exchange of inputs of shape (ndet, nalpha+nbeta, nalpha+nbeta) (or
(ndet, nalpha, nalpha) and (ndet, nbeta, nbeta)) and a tuple of (ae, r_ae,
r_ee), the atom-electron vectors, distances and electron-electron distances.
"""
| ae, ee, r_ae, r_ee = construct_input_features(pos, atoms) | 3 | 2023-11-13 08:19:53+00:00 | 12k |
civrealm/civrealm | src/civrealm/envs/freeciv_wrapper/tensor_wrapper.py | [
{
"identifier": "TensorAction",
"path": "src/civrealm/envs/freeciv_wrapper/action_wrapper.py",
"snippet": "class TensorAction(Wrapper):\n \"\"\"\n A wrapper that defines tensor action spaces, transforms tensor actions into\n actions that could be handeled by FreecivBaseEnv instance, and adds m... | import numpy as np
from civrealm.envs import FreecivBaseEnv
from civrealm.envs.freeciv_wrapper.config import default_tensor_config
from .action_wrapper import TensorAction
from .core import Wrapper
from .observation_wrapper import CacheLastObs, TensorObservation
from .tensor_base_wrapper import TensorBase | 7,333 |
class TensorWrapper(Wrapper):
"""
TensorWrapper is used to make Civrealm environment tensorized by converting
observations from FreecivBaseEnv into tensors and tensor actions back to actions compatible with
FreecivBaseEnv.
TensorWrapper is composed `TensorBase`, `TensorAction`, `TensorObservation`
and `CacheLastObs`.
Parameters
----------
env
config:
tensor env configuration
Attributes
----------
config: dict
tensor wrapper configuration
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
super().__init__(
CacheLastObs(
|
class TensorWrapper(Wrapper):
"""
TensorWrapper is used to make Civrealm environment tensorized by converting
observations from FreecivBaseEnv into tensors and tensor actions back to actions compatible with
FreecivBaseEnv.
TensorWrapper is composed `TensorBase`, `TensorAction`, `TensorObservation`
and `CacheLastObs`.
Parameters
----------
env
config:
tensor env configuration
Attributes
----------
config: dict
tensor wrapper configuration
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
super().__init__(
CacheLastObs( | TensorObservation(TensorAction(TensorBase(env, config=config))) | 4 | 2023-11-18 19:35:50+00:00 | 12k |
RAIVNLab/MatFormer-OLMo | olmo/model.py | [
{
"identifier": "PathOrStr",
"path": "olmo/aliases.py",
"snippet": ""
},
{
"identifier": "BeamSearch",
"path": "olmo/beam_search.py",
"snippet": "class BeamSearch:\n \"\"\"\n Implements the beam search algorithm for decoding the most likely sequences.\n\n :param end_index: The i... | import math
import os
import torch
import torch.backends.cuda
import torch.nn as nn
import torch.nn.functional as F
import warnings
from abc import abstractmethod
from typing import Dict, List, NamedTuple, Optional, Sequence, Tuple, cast
from torch import einsum
from .aliases import PathOrStr
from .beam_search import BeamSearch, Constraint, FinalSequenceScorer, Sampler
from .config import ActivationType, BlockType, LayerNormType, ModelConfig
from .exceptions import OlmoConfigurationError
from functools import partial
from cached_path import cached_path | 10,784 | self.k_norm: Optional[LayerNormBase] = None
self.q_norm: Optional[LayerNormBase] = None
if config.attention_layer_norm:
self.k_norm = LayerNormBase.build(
config, size=config.d_model // config.n_heads if config.multi_query_attention else None
)
self.q_norm = LayerNormBase.build(config)
# Activation function.
self.act = Activation.build(config)
assert (self.act.output_multiplier * config.mlp_ratio * config.d_model) % 1 == 0
# Attention output projection.
self.attn_out = nn.Linear(
config.d_model, config.d_model, bias=config.include_bias, device=config.init_device
)
# Feed-forward output projection.
self.ff_out = nn.Linear(
int(self.act.output_multiplier * config.mlp_ratio * config.d_model),
config.d_model,
bias=config.include_bias,
device=config.init_device,
)
self.ff_out._is_residual = True # type: ignore
# Rotary embeddings.
if self.config.rope:
self.rotary_emb = RotaryEmbedding(config)
self.register_buffer(
"pos_emb", self.rotary_emb(config.max_sequence_length, device=config.init_device), persistent=False
)
def get_rotary_embedding(self, seq_len: int, device: Optional[torch.device]) -> torch.Tensor:
if self.pos_emb is not None and self.pos_emb.shape[-2] >= seq_len: # type: ignore
return self.pos_emb[:seq_len] # type: ignore
pos_emb = self.rotary_emb(seq_len, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def attention(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
attention_bias: Optional[torch.Tensor] = None,
layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
B, T, C = q.size() # batch size, sequence length, d_model
dtype = k.dtype
# Optionally apply layer norm to keys and queries.
if self.q_norm is not None and self.k_norm is not None:
q = self.q_norm(q).to(dtype=dtype)
k = self.k_norm(k).to(dtype=dtype)
# Move head forward to be next to the batch dim.
# shape: (B, nh, T, hs)
q = q.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
if self.config.multi_query_attention:
# shape: (B, 1, T, hs)
k = k.view(B, T, 1, C // self.config.n_heads).transpose(1, 2)
# shape: (B, 1, T, hs)
v = v.view(B, T, 1, C // self.config.n_heads).transpose(1, 2)
else:
# shape: (B, nh, T, hs)
k = k.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
# shape: (B, nh, T, hs)
v = v.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
if layer_past is not None:
past_key, past_value = layer_past
k = torch.cat((past_key, k), dim=-2)
v = torch.cat((past_value, v), dim=-2)
if use_cache:
present = (k, v)
else:
present = None
query_len, key_len = q.shape[-2], k.shape[-2] # could be different if layer_past not None
if self.config.rope:
# Apply rotary embeddings.
positions = self.get_rotary_embedding(key_len, q.device)
q = apply_rotary_pos_emb(positions[key_len - query_len : key_len], q)
k = apply_rotary_pos_emb(positions, k)
if attention_bias is not None:
attention_bias = attention_bias[:, :, key_len - query_len : key_len, :key_len]
# Get the attention scores.
# shape: (B, nh, T, hs)
att = F.scaled_dot_product_attention(
q,
k,
v,
attn_mask=None if attention_bias is None else attention_bias.to(dtype=dtype),
dropout_p=0.0 if not self.training else self.config.attention_dropout,
is_causal=attention_bias is None,
)
# Re-assemble all head outputs side-by-side.
att = att.transpose(1, 2).contiguous().view(B, T, C)
# Apply output projection.
return self.attn_out(att), present
@abstractmethod
def forward(
self,
x: torch.Tensor,
attention_bias: Optional[torch.FloatTensor] = None,
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
raise NotImplementedError
@classmethod
def build(cls, config: ModelConfig) -> OlmoBlock:
| """
Adapted from
[MosaiclML](https://github.com/mosaicml/examples.git) and
[minGPT](https://github.com/karpathy/minGPT.git)
"""
from __future__ import annotations
__all__ = [
"LayerNormBase",
"LayerNorm",
"RMSLayerNorm",
"RotaryEmbedding",
"Activation",
"GELU",
"ReLU",
"SwiGLU",
"OlmoBlock",
"OlmoSequentialBlock",
"OlmoParallelBlock",
"Olmo",
"OlmoOutput",
"OlmoGenerateOutput",
]
class MatformerManager:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.current_factor = 1
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
class LayerNormBase(nn.Module):
def __init__(self, config: ModelConfig):
super().__init__()
self.config = config
@abstractmethod
def forward(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
@classmethod
def build(cls, config: ModelConfig, size: Optional[int] = None) -> LayerNormBase:
if config.layer_norm_type == LayerNormType.default:
return LayerNorm(config, size=size, low_precision=False)
elif config.layer_norm_type == LayerNormType.low_precision:
return LayerNorm(config, size=size, low_precision=True)
elif config.layer_norm_type == LayerNormType.rms:
return RMSLayerNorm(config, size=size, low_precision=False)
elif config.layer_norm_type == LayerNormType.low_precision_rms:
return RMSLayerNorm(config, size=size, low_precision=True)
else:
raise NotImplementedError(f"Not sure how to handle '{config.layer_norm_type}' LayerNorm type")
def _cast_if_autocast_enabled(self, tensor: torch.Tensor) -> torch.Tensor:
if torch.is_autocast_enabled():
if tensor.device.type == "cuda":
dtype = torch.get_autocast_gpu_dtype()
elif tensor.device.type == "cpu":
dtype = torch.get_autocast_cpu_dtype()
else:
raise NotImplementedError()
return tensor.to(dtype=dtype)
return tensor
class LayerNorm(LayerNormBase):
"""
The default :class:`LayerNorm` implementation which can optionally run in low precision.
"""
def __init__(self, config: ModelConfig, size: Optional[int] = None, low_precision: bool = False):
super().__init__(config)
self.normalized_shape = (size or config.d_model,)
self.eps = 1e-05
self.weight = nn.Parameter(torch.ones(self.normalized_shape, device=config.init_device))
self.bias = nn.Parameter(torch.zeros(self.normalized_shape, device=config.init_device))
self.low_precision = low_precision
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.low_precision:
module_device = x.device
downcast_x = self._cast_if_autocast_enabled(x)
downcast_weight = (
self._cast_if_autocast_enabled(self.weight) if self.weight is not None else self.weight
)
downcast_bias = self._cast_if_autocast_enabled(self.bias) if self.bias is not None else self.bias
with torch.autocast(enabled=False, device_type=module_device.type):
return F.layer_norm(downcast_x, self.normalized_shape, downcast_weight, downcast_bias, self.eps)
else:
return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
class RMSLayerNorm(LayerNorm):
"""
RMS layer norm, a simplified :class:`LayerNorm` implementation that can optionally run
in low-precision.
"""
def __init__(self, config: ModelConfig, size: Optional[int] = None, low_precision: bool = False):
super().__init__(config)
self.eps = 1e-08
self.size = size or config.d_model
self.weight = nn.Parameter(torch.ones(self.config.d_model))
if self.config.include_bias:
self.bias = nn.Parameter(torch.zeros(self.config.d_model))
else:
self.register_parameter("bias", None)
self.low_precision = low_precision
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.low_precision:
module_device = x.device
downcast_x = self._cast_if_autocast_enabled(x)
downcast_weight = self._cast_if_autocast_enabled(self.weight)
downcast_bias = self._cast_if_autocast_enabled(self.bias) if self.config.include_bias else None
with torch.autocast(enabled=False, device_type=module_device.type):
return self.rms_norm(downcast_x, downcast_weight, downcast_bias)
else:
return self.rms_norm(x, self.weight, self.bias if self.config.include_bias else None)
def rms_norm(self, x: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:
norm_x = x.norm(2, dim=-1, keepdim=True)
rms_x = norm_x * self.size ** (-1.0 / 2)
x_normed = x / (rms_x + self.eps)
if bias is not None:
return weight * x_normed + self.bias
else:
return weight * x_normed
class RotaryEmbedding(nn.Module):
"""
[Rotary positional embeddings (RoPE)](https://arxiv.org/abs/2104.09864).
"""
def __init__(self, config: ModelConfig):
super().__init__()
dim = config.d_model // config.n_heads
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=config.init_device).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype) # type: ignore
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x: torch.Tensor) -> torch.Tensor:
B, nh, T, hs = x.size()
x = x.view(B, nh, T, 2, hs // 2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
out = (t * pos.cos()) + (rotate_half(t) * pos.sin())
return out.to(t.dtype)
class Activation(nn.Module):
def __init__(self, config: ModelConfig):
super().__init__()
self.config = config
@abstractmethod
def forward(self, x: torch.Tensor) -> torch.Tensor:
raise NotImplementedError
@property
@abstractmethod
def output_multiplier(self) -> float:
raise NotImplementedError
@classmethod
def build(cls, config: ModelConfig) -> Activation:
if config.activation_type == ActivationType.gelu:
return cast(Activation, GELU(approximate="none"))
elif config.activation_type == ActivationType.relu:
return cast(Activation, ReLU(inplace=False))
elif config.activation_type == ActivationType.swiglu:
return SwiGLU(config)
else:
raise NotImplementedError(f"not sure how to handle activation type '{config.activation_type}'")
class GELU(nn.GELU):
@property
def output_multiplier(self) -> float:
return 1.0
class ReLU(nn.ReLU):
@property
def output_multiplier(self) -> float:
return 1.0
class SwiGLU(Activation):
def forward(self, x: torch.Tensor) -> torch.Tensor:
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
@property
def output_multiplier(self) -> float:
return 0.5
class OlmoBlock(nn.Module):
"""
A base class for transformer block implementations.
"""
def __init__(self, config: ModelConfig):
super().__init__()
self.config = config
assert config.d_model % config.n_heads == 0
# Dropout.
self.dropout = nn.Dropout(config.residual_dropout)
# Layer norms.
self.k_norm: Optional[LayerNormBase] = None
self.q_norm: Optional[LayerNormBase] = None
if config.attention_layer_norm:
self.k_norm = LayerNormBase.build(
config, size=config.d_model // config.n_heads if config.multi_query_attention else None
)
self.q_norm = LayerNormBase.build(config)
# Activation function.
self.act = Activation.build(config)
assert (self.act.output_multiplier * config.mlp_ratio * config.d_model) % 1 == 0
# Attention output projection.
self.attn_out = nn.Linear(
config.d_model, config.d_model, bias=config.include_bias, device=config.init_device
)
# Feed-forward output projection.
self.ff_out = nn.Linear(
int(self.act.output_multiplier * config.mlp_ratio * config.d_model),
config.d_model,
bias=config.include_bias,
device=config.init_device,
)
self.ff_out._is_residual = True # type: ignore
# Rotary embeddings.
if self.config.rope:
self.rotary_emb = RotaryEmbedding(config)
self.register_buffer(
"pos_emb", self.rotary_emb(config.max_sequence_length, device=config.init_device), persistent=False
)
def get_rotary_embedding(self, seq_len: int, device: Optional[torch.device]) -> torch.Tensor:
if self.pos_emb is not None and self.pos_emb.shape[-2] >= seq_len: # type: ignore
return self.pos_emb[:seq_len] # type: ignore
pos_emb = self.rotary_emb(seq_len, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def attention(
self,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
attention_bias: Optional[torch.Tensor] = None,
layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
use_cache: bool = False,
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
B, T, C = q.size() # batch size, sequence length, d_model
dtype = k.dtype
# Optionally apply layer norm to keys and queries.
if self.q_norm is not None and self.k_norm is not None:
q = self.q_norm(q).to(dtype=dtype)
k = self.k_norm(k).to(dtype=dtype)
# Move head forward to be next to the batch dim.
# shape: (B, nh, T, hs)
q = q.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
if self.config.multi_query_attention:
# shape: (B, 1, T, hs)
k = k.view(B, T, 1, C // self.config.n_heads).transpose(1, 2)
# shape: (B, 1, T, hs)
v = v.view(B, T, 1, C // self.config.n_heads).transpose(1, 2)
else:
# shape: (B, nh, T, hs)
k = k.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
# shape: (B, nh, T, hs)
v = v.view(B, T, self.config.n_heads, C // self.config.n_heads).transpose(1, 2)
if layer_past is not None:
past_key, past_value = layer_past
k = torch.cat((past_key, k), dim=-2)
v = torch.cat((past_value, v), dim=-2)
if use_cache:
present = (k, v)
else:
present = None
query_len, key_len = q.shape[-2], k.shape[-2] # could be different if layer_past not None
if self.config.rope:
# Apply rotary embeddings.
positions = self.get_rotary_embedding(key_len, q.device)
q = apply_rotary_pos_emb(positions[key_len - query_len : key_len], q)
k = apply_rotary_pos_emb(positions, k)
if attention_bias is not None:
attention_bias = attention_bias[:, :, key_len - query_len : key_len, :key_len]
# Get the attention scores.
# shape: (B, nh, T, hs)
att = F.scaled_dot_product_attention(
q,
k,
v,
attn_mask=None if attention_bias is None else attention_bias.to(dtype=dtype),
dropout_p=0.0 if not self.training else self.config.attention_dropout,
is_causal=attention_bias is None,
)
# Re-assemble all head outputs side-by-side.
att = att.transpose(1, 2).contiguous().view(B, T, C)
# Apply output projection.
return self.attn_out(att), present
@abstractmethod
def forward(
self,
x: torch.Tensor,
attention_bias: Optional[torch.FloatTensor] = None,
) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor, torch.Tensor]]]:
raise NotImplementedError
@classmethod
def build(cls, config: ModelConfig) -> OlmoBlock: | if config.block_type == BlockType.sequential: | 6 | 2023-11-14 02:24:07+00:00 | 12k |
1in-oos/ccplus | caringcaribou/tests/test_module_uds.py | [
{
"identifier": "Constants",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class Constants(object):\n # NR_SI (Negative Response Service Identifier) is a bit special, since\n # it is not a service per se.\n # From ISO-14229-1 specification: \"The NR_SI value is co-ordinated with\n ... | from caringcaribou.utils.iso14229_1 import Constants, Iso14229_1, NegativeResponseCodes, ServiceID, Services
from caringcaribou.tests.mock.mock_ecu_uds import MockEcuIso14229
from caringcaribou.modules import uds
import unittest | 10,619 | from __future__ import print_function
class UdsModuleTestCase(unittest.TestCase):
ARB_ID_REQUEST = 0x300E
ARB_ID_RESPONSE = 0x300F
# Timeout (in seconds) when waiting for response during bruteforce
BRUTEFORCE_TIMEOUT = 0.01
def setUp(self):
# Initialize mock ECU
self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)
# Remove response delay
self.ecu.DELAY_BEFORE_RESPONSE = 0.0
self.ecu.start_server()
def tearDown(self):
if isinstance(self.ecu, MockEcuIso14229):
self.ecu.__exit__(None, None, None)
def test_uds_discovery(self):
# Discovery arguments
start_arb_id = self.ARB_ID_REQUEST - 5
end_arb_id = self.ARB_ID_REQUEST + 5
blacklist = []
auto_blacklist_duration = 0
timeout = self.BRUTEFORCE_TIMEOUT
verify = True
print_results = False
# Perform UDS discovery
result = uds.uds_discovery(min_id=start_arb_id,
max_id=end_arb_id,
blacklist_args=blacklist,
auto_blacklist_duration=auto_blacklist_duration,
delay=timeout,
verify=verify,
print_results=print_results)
expected_result = [(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)]
self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_uds_discovery_blacklist(self):
# Discovery arguments
start_arb_id = self.ARB_ID_REQUEST - 5
end_arb_id = self.ARB_ID_REQUEST + 5
# Blacklist the arbitration ID used for response
blacklist = [self.ARB_ID_RESPONSE]
auto_blacklist_duration = 0
timeout = self.BRUTEFORCE_TIMEOUT
verify = True
print_results = False
# Perform UDS discovery
result = uds.uds_discovery(min_id=start_arb_id,
max_id=end_arb_id,
blacklist_args=blacklist,
auto_blacklist_duration=auto_blacklist_duration,
delay=timeout,
verify=verify,
print_results=print_results)
# No results expected due to blacklist
expected_result = []
self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_service_discovery(self):
# Service discovery arguments
range_start = 0x09
range_end = 0x13
print_results = False
# Perform service discovery
result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST,
arb_id_response=self.ARB_ID_RESPONSE,
timeout=self.BRUTEFORCE_TIMEOUT,
min_id=range_start,
max_id=range_end,
print_results=print_results)
# Supported services within specified range
expected_result = [ServiceID.DIAGNOSTIC_SESSION_CONTROL, ServiceID.ECU_RESET]
self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_service_discovery_empty_range(self):
# Service discovery arguments
range_start = 0x00
range_end = 0x05
print_results = False
# Perform service discovery
result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST,
arb_id_response=self.ARB_ID_RESPONSE,
timeout=self.BRUTEFORCE_TIMEOUT,
min_id=range_start,
max_id=range_end,
print_results=print_results)
# No services should be found within range
expected_result = []
self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected no hits".format(
result))
def test_ecu_reset_hard_reset_success(self):
# ECU Reset arguments
| from __future__ import print_function
class UdsModuleTestCase(unittest.TestCase):
ARB_ID_REQUEST = 0x300E
ARB_ID_RESPONSE = 0x300F
# Timeout (in seconds) when waiting for response during bruteforce
BRUTEFORCE_TIMEOUT = 0.01
def setUp(self):
# Initialize mock ECU
self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)
# Remove response delay
self.ecu.DELAY_BEFORE_RESPONSE = 0.0
self.ecu.start_server()
def tearDown(self):
if isinstance(self.ecu, MockEcuIso14229):
self.ecu.__exit__(None, None, None)
def test_uds_discovery(self):
# Discovery arguments
start_arb_id = self.ARB_ID_REQUEST - 5
end_arb_id = self.ARB_ID_REQUEST + 5
blacklist = []
auto_blacklist_duration = 0
timeout = self.BRUTEFORCE_TIMEOUT
verify = True
print_results = False
# Perform UDS discovery
result = uds.uds_discovery(min_id=start_arb_id,
max_id=end_arb_id,
blacklist_args=blacklist,
auto_blacklist_duration=auto_blacklist_duration,
delay=timeout,
verify=verify,
print_results=print_results)
expected_result = [(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)]
self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_uds_discovery_blacklist(self):
# Discovery arguments
start_arb_id = self.ARB_ID_REQUEST - 5
end_arb_id = self.ARB_ID_REQUEST + 5
# Blacklist the arbitration ID used for response
blacklist = [self.ARB_ID_RESPONSE]
auto_blacklist_duration = 0
timeout = self.BRUTEFORCE_TIMEOUT
verify = True
print_results = False
# Perform UDS discovery
result = uds.uds_discovery(min_id=start_arb_id,
max_id=end_arb_id,
blacklist_args=blacklist,
auto_blacklist_duration=auto_blacklist_duration,
delay=timeout,
verify=verify,
print_results=print_results)
# No results expected due to blacklist
expected_result = []
self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_service_discovery(self):
# Service discovery arguments
range_start = 0x09
range_end = 0x13
print_results = False
# Perform service discovery
result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST,
arb_id_response=self.ARB_ID_RESPONSE,
timeout=self.BRUTEFORCE_TIMEOUT,
min_id=range_start,
max_id=range_end,
print_results=print_results)
# Supported services within specified range
expected_result = [ServiceID.DIAGNOSTIC_SESSION_CONTROL, ServiceID.ECU_RESET]
self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected '{1}'".format(
result, expected_result))
def test_service_discovery_empty_range(self):
# Service discovery arguments
range_start = 0x00
range_end = 0x05
print_results = False
# Perform service discovery
result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST,
arb_id_response=self.ARB_ID_RESPONSE,
timeout=self.BRUTEFORCE_TIMEOUT,
min_id=range_start,
max_id=range_end,
print_results=print_results)
# No services should be found within range
expected_result = []
self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected no hits".format(
result))
def test_ecu_reset_hard_reset_success(self):
# ECU Reset arguments | reset_type = Services.EcuReset.ResetType.HARD_RESET | 4 | 2023-11-13 05:05:46+00:00 | 12k |
L1bra1/WeakMotion | gen_data/gen_weak_data.py | [
{
"identifier": "NuScenes",
"path": "gen_data/nuscenes/nuscenes.py",
"snippet": "class NuScenes:\n \"\"\"\n Database class for nuScenes to help query and retrieve information from the database.\n \"\"\"\n\n def __init__(self,\n version: str = 'v1.0-mini',\n da... | from gen_data.nuscenes.nuscenes import NuScenes
from gen_data.nuscenes.utils.data_classes import LidarPointCloud
from functools import reduce
from gen_data.nuscenes.utils.geometry_utils import view_points, transform_matrix
from pyquaternion import Quaternion
import os
import numpy as np
import argparse | 10,304 |
_, sort_idx = np.unique(all_times, return_index=True)
unique_times = all_times[np.sort(sort_idx)] # Preserve the item order in unique_times
num_sweeps = len(unique_times)
# Make sure we have sufficient past and future sweeps
if num_sweeps != (nsweeps_back + nsweeps_forward):
# Skip some keyframes if necessary
flag = False
for _ in range(num_keyframe_skipped + 1):
if curr_sample['next'] != '':
curr_sample = nusc.get('sample', curr_sample['next'])
else:
flag = True
break
if flag: # No more keyframes
break
else:
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
# Reset
adj_seq_cnt = 0
save_weak_dict_list = list()
continue
if adj_seq_cnt == 0:
save_weak_dict = dict()
lidar_curr_sample = curr_sample
key_timestamps = np.zeros(3)
lidar_sd_token_data = nusc.get('sample_data', lidar_curr_sample['data']['LIDAR_TOP'])
_, ref_from_car, car_from_global, ref_time = get_pc_pose(lidar_sd_token_data, inverse=True)
lidar_curr_sample = nusc.get('sample', lidar_curr_sample['prev'])
# 0 past (-0.5s); 1 current (0s); 2 future (+0.5s)
for key_frame_index in range(3):
lidar_sd_token_data = nusc.get('sample_data', lidar_curr_sample['data']['LIDAR_TOP'])
lidar_sd_token = lidar_sd_token_data['token']
save_weak_dict['token_' + str(key_frame_index)] = lidar_sd_token
current_pc, car_from_current, global_from_car, timestamp = get_pc_pose(lidar_sd_token_data, inverse=False)
trans_matrix = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
current_pc[:3, :] = trans_matrix.dot(np.vstack((current_pc[:3, :], np.ones(current_pc.shape[1]))))[:3, :]
save_weak_dict['synchronized_pc_' + str(key_frame_index)] = current_pc[:3, :]
lidarseg_labels_filename = os.path.join(nusc.dataroot, nusc.get('lidarseg', lidar_sd_token)['filename'])
points_label = np.fromfile(lidarseg_labels_filename, dtype=np.uint8).reshape([-1, 1])
key_timestamps[key_frame_index] = 1e-6 * lidar_sd_token_data['timestamp']
save_weak_dict['points_label_' + str(key_frame_index)] = points_label
sample_idx, pc_random_index_dict = gen_random_index_for_pc(current_pc, lidar_sd_token, pc_random_index_dict)
save_weak_dict['sample_idx_' + str(key_frame_index)] = sample_idx
if key_frame_index != 2:
lidar_curr_sample = nusc.get('sample', lidar_curr_sample['next'])
save_weak_dict['key_timestamp'] = key_timestamps
save_weak_dict_list.append(save_weak_dict)
adj_seq_cnt += 1
if adj_seq_cnt == num_adj_seqs:
for seq_idx, seq_weak_dict in enumerate(save_weak_dict_list):
# save the data
save_directory = check_folder(os.path.join(args.savepath, str(scene_idx) + '_' + str(save_seq_cnt)))
save_file_name = os.path.join(save_directory, str(seq_idx) + '.npy')
np.save(save_file_name, arr=seq_weak_dict)
print(" >> {} - {} Finish sample: {}, sequence {}".format(seq_weak_dict['key_timestamp'][0], seq_weak_dict['key_timestamp'][1], save_seq_cnt, seq_idx))
save_seq_cnt += 1
adj_seq_cnt = 0
save_weak_dict_list = list()
# Skip some keyframes if necessary
flag = False
for _ in range(num_keyframe_skipped + 1):
if curr_sample['next'] != '':
curr_sample = nusc.get('sample', curr_sample['next'])
else:
flag = True
break
if flag: # No more keyframes
break
else:
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
else:
flag = False
for _ in range(skip_frame + 1):
if curr_sample_data['next'] != '':
curr_sample_data = nusc.get('sample_data', curr_sample_data['next'])
else:
flag = True
break
if flag: # No more sample frames
break
save_file_name = os.path.join(sample_info_directory, str(scene_idx) + '_sample_info.npy')
np.save(save_file_name, arr=pc_random_index_dict)
def get_pc_pose(ref_sd_rec, inverse = True):
# Get reference pose and timestamp
ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
ref_time = 1e-6 * ref_sd_rec['timestamp']
# Homogeneous transform from ego car frame to reference frame
| """
This code is to generate Foreground/Background information for the training set of nuScenes data.
And the code is modified based on 'gen_data.py' in MotionNet(https://www.merl.com/research/?research=license-request&sw=MotionNet)
"""
def check_folder(folder_name):
if not os.path.exists(folder_name):
os.mkdir(folder_name)
return folder_name
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--root', default='/path_to/nuScenes/nuScenes-data/', type=str, help='Root path to nuScenes dataset')
parser.add_argument('-s', '--split', default='train', type=str, help='The data split [train/val]')
parser.add_argument('-p', '--savepath', default='/path_to/nuScenes/weak-data/', type=str, help='Directory for saving the generated data')
args = parser.parse_args()
nusc = NuScenes(version='v1.0-trainval', dataroot=args.root, verbose=True)
print("Total number of scenes:", len(nusc.scene))
class_map = {'vehicle.car': 1, 'vehicle.bus.rigid': 1, 'vehicle.bus.bendy': 1, 'human.pedestrian': 2,
'vehicle.bicycle': 3} # background: 0, other: 4
if args.split == 'train':
num_keyframe_skipped = 0 # The number of keyframes we will skip when dumping the data
nsweeps_back = 30 # Number of frames back to the history (including the current timestamp)
nsweeps_forward = 20 # Number of frames into the future (does not include the current timestamp)
skip_frame = 0 # The number of frames skipped for the adjacent sequence
num_adj_seqs = 2 # number of adjacent sequences, among which the time gap is \delta t
else:
num_keyframe_skipped = 1
nsweeps_back = 25 # Setting this to 30 (for training) or 25 (for testing) allows conducting ablation studies on frame numbers
nsweeps_forward = 20
skip_frame = 0
num_adj_seqs = 1
# The specifications for BEV maps
voxel_size = (0.25, 0.25, 0.4)
area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]])
past_frame_skip = 3 # when generating the BEV maps, how many history frames need to be skipped
future_frame_skip = 0 # when generating the BEV maps, how many future frames need to be skipped
num_past_frames_for_bev_seq = 5 # the number of past frames for BEV map sequence
scenes = np.load('split.npy', allow_pickle=True).item().get(args.split)
print("Split: {}, which contains {} scenes.".format(args.split, len(scenes)))
args.savepath = check_folder(args.savepath)
sample_info_directory = check_folder(os.path.join(args.savepath, args.split + '_sample_info'))
args.savepath = check_folder(os.path.join(args.savepath, args.split))
def gen_data():
res_scenes = list()
for s in scenes:
s_id = s.split('_')[1]
res_scenes.append(int(s_id))
for scene_idx in res_scenes:
curr_scene = nusc.scene[scene_idx]
first_sample_token = curr_scene['first_sample_token']
curr_sample = nusc.get('sample', first_sample_token)
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
adj_seq_cnt = 0
save_seq_cnt = 0 # only used for save data file name
save_weak_dict_list = list()
pc_random_index_dict = dict()
# Iterate each sample data
print("Processing scene {} ...".format(scene_idx))
while curr_sample_data['next'] != '':
all_times = \
LidarPointCloud.from_file_multisweep_bf_sample_data_return_times(nusc, curr_sample_data,
nsweeps_back=nsweeps_back,
nsweeps_forward=nsweeps_forward)
_, sort_idx = np.unique(all_times, return_index=True)
unique_times = all_times[np.sort(sort_idx)] # Preserve the item order in unique_times
num_sweeps = len(unique_times)
# Make sure we have sufficient past and future sweeps
if num_sweeps != (nsweeps_back + nsweeps_forward):
# Skip some keyframes if necessary
flag = False
for _ in range(num_keyframe_skipped + 1):
if curr_sample['next'] != '':
curr_sample = nusc.get('sample', curr_sample['next'])
else:
flag = True
break
if flag: # No more keyframes
break
else:
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
# Reset
adj_seq_cnt = 0
save_weak_dict_list = list()
continue
if adj_seq_cnt == 0:
save_weak_dict = dict()
lidar_curr_sample = curr_sample
key_timestamps = np.zeros(3)
lidar_sd_token_data = nusc.get('sample_data', lidar_curr_sample['data']['LIDAR_TOP'])
_, ref_from_car, car_from_global, ref_time = get_pc_pose(lidar_sd_token_data, inverse=True)
lidar_curr_sample = nusc.get('sample', lidar_curr_sample['prev'])
# 0 past (-0.5s); 1 current (0s); 2 future (+0.5s)
for key_frame_index in range(3):
lidar_sd_token_data = nusc.get('sample_data', lidar_curr_sample['data']['LIDAR_TOP'])
lidar_sd_token = lidar_sd_token_data['token']
save_weak_dict['token_' + str(key_frame_index)] = lidar_sd_token
current_pc, car_from_current, global_from_car, timestamp = get_pc_pose(lidar_sd_token_data, inverse=False)
trans_matrix = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current])
current_pc[:3, :] = trans_matrix.dot(np.vstack((current_pc[:3, :], np.ones(current_pc.shape[1]))))[:3, :]
save_weak_dict['synchronized_pc_' + str(key_frame_index)] = current_pc[:3, :]
lidarseg_labels_filename = os.path.join(nusc.dataroot, nusc.get('lidarseg', lidar_sd_token)['filename'])
points_label = np.fromfile(lidarseg_labels_filename, dtype=np.uint8).reshape([-1, 1])
key_timestamps[key_frame_index] = 1e-6 * lidar_sd_token_data['timestamp']
save_weak_dict['points_label_' + str(key_frame_index)] = points_label
sample_idx, pc_random_index_dict = gen_random_index_for_pc(current_pc, lidar_sd_token, pc_random_index_dict)
save_weak_dict['sample_idx_' + str(key_frame_index)] = sample_idx
if key_frame_index != 2:
lidar_curr_sample = nusc.get('sample', lidar_curr_sample['next'])
save_weak_dict['key_timestamp'] = key_timestamps
save_weak_dict_list.append(save_weak_dict)
adj_seq_cnt += 1
if adj_seq_cnt == num_adj_seqs:
for seq_idx, seq_weak_dict in enumerate(save_weak_dict_list):
# save the data
save_directory = check_folder(os.path.join(args.savepath, str(scene_idx) + '_' + str(save_seq_cnt)))
save_file_name = os.path.join(save_directory, str(seq_idx) + '.npy')
np.save(save_file_name, arr=seq_weak_dict)
print(" >> {} - {} Finish sample: {}, sequence {}".format(seq_weak_dict['key_timestamp'][0], seq_weak_dict['key_timestamp'][1], save_seq_cnt, seq_idx))
save_seq_cnt += 1
adj_seq_cnt = 0
save_weak_dict_list = list()
# Skip some keyframes if necessary
flag = False
for _ in range(num_keyframe_skipped + 1):
if curr_sample['next'] != '':
curr_sample = nusc.get('sample', curr_sample['next'])
else:
flag = True
break
if flag: # No more keyframes
break
else:
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
else:
flag = False
for _ in range(skip_frame + 1):
if curr_sample_data['next'] != '':
curr_sample_data = nusc.get('sample_data', curr_sample_data['next'])
else:
flag = True
break
if flag: # No more sample frames
break
save_file_name = os.path.join(sample_info_directory, str(scene_idx) + '_sample_info.npy')
np.save(save_file_name, arr=pc_random_index_dict)
def get_pc_pose(ref_sd_rec, inverse = True):
# Get reference pose and timestamp
ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token'])
ref_time = 1e-6 * ref_sd_rec['timestamp']
# Homogeneous transform from ego car frame to reference frame | ref_from_car = transform_matrix(ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']), | 3 | 2023-11-12 07:03:29+00:00 | 12k |
c3exchange/c3-smartcontracts-v1 | contracts_unified/core/methods/pool_move.py | [
{
"identifier": "health_check",
"path": "contracts_unified/core/internal/health_check.py",
"snippet": "@ABIReturnSubroutine\ndef health_check(\n account: AccountAddress,\n use_maint: abi.Bool,\n *,\n output: ExcessMargin,\n) -> Expr:\n \"\"\"Calculates the user's health\"\"\"\n\n count... | from typing import cast
from pyteal import (
ABIReturnSubroutine,
Assert,
Expr,
If,
Int,
Not,
Or,
Seq,
WideRatio,
abi,
)
from contracts_unified.core.internal.health_check import health_check
from contracts_unified.core.internal.move import signed_add_to_cash
from contracts_unified.core.internal.perform_pool_move import perform_pool_move
from contracts_unified.core.internal.setup import setup
from contracts_unified.core.internal.validate_sender import sender_is_sig_validator
from contracts_unified.core.state_handler.local_handler import LocalStateHandler
from contracts_unified.library.c3types import (
AccountAddress,
Amount,
ExcessMargin,
InstrumentId,
Price,
SignedAmount,
UserInstrumentData,
)
from contracts_unified.library.c3types_user import (
DelegationChain,
OperationId,
OperationMetaData,
PoolMoveData,
)
from contracts_unified.library.constants import PRICECASTER_RESCALE_FACTOR
from contracts_unified.library.pricecaster import get_normalized_price
from contracts_unified.library.signed_math import (
signed_add,
signed_gte,
signed_ltz,
signed_neg,
) | 7,297 | """
Implements Core contract method for transferring user's instruments to/from a pool.
"""
@ABIReturnSubroutine
def pool_move(
account: AccountAddress,
user_op: OperationMetaData,
_delegation_chain: DelegationChain,
_server_data: abi.DynamicBytes,
opup_budget: Amount,
) -> Expr:
"""Transfers instruments from user's address to the pool
Arguments:
account (AccountAddress): User's account address.
user_op (OperationMetaData): Operation metadata containing a basket of instruments.
_delegation_chain (DelegationChain): Delegation chain. Unused.
_server_data (abi.DynamicBytes): Server data. Unused.
opup_budget (Amount): Additional computation budget to allocate to this transaction.
"""
abi_false = abi.Bool()
user_old_health = ExcessMargin()
user_health = ExcessMargin()
data = PoolMoveData()
instrument = InstrumentId()
amount = SignedAmount()
user_data = UserInstrumentData()
price = Price()
cash = Amount()
neg_cash = SignedAmount()
return Seq(
setup(opup_budget.get()),
# Load constants
abi_false.set(Int(0)),
# Validate sender is a user proxy
cast(Expr, sender_is_sig_validator()),
# Get basket from user_op.data
user_op.operation.use(lambda op_data:
Seq(
data.decode(op_data.get()),
data.operation.use(lambda op: Assert(op.get() == OperationId.PoolMove)),
instrument.set(data.instrument),
amount.set(data.amount),
)
),
# Get old health
user_old_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),
# Move funds
cast(Expr, perform_pool_move(account, instrument, amount)),
# When there is a negative movement, we need to check that the user can support itself without netting
If(signed_ltz(amount.get())).Then(
# Get instrument price
price.set(cast(abi.ReturnedValue, get_normalized_price(instrument))),
# Extract user cash
user_data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument))),
cash.set(user_data.cash),
| """
Implements Core contract method for transferring user's instruments to/from a pool.
"""
@ABIReturnSubroutine
def pool_move(
account: AccountAddress,
user_op: OperationMetaData,
_delegation_chain: DelegationChain,
_server_data: abi.DynamicBytes,
opup_budget: Amount,
) -> Expr:
"""Transfers instruments from user's address to the pool
Arguments:
account (AccountAddress): User's account address.
user_op (OperationMetaData): Operation metadata containing a basket of instruments.
_delegation_chain (DelegationChain): Delegation chain. Unused.
_server_data (abi.DynamicBytes): Server data. Unused.
opup_budget (Amount): Additional computation budget to allocate to this transaction.
"""
abi_false = abi.Bool()
user_old_health = ExcessMargin()
user_health = ExcessMargin()
data = PoolMoveData()
instrument = InstrumentId()
amount = SignedAmount()
user_data = UserInstrumentData()
price = Price()
cash = Amount()
neg_cash = SignedAmount()
return Seq(
setup(opup_budget.get()),
# Load constants
abi_false.set(Int(0)),
# Validate sender is a user proxy
cast(Expr, sender_is_sig_validator()),
# Get basket from user_op.data
user_op.operation.use(lambda op_data:
Seq(
data.decode(op_data.get()),
data.operation.use(lambda op: Assert(op.get() == OperationId.PoolMove)),
instrument.set(data.instrument),
amount.set(data.amount),
)
),
# Get old health
user_old_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),
# Move funds
cast(Expr, perform_pool_move(account, instrument, amount)),
# When there is a negative movement, we need to check that the user can support itself without netting
If(signed_ltz(amount.get())).Then(
# Get instrument price
price.set(cast(abi.ReturnedValue, get_normalized_price(instrument))),
# Extract user cash
user_data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument))),
cash.set(user_data.cash), | neg_cash.set(signed_neg(cash.get())), | 13 | 2023-11-17 20:54:15+00:00 | 12k |
gunderson-dettmer/CE2OCF | CE2OCF/ocf/generators/vesting_enums_to_ocf.py | [
{
"identifier": "load_cic_event_definition",
"path": "CE2OCF/datamap/loaders.py",
"snippet": "def load_cic_event_definition(source_json: Path = DEFAULT_CIC_DEFS_PATH) -> CicEventDefinition:\n with source_json.open(\"r\") as config_file:\n return json.loads(config_file.read())"
},
{
"id... | from CE2OCF.datamap.loaders import (
load_cic_event_definition,
load_double_trigger_definitions,
load_single_trigger_definitions,
)
from CE2OCF.ocf.generators.ocf_id_generators import (
generate_accel_trigger_termination_event_id,
generate_cic_event_id,
generate_time_based_accel_expiration_event_id,
generate_vesting_start_id,
)
from CE2OCF.ocf.generators.ocf_vesting_conditions import (
generate_cliff_vesting_condition_id,
generate_event_based_vesting_condition,
generate_monthly_vesting_condition_id,
generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration,
generate_vesting_condition_relative_time_based,
generate_vesting_start_condition,
)
from CE2OCF.ocf.generators.ocf_vesting_events import (
cic_event_generator,
generate_change_in_control_event,
generate_vesting_termination_event,
)
from CE2OCF.types.dictionaries import (
CicEventDefinition,
TerminationDetails,
)
from CE2OCF.types.enums import (
DoubleTriggerTypesEnum,
OcfPeriodTypeEnum,
SingleTriggerTypesEnum,
VestingTypesEnum,
)
from CE2OCF.utils.log_utils import logger | 9,004 | end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=6,
ocf_event_generator=generate_vesting_termination_event,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.TWELVE_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=12,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.TWENTY_FOUR_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=24,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
elif single_trigger_type == SingleTriggerTypesEnum.SIX_MONTHS_ALL_TIMES:
(
start_condition_id,
vest_cond_objs,
) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
generate_vesting_start_id(vesting_schedule_id),
end_month=48,
cliff_month=12,
months_of_vest_credit_on_trigger=6,
ocf_event_generator=cic_event_generator,
)
condition_ocf_objs.extend(vest_cond_objs)
else:
logger.debug("WARNING - Unexpected combination of acceleration and vesting...")
elif vesting_schedule_type == VestingTypesEnum.FULLY_VESTED:
# shouldn't be vesting conditions
pass
else:
logger.debug("WARNING - Unexpected combination of acceleration and vesting...")
pass
return start_condition_id, condition_ocf_objs
def generate_double_trigger_conditions_from_enumerations(
double_trigger_type: DoubleTriggerTypesEnum,
vesting_schedule_id: str,
cic_event_definition: CicEventDefinition | None = None,
double_trigger_termination_details: dict[str, TerminationDetails | None] | None = None,
) -> list[dict]:
if cic_event_definition is None:
cic_event_definition = load_cic_event_definition()
if double_trigger_termination_details is None:
double_trigger_termination_details = load_double_trigger_definitions()
condition_ocf_objs: list[dict] = []
if double_trigger_type not in double_trigger_termination_details:
raise ValueError(
f"Provided double trigger value ({double_trigger_type}) not supported in "
f"double_trigger_termination_details mapping object "
)
details = double_trigger_termination_details[double_trigger_type]
# If mapping table maps to None don't generate anything...
if details is None:
return condition_ocf_objs
cic_event_id = generate_cic_event_id(vesting_schedule_id, "Double")
time_based_expiration_details = details["time_based_expiration_details"]
time_based_expiration_event_id = (
None
if time_based_expiration_details is None
else generate_time_based_accel_expiration_event_id(vesting_schedule_id, "Double")
)
termination_event_details = details["termination_event_details"]
termination_event_id = generate_accel_trigger_termination_event_id(vesting_schedule_id, "Double")
# generate the cic event first and set next_condition_ids to include the expiration event
# if applicable, otherwise, just the termination event
condition_ocf_objs.append(
generate_change_in_control_event(
vesting_schedule_id=vesting_schedule_id,
cic_event_definition=cic_event_definition,
next_condition_ids=[
*([time_based_expiration_event_id] if time_based_expiration_event_id is not None else []),
termination_event_id,
]
if time_based_expiration_details is not None
else [termination_event_id],
)
)
# If there is a time-based expiration
if time_based_expiration_details is not None:
condition_ocf_objs.append(
| from __future__ import annotations
# Maps each time-served-credit single-trigger variant to (months of vesting
# credit granted on trigger, OCF event generator modelling the trigger event —
# change-in-control vs. involuntary termination).
_SINGLE_TRIGGER_VEST_CREDIT_SPECS = {
    SingleTriggerTypesEnum.SIX_MONTHS_ALL_TIMES: (6, cic_event_generator),
    SingleTriggerTypesEnum.TWELVE_MONTHS_ALL_TIMES: (12, cic_event_generator),
    SingleTriggerTypesEnum.TWENTY_FOUR_MONTHS_ALL_TIMES: (24, cic_event_generator),
    SingleTriggerTypesEnum.SIX_MONTHS_INVOLUNTARY_TERMINATION: (6, generate_vesting_termination_event),
    SingleTriggerTypesEnum.TWELVE_MONTHS_INVOLUNTARY_TERMINATION: (12, generate_vesting_termination_event),
    SingleTriggerTypesEnum.TWENTY_FOUR_MONTHS_INVOLUNTARY_TERMINATION: (24, generate_vesting_termination_event),
}

# Cliff month for the vesting schedule types that support time-served-credit
# acceleration; both are 48-month schedules.
_VESTING_TYPE_CLIFF_MONTHS = {
    VestingTypesEnum.FOUR_YR_NO_CLIFF: 0,
    VestingTypesEnum.FOUR_YR_1_YR_CLIFF: 12,
}


def generate_single_trigger_conditions_from_enumerations(
    single_trigger_type: SingleTriggerTypesEnum | str,
    vesting_schedule_type: VestingTypesEnum | str,
    vesting_schedule_id: str,
    single_trigger_termination_details: dict[str, CicEventDefinition | None] | None = None,
) -> tuple[str, list[dict]]:
    """
    Generate the OCF vesting conditions required for single-trigger acceleration.

    :param single_trigger_type: Kind of single-trigger acceleration (enum member or its
        string value).
    :param vesting_schedule_type: Underlying vesting schedule (enum member or its string
        value). Ignored for the 100%-acceleration trigger types.
    :param vesting_schedule_id: Id of the vesting schedule the conditions belong to.
    :param single_trigger_termination_details: Optional mapping of trigger type to the
        event definition used for the 100%-acceleration variants. Loaded from the default
        datamap when omitted.
    :return: A tuple - element 0 is start condition id of the generated schedule.
        Element 1 is the actual list of ocf objs.
    :raises ValueError: If either enumeration is the unsupported ``CUSTOM`` variant.
    """
    if single_trigger_termination_details is None:
        single_trigger_termination_details = load_single_trigger_definitions()

    if isinstance(single_trigger_type, str):
        single_trigger_type = SingleTriggerTypesEnum(single_trigger_type)
    if isinstance(vesting_schedule_type, str):
        vesting_schedule_type = VestingTypesEnum(vesting_schedule_type)

    condition_ocf_objs: list[dict] = []
    start_condition_id = ""

    if vesting_schedule_type == VestingTypesEnum.CUSTOM:
        raise ValueError("Custom vesting schedule with single trigger acceleration not implemented")

    if single_trigger_type == SingleTriggerTypesEnum.CUSTOM:
        raise ValueError("Custom single trigger acceleration not implemented")

    single_trigger_vals = single_trigger_termination_details[single_trigger_type]
    assert single_trigger_vals is not None

    if single_trigger_type == SingleTriggerTypesEnum.ONE_HUNDRED_PERCENT_INVOLUNTARY_TERMINATION:
        # 100% acceleration on involuntary termination: a single event-based condition,
        # independent of the underlying time-based schedule.
        logger.debug(
            f"INFO - vesting_schedule_type arg {vesting_schedule_type} has no effect for {single_trigger_type} accel"
        )
        start_condition_id = generate_accel_trigger_termination_event_id(vesting_schedule_id, "Single")
        condition_ocf_objs.append(
            generate_event_based_vesting_condition(
                condition_id=start_condition_id,
                **single_trigger_vals,
            )
        )
    elif single_trigger_type == SingleTriggerTypesEnum.ONE_HUNDRED_PERCENT_ALL_TIMES:
        # 100% acceleration on change-in-control, likewise schedule-independent.
        logger.debug(
            f"INFO - vesting_schedule_type arg {vesting_schedule_type} has no effect for {single_trigger_type} accel"
        )
        start_condition_id = generate_cic_event_id(vesting_schedule_id, "Single")
        condition_ocf_objs.append(
            generate_event_based_vesting_condition(
                condition_id=start_condition_id,
                **single_trigger_vals,
            )
        )
    else:
        # Time-served-credit acceleration: the generated conditions depend on both the
        # credit months / triggering event (from the trigger type) and the cliff
        # (from the schedule type). Unsupported combinations are logged, not raised,
        # and fully-vested grants produce no conditions at all.
        credit_spec = _SINGLE_TRIGGER_VEST_CREDIT_SPECS.get(single_trigger_type)
        cliff_month = _VESTING_TYPE_CLIFF_MONTHS.get(vesting_schedule_type)
        if credit_spec is not None and cliff_month is not None:
            credit_months, ocf_event_generator = credit_spec
            (
                start_condition_id,
                vest_cond_objs,
            ) = generate_time_based_ocf_vesting_conditions_with_time_served_credit_acceleration(
                generate_vesting_start_id(vesting_schedule_id),
                end_month=48,
                cliff_month=cliff_month,
                months_of_vest_credit_on_trigger=credit_months,
                ocf_event_generator=ocf_event_generator,
            )
            condition_ocf_objs.extend(vest_cond_objs)
        elif vesting_schedule_type == VestingTypesEnum.FULLY_VESTED:
            # Fully vested - there shouldn't be vesting conditions.
            pass
        else:
            logger.debug("WARNING - Unexpected combination of acceleration and vesting...")

    return start_condition_id, condition_ocf_objs
def generate_double_trigger_conditions_from_enumerations(
double_trigger_type: DoubleTriggerTypesEnum,
vesting_schedule_id: str,
cic_event_definition: CicEventDefinition | None = None,
double_trigger_termination_details: dict[str, TerminationDetails | None] | None = None,
) -> list[dict]:
if cic_event_definition is None:
cic_event_definition = load_cic_event_definition()
if double_trigger_termination_details is None:
double_trigger_termination_details = load_double_trigger_definitions()
condition_ocf_objs: list[dict] = []
if double_trigger_type not in double_trigger_termination_details:
raise ValueError(
f"Provided double trigger value ({double_trigger_type}) not supported in "
f"double_trigger_termination_details mapping object "
)
details = double_trigger_termination_details[double_trigger_type]
# If mapping table maps to None don't generate anything...
if details is None:
return condition_ocf_objs
cic_event_id = generate_cic_event_id(vesting_schedule_id, "Double")
time_based_expiration_details = details["time_based_expiration_details"]
time_based_expiration_event_id = (
None
if time_based_expiration_details is None
else generate_time_based_accel_expiration_event_id(vesting_schedule_id, "Double")
)
termination_event_details = details["termination_event_details"]
termination_event_id = generate_accel_trigger_termination_event_id(vesting_schedule_id, "Double")
# generate the cic event first and set next_condition_ids to include the expiration event
# if applicable, otherwise, just the termination event
condition_ocf_objs.append(
generate_change_in_control_event(
vesting_schedule_id=vesting_schedule_id,
cic_event_definition=cic_event_definition,
next_condition_ids=[
*([time_based_expiration_event_id] if time_based_expiration_event_id is not None else []),
termination_event_id,
]
if time_based_expiration_details is not None
else [termination_event_id],
)
)
# If there is a time-based expiration
if time_based_expiration_details is not None:
condition_ocf_objs.append( | generate_vesting_condition_relative_time_based( | 11 | 2023-11-13 15:50:53+00:00 | 12k |
cyberark/ark-sdk-python | ark_sdk_python/models/actions/services/ark_dpa_exec_action_consts.py | [
{
"identifier": "ArkModel",
"path": "ark_sdk_python/models/ark_model.py",
"snippet": "class ArkModel(BaseModel):\n class Config:\n allow_population_by_field_name = True"
},
{
"identifier": "ArkServiceActionDefinition",
"path": "ark_sdk_python/models/actions/ark_service_action_defin... | from typing import Dict, Final, Optional, Type
from ark_sdk_python.models import ArkModel
from ark_sdk_python.models.actions.ark_service_action_definition import ArkServiceActionDefinition
from ark_sdk_python.models.cli_services.dpa.policies_editor.common import (
ArkDPACommitPolicies,
ArkDPAEditPolicies,
ArkDPAGetPoliciesStatus,
ArkDPALoadPolicies,
ArkDPAPoliciesDiff,
ArkDPARemovePolicies,
ArkDPAResetPolicies,
ArkDPAViewPolicies,
)
from ark_sdk_python.models.cli_services.dpa.policies_editor.db import ArkDPADBGeneratePolicy
from ark_sdk_python.models.cli_services.dpa.policies_editor.vm import ArkDPAVMGeneratePolicy
from ark_sdk_python.models.services.dpa.certificates import (
ArkDPACertificatesFilter,
ArkDPACreateCertificate,
ArkDPADeleteCertificate,
ArkDPAGetCertificate,
ArkDPAUpdateCertificate,
)
from ark_sdk_python.models.services.dpa.db import ArkDPADBMysqlExecution, ArkDPADBOracleGenerateAssets, ArkDPADBPsqlExecution
from ark_sdk_python.models.services.dpa.k8s.ark_dpa_k8s_generate_kubeconfig import ArkDPAK8SGenerateKubeConfig
from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPAUpdatePolicyStatus
from ark_sdk_python.models.services.dpa.policies.db import ArkDPADBAddPolicy, ArkDPADBPoliciesFilter, ArkDPADBUpdatePolicy
from ark_sdk_python.models.services.dpa.policies.vm import ArkDPAVMAddPolicy, ArkDPAVMPoliciesFilter, ArkDPAVMUpdatePolicy
from ark_sdk_python.models.services.dpa.secrets.db import (
ArkDPADBAddSecret,
ArkDPADBDeleteSecret,
ArkDPADBDisableSecret,
ArkDPADBEnableSecret,
ArkDPADBGetSecret,
ArkDPADBSecretsFilter,
ArkDPADBUpdateSecret,
)
from ark_sdk_python.models.services.dpa.sso import (
ArkDPASSOGetShortLivedClientCertificate,
ArkDPASSOGetShortLivedOracleWallet,
ArkDPASSOGetShortLivedPassword,
ArkDPASSOGetShortLivedRDPFile,
)
from ark_sdk_python.models.services.dpa.workspaces.db import (
ArkDPADBAddDatabase,
ArkDPADBDatabasesFilter,
ArkDPADBDeleteDatabase,
ArkDPADBGetDatabase,
ArkDPADBUpdateDatabase,
) | 7,574 |
# Maps each DB-workspace action name to the schema model used to parse its
# arguments (None for actions that take no argument schema).
WORKSPACES_DB_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
    'add-database': ArkDPADBAddDatabase,
    'delete-database': ArkDPADBDeleteDatabase,
    'update-database': ArkDPADBUpdateDatabase,
    'list-databases': None,
    'list-databases-by': ArkDPADBDatabasesFilter,
    'database': ArkDPADBGetDatabase,
    'databases-stats': None,
}
# "db" action group exposing the workspace database operations defined above.
WORKSPACES_DB_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
    action_name='db', schemas=WORKSPACES_DB_ACTION_TO_SCHEMA_MAP
)
# Top-level "workspaces" action that nests the "db" subaction.
WORKSPACES_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
    action_name='workspaces', subactions=[WORKSPACES_DB_ACTION]
)
# VM policy CRUD/query actions and their request schema models.
POLICIES_VM_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
    'add-policy': ArkDPAVMAddPolicy,
    'delete-policy': ArkDPADeletePolicy,
    'update-policy': ArkDPAVMUpdatePolicy,
    'update-policy-status': ArkDPAUpdatePolicyStatus,
    'policy': ArkDPAGetPolicy,
    'list-policies': None,
    'list-policies-by': ArkDPAVMPoliciesFilter,
    'policies-stats': None,
}
# VM policies-editor actions (load/edit/diff/reset/commit workflow).
POLICIES_VM_EDITOR_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
    'load-policies': ArkDPALoadPolicies,
    'generate-policy': ArkDPAVMGeneratePolicy,
    'edit-policies': ArkDPAEditPolicies,
    'remove-policies': ArkDPARemovePolicies,
    'view-policies': ArkDPAViewPolicies,
    'reset-policies': ArkDPAResetPolicies,
    'policies-diff': ArkDPAPoliciesDiff,
    'policies-status': ArkDPAGetPoliciesStatus,
    'commit-policies': ArkDPACommitPolicies,
}
# "vm" action group: CRUD actions plus a nested "editor" subaction.
POLICIES_VM_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
    action_name='vm',
    schemas=POLICIES_VM_ACTION_TO_SCHEMA_MAP,
    subactions=[ArkServiceActionDefinition(action_name='editor', schemas=POLICIES_VM_EDITOR_ACTION_TO_SCHEMA_MAP)],
)
POLICIES_DB_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
'add-policy': ArkDPADBAddPolicy,
'delete-policy': ArkDPADeletePolicy,
'update-policy': ArkDPADBUpdatePolicy,
'update-policy-status': ArkDPAUpdatePolicyStatus,
'policy': ArkDPAGetPolicy,
'list-policies': None,
|
# Maps each DB-workspace action name to the schema model used to parse its
# arguments (None for actions that take no argument schema).
WORKSPACES_DB_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
    'add-database': ArkDPADBAddDatabase,
    'delete-database': ArkDPADBDeleteDatabase,
    'update-database': ArkDPADBUpdateDatabase,
    'list-databases': None,
    'list-databases-by': ArkDPADBDatabasesFilter,
    'database': ArkDPADBGetDatabase,
    'databases-stats': None,
}
# "db" action group exposing the workspace database operations defined above.
WORKSPACES_DB_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
    action_name='db', schemas=WORKSPACES_DB_ACTION_TO_SCHEMA_MAP
)
# Top-level "workspaces" action that nests the "db" subaction.
WORKSPACES_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
    action_name='workspaces', subactions=[WORKSPACES_DB_ACTION]
)
# VM policy CRUD/query actions and their request schema models.
POLICIES_VM_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
    'add-policy': ArkDPAVMAddPolicy,
    'delete-policy': ArkDPADeletePolicy,
    'update-policy': ArkDPAVMUpdatePolicy,
    'update-policy-status': ArkDPAUpdatePolicyStatus,
    'policy': ArkDPAGetPolicy,
    'list-policies': None,
    'list-policies-by': ArkDPAVMPoliciesFilter,
    'policies-stats': None,
}
# VM policies-editor actions (load/edit/diff/reset/commit workflow).
POLICIES_VM_EDITOR_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
    'load-policies': ArkDPALoadPolicies,
    'generate-policy': ArkDPAVMGeneratePolicy,
    'edit-policies': ArkDPAEditPolicies,
    'remove-policies': ArkDPARemovePolicies,
    'view-policies': ArkDPAViewPolicies,
    'reset-policies': ArkDPAResetPolicies,
    'policies-diff': ArkDPAPoliciesDiff,
    'policies-status': ArkDPAGetPoliciesStatus,
    'commit-policies': ArkDPACommitPolicies,
}
# "vm" action group: CRUD actions plus a nested "editor" subaction.
POLICIES_VM_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition(
    action_name='vm',
    schemas=POLICIES_VM_ACTION_TO_SCHEMA_MAP,
    subactions=[ArkServiceActionDefinition(action_name='editor', schemas=POLICIES_VM_EDITOR_ACTION_TO_SCHEMA_MAP)],
)
POLICIES_DB_ACTION_TO_SCHEMA_MAP: Final[Dict[(str, Optional[Type[ArkModel]])]] = {
'add-policy': ArkDPADBAddPolicy,
'delete-policy': ArkDPADeletePolicy,
'update-policy': ArkDPADBUpdatePolicy,
'update-policy-status': ArkDPAUpdatePolicyStatus,
'policy': ArkDPAGetPolicy,
'list-policies': None, | 'list-policies-by': ArkDPADBPoliciesFilter, | 25 | 2023-11-13 09:24:31+00:00 | 12k |
mohenghui/detectAuto_v8 | ultralytics/models/yolo/detect/predict.py | [
{
"identifier": "BasePredictor",
"path": "ultralytics/engine/predictor.py",
"snippet": "class BasePredictor:\n \"\"\"\n BasePredictor.\n\n A base class for creating predictors.\n\n Attributes:\n args (SimpleNamespace): Configuration for the predictor.\n save_dir (Path): Directo... | from ultralytics.engine.predictor import BasePredictor
from ultralytics.engine.results import Results
from ultralytics.utils import ops | 7,711 | # Ultralytics YOLO 🚀, AGPL-3.0 license
class DetectionPredictor(BasePredictor):
"""
A class extending the BasePredictor class for prediction based on a detection model.
Example:
```python
from ultralytics.utils import ASSETS
from ultralytics.models.yolo.detect import DetectionPredictor
args = dict(model='yolov8n.pt', source=ASSETS)
predictor = DetectionPredictor(overrides=args)
predictor.predict_cli()
```
"""
def postprocess(self, preds, img, orig_imgs):
"""Post-processes predictions and returns a list of Results objects."""
| # Ultralytics YOLO 🚀, AGPL-3.0 license
class DetectionPredictor(BasePredictor):
"""
A class extending the BasePredictor class for prediction based on a detection model.
Example:
```python
from ultralytics.utils import ASSETS
from ultralytics.models.yolo.detect import DetectionPredictor
args = dict(model='yolov8n.pt', source=ASSETS)
predictor = DetectionPredictor(overrides=args)
predictor.predict_cli()
```
"""
def postprocess(self, preds, img, orig_imgs):
"""Post-processes predictions and returns a list of Results objects.""" | preds = ops.non_max_suppression(preds, | 2 | 2023-11-16 12:49:59+00:00 | 12k |
i-super/Saleor | saleor/graphql/discount/utils.py | [
{
"identifier": "Promotion",
"path": "saleor/discount/models.py",
"snippet": "class Promotion(ModelWithMetadata):\n id = models.UUIDField(primary_key=True, editable=False, unique=True, default=uuid4)\n name = models.CharField(max_length=255)\n description = SanitizedJSONField(blank=True, null=T... | from collections import defaultdict
from copy import deepcopy
from enum import Enum
from typing import Optional, Union, cast
from django.db.models import Exists, OuterRef, QuerySet
from graphene.utils.str_converters import to_camel_case
from ...discount.models import Promotion, PromotionRule
from ...product.managers import ProductsQueryset, ProductVariantQueryset
from ...product.models import (
Category,
Collection,
CollectionProduct,
Product,
ProductVariant,
)
from ..core.connection import where_filter_qs
from ..product.filters import (
CategoryWhere,
CollectionWhere,
ProductVariantWhere,
ProductWhere,
)
import graphene | 8,459 |
# Type alias for a predicate "operator" payload: a list of sub-predicate dicts.
PREDICATE_OPERATOR_DATA_T = list[dict[str, Union[list, dict, str, bool]]]
# Logical connectives that may join sub-predicates inside a predicate payload.
class Operators(Enum):
    AND = "and"
    OR = "or"
# TODO: move to validators in promotion dir
def clean_predicate(predicate: Union[dict[str, Union[dict, list]], list]):
    """Recursively convert snake_case predicate keys into camelCase.

    Keys are rewritten with graphene's ``to_camel_case``; list elements and
    nested dict values are cleaned recursively, scalars are left untouched.
    """
    if isinstance(predicate, list):
        return [
            clean_predicate(item) if isinstance(item, (dict, list)) else item
            for item in predicate
        ]
    return {
        to_camel_case(key): clean_predicate(value)
        if isinstance(value, (dict, list))
        else value
        for key, value in predicate.items()
    }
def get_products_for_promotion(promotion: Promotion) -> ProductsQueryset:
    """Return the queryset of products covered by the promotion's catalogue predicates."""
    promo_variants = get_variants_for_promotion(promotion)
    has_matching_variant = Exists(promo_variants.filter(product_id=OuterRef("id")))
    return Product.objects.filter(has_matching_variant)
def get_products_for_rule(rule: PromotionRule) -> ProductsQueryset:
    """Return the queryset of products covered by the rule's catalogue predicate."""
    # A deep copy is handed on, leaving the rule's stored predicate untouched.
    predicate = deepcopy(rule.catalogue_predicate)
    rule_variants = get_variants_for_predicate(predicate)
    has_matching_variant = Exists(rule_variants.filter(product_id=OuterRef("id")))
    return Product.objects.filter(has_matching_variant)
def get_variants_for_promotion(promotion: Promotion) -> ProductVariantQueryset:
    """Return the queryset of variants matched by any rule of the promotion."""
    matched = ProductVariant.objects.none()
    for promotion_rule in promotion.rules.iterator():
        matched = matched | get_variants_for_predicate(promotion_rule.catalogue_predicate)
    return matched
def _handle_product_predicate(
    predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
    """Resolve a product-level predicate into the variants of the matching products."""
    matching_products = where_filter_qs(
        Product.objects.all(), {}, ProductWhere, predicate_data, None
    )
    belongs_to_match = matching_products.filter(id=OuterRef("product_id"))
    return ProductVariant.objects.filter(Exists(belongs_to_match))
def _handle_variant_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
return where_filter_qs(
ProductVariant.objects.all(), {}, ProductVariantWhere, predicate_data, None
)
def _handle_collection_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
collection_qs = where_filter_qs(
|
PREDICATE_OPERATOR_DATA_T = list[dict[str, Union[list, dict, str, bool]]]
class Operators(Enum):
AND = "and"
OR = "or"
# TODO: move to validators in promotion dir
def clean_predicate(predicate: Union[dict[str, Union[dict, list]], list]):
"""Convert camel cases keys into snake case."""
if isinstance(predicate, list):
return [
clean_predicate(item) if isinstance(item, (dict, list)) else item
for item in predicate
]
return {
to_camel_case(key): clean_predicate(value)
if isinstance(value, (dict, list))
else value
for key, value in predicate.items()
}
def get_products_for_promotion(promotion: Promotion) -> ProductsQueryset:
"""Get products that are included in the promotion based on catalogue predicate."""
variants = get_variants_for_promotion(promotion)
return Product.objects.filter(Exists(variants.filter(product_id=OuterRef("id"))))
def get_products_for_rule(rule: PromotionRule) -> ProductsQueryset:
"""Get products that are included in the rule based on catalogue predicate."""
variants = get_variants_for_predicate(deepcopy(rule.catalogue_predicate))
return Product.objects.filter(Exists(variants.filter(product_id=OuterRef("id"))))
def get_variants_for_promotion(promotion: Promotion) -> ProductVariantQueryset:
"""Get variants that are included in the promotion based on catalogue predicate."""
queryset = ProductVariant.objects.none()
for rule in promotion.rules.iterator():
queryset |= get_variants_for_predicate(rule.catalogue_predicate)
return queryset
def _handle_product_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
product_qs = where_filter_qs(
Product.objects.all(), {}, ProductWhere, predicate_data, None
)
return ProductVariant.objects.filter(
Exists(product_qs.filter(id=OuterRef("product_id")))
)
def _handle_variant_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
return where_filter_qs(
ProductVariant.objects.all(), {}, ProductVariantWhere, predicate_data, None
)
def _handle_collection_predicate(
predicate_data: dict[str, Union[dict, list]]
) -> ProductVariantQueryset:
collection_qs = where_filter_qs( | Collection.objects.all(), {}, CollectionWhere, predicate_data, None | 5 | 2023-11-13 05:00:35+00:00 | 12k |
Aues6uen11Z/Zafkiel | zafkiel/ui/ui.py | [
{
"identifier": "ImageTemplate",
"path": "zafkiel/device/template.py",
"snippet": "class ImageTemplate(Template):\n def __init__(\n self,\n filename: str,\n record_pos: tuple = None,\n keyword: Keyword = None,\n threshold: float = None,\n ... | from zafkiel.device.template import ImageTemplate as Template
from zafkiel.logger import logger
from zafkiel.device.api import API
from zafkiel.ocr.ocr import Ocr
from zafkiel.ui.page import Page
from zafkiel.decorator import run_once
from zafkiel.exception import NotRunningError, PageUnknownError, ScriptError
from zafkiel.timer import Timer
from zafkiel.ui.switch import Switch | 9,446 |
class UI(API):
"""
Processing interface related functions.
Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py
and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
"""
# Make ui_current mutable so that it can be shared among subclasses of the UI class.
ui_current: dict = {'page': None}
popup_list: list = []
def ui_switch_appear(self, switch: Switch) -> bool:
"""
Args:
switch:
"""
if self.ui_get_current_page().switch != switch:
return False
for data in switch.state_list:
if self.exists(data['check_button']):
return True
return False
def ui_get_current_state(self, switch: Switch) -> str:
"""
Args:
switch:
Returns:
state name or 'unknown'.
"""
if self.ui_current['page'].switch != switch:
logger.warning(f"{self.ui_current['page']} does not have {switch}")
return 'unknown'
for data in switch.state_list:
if self.exists(data['check_button']):
return data['state']
return 'unknown'
def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:
"""
Args:
page:
timeout: Seconds to find.
Returns:
If found, return tuple of (x, y), else return False.
"""
return self.exists(page.check_button, timeout)
def ui_get_current_page(self):
"""
Returns:
Page:
Raises:
NotRunningError:
PageUnknownError:
"""
@run_once
def app_check():
if not self.app_is_running():
raise NotRunningError("Game not running")
timeout = Timer(10, count=20).start()
while True:
# End
if timeout.reached():
break
# Known pages
for page in Page.iter_pages():
if page.check_button is None:
continue
if self.ui_page_appear(page=page):
self.ui_current['page'] = page
return page
# Unknown page but able to handle
if self.ui_additional():
timeout.reset()
continue
app_check()
# Unknown page, need manual switching
|
class UI(API):
"""
Processing interface related functions.
Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py
and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
"""
# Make ui_current mutable so that it can be shared among subclasses of the UI class.
ui_current: dict = {'page': None}
popup_list: list = []
def ui_switch_appear(self, switch: Switch) -> bool:
"""
Args:
switch:
"""
if self.ui_get_current_page().switch != switch:
return False
for data in switch.state_list:
if self.exists(data['check_button']):
return True
return False
def ui_get_current_state(self, switch: Switch) -> str:
"""
Args:
switch:
Returns:
state name or 'unknown'.
"""
if self.ui_current['page'].switch != switch:
logger.warning(f"{self.ui_current['page']} does not have {switch}")
return 'unknown'
for data in switch.state_list:
if self.exists(data['check_button']):
return data['state']
return 'unknown'
def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:
"""
Args:
page:
timeout: Seconds to find.
Returns:
If found, return tuple of (x, y), else return False.
"""
return self.exists(page.check_button, timeout)
def ui_get_current_page(self):
"""
Returns:
Page:
Raises:
NotRunningError:
PageUnknownError:
"""
@run_once
def app_check():
if not self.app_is_running():
raise NotRunningError("Game not running")
timeout = Timer(10, count=20).start()
while True:
# End
if timeout.reached():
break
# Known pages
for page in Page.iter_pages():
if page.check_button is None:
continue
if self.ui_page_appear(page=page):
self.ui_current['page'] = page
return page
# Unknown page but able to handle
if self.ui_additional():
timeout.reset()
continue
app_check()
# Unknown page, need manual switching | raise PageUnknownError | 7 | 2023-11-12 09:33:35+00:00 | 12k |
medkit-lib/medkit | medkit/io/doccano.py | [
{
"identifier": "Attribute",
"path": "medkit/core/attribute.py",
"snippet": "class Attribute(dict_conv.SubclassMapping):\n \"\"\"\n Medkit attribute, to be added to an annotation\n\n Attributes\n ----------\n label:\n The attribute label\n value:\n The value of the attrib... | import dataclasses
import enum
import json
import logging
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from zipfile import ZipFile
from typing_extensions import Self
from medkit.core import Attribute, OperationDescription, ProvTracer
from medkit.core.id import generate_deterministic_id, generate_id
from medkit.core.text import Entity, Relation, Span, TextDocument, span_utils
from medkit.io._common import get_anns_by_type | 9,460 | """Convert medkit files to doccano files (.JSONL) for a given task.
For each :class:`~medkit.core.text.TextDocument` a jsonline will be created.
"""
def __init__(
self,
task: DoccanoTask,
anns_labels: Optional[List[str]] = None,
attr_label: Optional[str] = None,
ignore_segments: bool = True,
include_metadata: Optional[bool] = True,
uid: Optional[str] = None,
):
"""
Parameters
----------
task:
The doccano task for the input converter
anns_labels:
Labels of medkit annotations to convert into doccano annotations.
If `None` (default) all the entities or relations will be converted.
Useful for :class:`~.io.DoccanoTask.SEQUENCE_LABELING` or
:class:`~.io.DoccanoTask.RELATION_EXTRACTION` converters.
attr_label:
The label of the medkit attribute that represents the text category.
Useful for :class:`~.io.DoccanoTask.TEXT_CLASSIFICATION` converters.
ignore_segments:
If `True` medkit segments will be ignored. Only entities will be
converted to Doccano entities. If `False` the medkit segments will
be converted to Doccano entities as well.
Useful for :class:`~.io.DoccanoTask.SEQUENCE_LABELING` or
:class:`~.io.DoccanoTask.RELATION_EXTRACTION` converters.
include_metadata:
Whether include medkit metadata in the converted documents
uid:
Identifier of the converter.
"""
if uid is None:
uid = generate_id()
self.uid = uid
self.task = task
self.anns_labels = anns_labels
self.attr_label = attr_label
self.ignore_segments = ignore_segments
self.include_metadata = include_metadata
@property
def description(self) -> OperationDescription:
return OperationDescription(
uid=self.uid,
name=self.__class__.__name__,
class_name=self.__class__.__name__,
config=dict(task=self.task.value),
)
def save(self, docs: List[TextDocument], output_file: Union[str, Path]):
"""Convert and save a list of TextDocuments into a doccano file (.JSONL)
Parameters
----------
docs:
List of medkit doc objects to convert
output_file:
Path or string of the JSONL file where to save the converted documents
"""
output_file = Path(output_file)
with open(output_file, mode="w", encoding="utf-8") as fp:
for medkit_doc in docs:
doc_line = self._convert_doc_by_task(medkit_doc)
fp.write(json.dumps(doc_line, ensure_ascii=False) + "\n")
def _convert_doc_by_task(self, medkit_doc: TextDocument) -> Dict[str, Any]:
"""Convert a TextDocument into a dictionary depending on the task
Parameters
----------
medkit_doc:
Document to convert
Returns
-------
Dict[str,Any]
Dictionary with doccano annotation
"""
if self.task == DoccanoTask.RELATION_EXTRACTION:
return self._convert_doc_relation_extraction(medkit_doc=medkit_doc)
if self.task == DoccanoTask.TEXT_CLASSIFICATION:
return self._convert_doc_text_classification(medkit_doc=medkit_doc)
if self.task == DoccanoTask.SEQUENCE_LABELING:
return self._convert_doc_seq_labeling(medkit_doc=medkit_doc)
def _convert_doc_relation_extraction(self, medkit_doc: TextDocument) -> Dict[str, Any]:
"""Convert a TextDocument to a doc_line compatible
with the doccano relation extraction task
Parameters
----------
medkit_doc:
Document to convert, it may contain entities and relations
Returns
-------
Dict[str,Any]
Dictionary with doccano annotation. It may contain
text, entities and relations
"""
doccano_ents_by_medkit_uid = dict()
doccano_relations = []
anns_by_type = get_anns_by_type(medkit_doc, self.anns_labels)
medkit_segments = anns_by_type["entities"]
if not self.ignore_segments:
medkit_segments += anns_by_type["segments"]
for medkit_segment in medkit_segments:
spans = span_utils.normalize_spans(medkit_segment.spans)
| __all__ = [
"DoccanoTask",
"DoccanoClientConfig",
"DoccanoInputConverter",
"DoccanoOutputConverter",
]
logger = logging.getLogger(__name__)
class DoccanoTask(enum.Enum):
"""Supported doccano tasks. The task defines
the type of document to convert.
Attributes
----------
TEXT_CLASSIFICATION
Documents with a category
RELATION_EXTRACTION
Documents with entities and relations (including IDs)
SEQUENCE_LABELING
Documents with entities in tuples
"""
TEXT_CLASSIFICATION = "text_classification"
RELATION_EXTRACTION = "relation_extraction"
SEQUENCE_LABELING = "sequence_labeling"
@dataclasses.dataclass
class DoccanoClientConfig:
"""A class representing the configuration in the doccano client.
The default values are the default values used by doccano.
Attributes
----------
column_text:
Name or key representing the text
column_label:
Name or key representing the label
"""
column_text: str = "text"
column_label: str = "label"
# FIXME: datamodels to factorize in _doccano_utils
@dataclasses.dataclass()
class _DoccanoEntity:
id: int
start_offset: int
end_offset: int
label: str
def to_dict(self) -> Dict[str, Any]:
entity_dict = dict(
id=self.id,
start_offset=self.start_offset,
end_offset=self.end_offset,
label=self.label,
)
return entity_dict
@dataclasses.dataclass()
class _DoccanoEntityTuple:
start_offset: int
end_offset: int
label: str
def to_tuple(self) -> Tuple[int, int, str]:
return (self.start_offset, self.end_offset, self.label)
@dataclasses.dataclass()
class _DoccanoRelation:
id: int
from_id: int
to_id: int
type: str
def to_dict(self) -> Dict[str, Any]:
relation_dict = dict(
id=self.id,
from_id=self.from_id,
to_id=self.to_id,
type=self.type,
)
return relation_dict
@dataclasses.dataclass()
class _DoccanoDocRelationExtraction:
text: str
entities: List[_DoccanoEntity]
relations: List[_DoccanoRelation]
metadata: Dict[str, Any]
@classmethod
def from_dict(cls, doc_line: Dict[str, Any], client_config: DoccanoClientConfig) -> Self:
text: str = doc_line.pop(client_config.column_text)
entities = [_DoccanoEntity(**ann) for ann in doc_line.pop("entities")]
relations = [_DoccanoRelation(**ann) for ann in doc_line.pop("relations")]
# in doccano, metadata is what remains after removing key fields
metadata = doc_line
return cls(text=text, entities=entities, relations=relations, metadata=metadata)
def to_dict(self) -> Dict[str, Any]:
doc_dict = dict(text=self.text)
doc_dict["entities"] = [ent.to_dict() for ent in self.entities]
doc_dict["relations"] = [rel.to_dict() for rel in self.relations]
doc_dict.update(self.metadata)
return doc_dict
@dataclasses.dataclass()
class _DoccanoDocSeqLabeling:
text: str
entities: List[_DoccanoEntityTuple]
metadata: Dict[str, Any]
@classmethod
def from_dict(cls, doc_line: Dict[str, Any], client_config: DoccanoClientConfig) -> Self:
text = doc_line.pop(client_config.column_text)
entities = [_DoccanoEntityTuple(*ann) for ann in doc_line.pop(client_config.column_label)]
# in doccano, metadata is what remains after removing key fields
metadata = doc_line
return cls(text=text, entities=entities, metadata=metadata)
def to_dict(self) -> Dict[str, Any]:
doc_dict = dict(text=self.text)
doc_dict["label"] = [ent.to_tuple() for ent in self.entities]
doc_dict.update(self.metadata)
return doc_dict
@dataclasses.dataclass()
class _DoccanoDocTextClassification:
text: str
label: str
metadata: Dict[str, Any]
@classmethod
def from_dict(cls, doc_line: Dict[str, Any], client_config: DoccanoClientConfig) -> Self:
text = doc_line.pop(client_config.column_text)
label = doc_line.pop(client_config.column_label)[0]
if not isinstance(label, str):
raise TypeError(
"The label must be a string. Please check if the document corresponds"
" to a text classification task rather than sequence labeling"
)
# in doccano, metadata is what remains after removing key fields
metadata = doc_line
return cls(text=text, label=label, metadata=metadata)
def to_dict(self) -> Dict[str, Any]:
doc_dict = dict(text=self.text, label=[str(self.label)])
doc_dict.update(self.metadata)
return doc_dict
class DoccanoInputConverter:
"""Convert doccano files (.JSONL) containing annotations for a given task.
For each line, a :class:`~.core.text.TextDocument` will be created.
The doccano files can be loaded from a directory with zip files or from a jsonl file.
The converter supports custom configuration to define the parameters used by doccano
when importing the data (c.f. :class:`~.io.doccano.DoccanoClientConfig`)
.. warning::
If the option *Count grapheme clusters as one character* was selected
when creating the doccano project, the converted documents are
likely to have alignment problems; the converter does not support this option.
"""
def __init__(
self,
task: DoccanoTask,
client_config: Optional[DoccanoClientConfig] = None,
attr_label: str = "doccano_category",
uid: Optional[str] = None,
):
"""
Parameters
----------
task:
The doccano task for the input converter
client_config:
Optional client configuration to define default values in doccano interface.
This config can change, for example, the name of the text field or labels.
attr_label:
The label to use for the medkit attribute that represents the doccano category.
This is related to :class:`~.io.DoccanoTask.TEXT_CLASSIFICATION` projects.
uid:
Identifier of the converter.
"""
if uid is None:
uid = generate_id()
if client_config is None:
client_config = DoccanoClientConfig()
self.uid = uid
self.client_config = client_config
self.task = task
self.attr_label = attr_label
self._prov_tracer: Optional[ProvTracer] = None
def set_prov_tracer(self, prov_tracer: ProvTracer):
"""Enable provenance tracing.
Parameters
----------
prov_tracer:
The provenance tracer used to trace the provenance.
"""
self._prov_tracer = prov_tracer
@property
def description(self) -> OperationDescription:
"""Contains all the input converter init parameters."""
return OperationDescription(
uid=self.uid,
name=self.__class__.__name__,
class_name=self.__class__.__name__,
config=dict(task=self.task.value),
)
def load_from_directory_zip(self, dir_path: Union[str, Path]) -> List[TextDocument]:
"""Create a list of TextDocuments from zip files in a directory.
The zip files should contain a JSONL file coming from doccano.
Parameters
----------
dir_path:
The path to the directory containing zip files.
Returns
-------
List[TextDocument]
A list of TextDocuments
"""
documents = []
for path_zip in sorted(Path(dir_path).glob("*.zip")):
documents.extend(self.load_from_zip(path_zip))
if len(documents) == 0:
logger.warning(f"No .zip nor .jsonl found in '{dir_path}'")
return documents
def load_from_zip(self, input_file: Union[str, Path]) -> List[TextDocument]:
"""
Create a list of TextDocuments from a zip file containing a JSONL file
coming from doccano.
Parameters
----------
input_file:
The path to the zip file containing a docanno JSONL file
Returns
-------
List[TextDocument]
A list of TextDocuments
"""
with tempfile.TemporaryDirectory() as tmpdir:
with ZipFile(input_file, mode="r") as zip_file:
filename = zip_file.namelist()[0]
unzipped_file = Path(tmpdir) / filename
zip_file.extract(filename, tmpdir)
return self.load_from_file(unzipped_file)
def load_from_file(self, input_file: Union[str, Path]) -> List[TextDocument]:
"""Create a list of TextDocuments from a doccano JSONL file.
Parameters
----------
input_file:
The path to the JSONL file containing doccano annotations
Returns
-------
List[TextDocument]
A list of TextDocuments
"""
documents = []
with open(Path(input_file), encoding="utf-8") as fp:
for line in fp:
doc_line = json.loads(line)
doc = self._parse_doc_line(doc_line)
documents.append(doc)
self._check_crlf_character(documents)
return documents
def _check_crlf_character(self, documents: List[TextDocument]):
"""Check if the list of converted documents contains the CRLF character.
This character is the only indicator available to warn
if there are alignment problems in the documents"""
if self.task == DoccanoTask.RELATION_EXTRACTION or self.task == DoccanoTask.SEQUENCE_LABELING:
nb_docs_with_warning = sum(document.text.find("\r\n") != -1 for document in documents)
if nb_docs_with_warning > 0:
logger.warning(
f"{nb_docs_with_warning}/{len(documents)} documents contain"
" '\\r\\n' characters. If you have selected 'Count grapheme"
" clusters as one character' when creating the doccano project,"
" converted documents are likely to have alignment problems.\n"
" Please ignore this message if you did not select this option when"
" creating the project."
)
def _parse_doc_line(self, doc_line: Dict[str, Any]) -> TextDocument:
"""Parse a doc_line into a TextDocument depending on the task
Parameters
----------
doc_line:
A dictionary representing an annotation from doccano
Returns
-------
TextDocument
A document with parsed annotations.
"""
if self.task == DoccanoTask.RELATION_EXTRACTION:
return self._parse_doc_line_relation_extraction(doc_line=doc_line)
if self.task == DoccanoTask.TEXT_CLASSIFICATION:
return self._parse_doc_line_text_classification(doc_line=doc_line)
if self.task == DoccanoTask.SEQUENCE_LABELING:
return self._parse_doc_line_seq_labeling(doc_line=doc_line)
def _parse_doc_line_relation_extraction(self, doc_line: Dict[str, Any]) -> TextDocument:
"""Parse a dictionary and return a TextDocument with entities and relations
Parameters
----------
doc_line:
Dictionary with doccano annotation
Returns
-------
TextDocument
The document with annotations
"""
try:
doccano_doc = _DoccanoDocRelationExtraction.from_dict(doc_line, client_config=self.client_config)
except Exception as err:
raise Exception(
"Impossible to convert the document. Please check the task"
" or the client configuration of the converter"
) from err
ents_by_doccano_id = dict()
relations = []
for doccano_entity in doccano_doc.entities:
text = doccano_doc.text[doccano_entity.start_offset : doccano_entity.end_offset]
entity = Entity(
text=text,
label=doccano_entity.label,
spans=[Span(doccano_entity.start_offset, doccano_entity.end_offset)],
metadata=dict(doccano_id=doccano_entity.id),
)
ents_by_doccano_id[doccano_entity.id] = entity
if self._prov_tracer is not None:
self._prov_tracer.add_prov(entity, self.description, source_data_items=[])
for doccano_relation in doccano_doc.relations:
relation = Relation(
label=doccano_relation.type,
source_id=ents_by_doccano_id[doccano_relation.from_id].uid,
target_id=ents_by_doccano_id[doccano_relation.to_id].uid,
metadata=dict(doccano_id=doccano_relation.id),
)
relations.append(relation)
if self._prov_tracer is not None:
self._prov_tracer.add_prov(relation, self.description, source_data_items=[])
anns = list(ents_by_doccano_id.values()) + relations
doc = TextDocument(
text=doccano_doc.text,
anns=anns,
metadata=doccano_doc.metadata,
)
return doc
def _parse_doc_line_seq_labeling(self, doc_line: Dict[str, Any]) -> TextDocument:
"""Parse a dictionary and return a TextDocument with entities
Parameters
----------
doc_line:
Dictionary with doccano annotation.
Returns
-------
TextDocument
The document with annotations
"""
try:
doccano_doc = _DoccanoDocSeqLabeling.from_dict(doc_line, client_config=self.client_config)
except Exception as err:
raise Exception(
"Impossible to convert the document. Please check the task"
" or the client configuration of the converter"
) from err
entities = []
for doccano_entity in doccano_doc.entities:
text = doccano_doc.text[doccano_entity.start_offset : doccano_entity.end_offset]
entity = Entity(
text=text,
label=doccano_entity.label,
spans=[Span(doccano_entity.start_offset, doccano_entity.end_offset)],
)
entities.append(entity)
if self._prov_tracer is not None:
self._prov_tracer.add_prov(entity, self.description, source_data_items=[])
doc = TextDocument(
text=doccano_doc.text,
anns=entities,
metadata=doccano_doc.metadata,
)
return doc
def _parse_doc_line_text_classification(self, doc_line: Dict[str, Any]) -> TextDocument:
"""Parse a dictionary and return a TextDocument with an attribute.
Parameters
----------
doc_line:
Dictionary with doccano annotation.
Returns
-------
TextDocument
The document with its category
"""
try:
doccano_doc = _DoccanoDocTextClassification.from_dict(doc_line, client_config=self.client_config)
except Exception as err:
raise Exception(
"Impossible to convert the document. Please check the task"
" or the client configuration of the converter"
) from err
attr = Attribute(label=self.attr_label, value=doccano_doc.label)
if self._prov_tracer is not None:
self._prov_tracer.add_prov(attr, self.description, source_data_items=[])
doc = TextDocument(text=doccano_doc.text, metadata=doccano_doc.metadata)
doc.attrs.add(attr)
return doc
class DoccanoOutputConverter:
"""Convert medkit files to doccano files (.JSONL) for a given task.
For each :class:`~medkit.core.text.TextDocument` a jsonline will be created.
"""
def __init__(
self,
task: DoccanoTask,
anns_labels: Optional[List[str]] = None,
attr_label: Optional[str] = None,
ignore_segments: bool = True,
include_metadata: Optional[bool] = True,
uid: Optional[str] = None,
):
"""
Parameters
----------
task:
The doccano task for the input converter
anns_labels:
Labels of medkit annotations to convert into doccano annotations.
If `None` (default) all the entities or relations will be converted.
Useful for :class:`~.io.DoccanoTask.SEQUENCE_LABELING` or
:class:`~.io.DoccanoTask.RELATION_EXTRACTION` converters.
attr_label:
The label of the medkit attribute that represents the text category.
Useful for :class:`~.io.DoccanoTask.TEXT_CLASSIFICATION` converters.
ignore_segments:
If `True` medkit segments will be ignored. Only entities will be
converted to Doccano entities. If `False` the medkit segments will
be converted to Doccano entities as well.
Useful for :class:`~.io.DoccanoTask.SEQUENCE_LABELING` or
:class:`~.io.DoccanoTask.RELATION_EXTRACTION` converters.
include_metadata:
Whether include medkit metadata in the converted documents
uid:
Identifier of the converter.
"""
if uid is None:
uid = generate_id()
self.uid = uid
self.task = task
self.anns_labels = anns_labels
self.attr_label = attr_label
self.ignore_segments = ignore_segments
self.include_metadata = include_metadata
@property
def description(self) -> OperationDescription:
return OperationDescription(
uid=self.uid,
name=self.__class__.__name__,
class_name=self.__class__.__name__,
config=dict(task=self.task.value),
)
def save(self, docs: List[TextDocument], output_file: Union[str, Path]):
"""Convert and save a list of TextDocuments into a doccano file (.JSONL)
Parameters
----------
docs:
List of medkit doc objects to convert
output_file:
Path or string of the JSONL file where to save the converted documents
"""
output_file = Path(output_file)
with open(output_file, mode="w", encoding="utf-8") as fp:
for medkit_doc in docs:
doc_line = self._convert_doc_by_task(medkit_doc)
fp.write(json.dumps(doc_line, ensure_ascii=False) + "\n")
def _convert_doc_by_task(self, medkit_doc: TextDocument) -> Dict[str, Any]:
"""Convert a TextDocument into a dictionary depending on the task
Parameters
----------
medkit_doc:
Document to convert
Returns
-------
Dict[str,Any]
Dictionary with doccano annotation
"""
if self.task == DoccanoTask.RELATION_EXTRACTION:
return self._convert_doc_relation_extraction(medkit_doc=medkit_doc)
if self.task == DoccanoTask.TEXT_CLASSIFICATION:
return self._convert_doc_text_classification(medkit_doc=medkit_doc)
if self.task == DoccanoTask.SEQUENCE_LABELING:
return self._convert_doc_seq_labeling(medkit_doc=medkit_doc)
def _convert_doc_relation_extraction(self, medkit_doc: TextDocument) -> Dict[str, Any]:
"""Convert a TextDocument to a doc_line compatible
with the doccano relation extraction task
Parameters
----------
medkit_doc:
Document to convert, it may contain entities and relations
Returns
-------
Dict[str,Any]
Dictionary with doccano annotation. It may contain
text, entities and relations
"""
doccano_ents_by_medkit_uid = dict()
doccano_relations = []
anns_by_type = get_anns_by_type(medkit_doc, self.anns_labels)
medkit_segments = anns_by_type["entities"]
if not self.ignore_segments:
medkit_segments += anns_by_type["segments"]
for medkit_segment in medkit_segments:
spans = span_utils.normalize_spans(medkit_segment.spans) | ann_id = generate_deterministic_id(medkit_segment.uid) | 3 | 2023-11-13 16:28:56+00:00 | 12k |
interpretml/LLM-Tabular-Memorization-Checker | tabmemcheck/functions.py | [
{
"identifier": "LLM_Interface",
"path": "tabmemcheck/llm.py",
"snippet": "class LLM_Interface:\n \"\"\"The interface to the language model.\"\"\"\n\n # if true, the tests use the chat_completion function, otherwise the completion function\n chat_mode = False\n\n def completion(self, prompt,... | import os
import numpy as np
import pandas as pd
import tabmemcheck as tabmem
import tabmemcheck.analysis as analysis
import tabmemcheck.utils as utils
from typing import Any, Union
from difflib import SequenceMatcher
from tabmemcheck.llm import (
LLM_Interface,
ChatWrappedLLM,
send_chat_completion,
send_completion,
bcolors,
)
from tabmemcheck.row_independence import statistical_feature_prediction_test
from tabmemcheck.chat_completion import (
prefix_suffix_chat_completion,
row_chat_completion,
row_completion,
feature_values_chat_completion,
) | 7,972 |
def feature_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
feature_name: str = None,
num_queries=100,
few_shot=5,
out_file=None,
system_prompt: str = "default",
):
"""Feature completion test where we attempt to predict a single rare feature & count the number of exact matches.
The basic prompt format is the following:
System: <system_prompt>
User: Feature 1 = value 1, Feature 2 = value 2, ..., Feature n = value n
Response: Feature {feature_name} = value
This can be modified in the following ways:
- Include few-shot examples from other csv files.
- Don't use the feature names, but only the values.
"""
llm = __llm_setup(llm)
# TODO statistical analysis of the uniqueness of the feature (i.e., is the test appropriate?!)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["feature-completion"]
# if no feature value is provided, automatically select the most unique feature
if feature_name is None:
feature_name, frac_unique_values = analysis.find_most_unique_feature(csv_file)
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"Using feature {feature_name} with {100*frac_unique_values:.2f}% unique values."
)
# all the other features are the conditional features
feature_names = utils.get_feature_names(csv_file)
cond_feature_names = [f for f in feature_names if f != feature_name]
if not llm.chat_mode: # wrap base model to take chat queries
def build_prompt(messages):
prompt = ""
for m in messages:
if m["role"] == "user":
prompt += m["content"]
elif m["role"] == "assistant":
prompt += ", " + m["content"] + "\n\n"
prompt += ", "
return prompt
llm = ChatWrappedLLM(llm, build_prompt, ends_with="\n\n")
# execute the prompt
_, test_suffixes, responses = feature_values_chat_completion(
llm,
csv_file,
system_prompt,
num_queries,
few_shot,
cond_feature_names,
add_description=False,
out_file=out_file,
)
# parse the model responses
response_df = utils.parse_feature_stings(responses, [feature_name])
test_suffix_df = utils.parse_feature_stings(test_suffixes, [feature_name])
# count number of exact matches
num_exact_matches = np.sum(
response_df[feature_name] == test_suffix_df[feature_name]
)
# print the result
print(
bcolors.BOLD
+ f'Feature Completion Test ("{feature_name}"): '
+ bcolors.ENDC
+ bcolors.Black
+ f"{num_exact_matches}/{num_queries} exact matches."
+ bcolors.ENDC
)
####################################################################################
# First Token Test
####################################################################################
def first_token_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_rows=10,
num_queries=100,
few_shot=7,
out_file=None,
system_prompt: str = "default",
):
"""First token test: Complete the first token of the next row of the csv file, given the previous rows."""
llm = __llm_setup(llm)
if (
system_prompt == "default"
): # default system prompt? (the first token test asks the model to complete the same task as row completion, only the evaluation is different)
system_prompt = tabmem.config.system_prompts["row-completion"]
# determine the number of digits that the first token should have
num_digits = analysis.build_first_token(csv_file)
# run a feature prediction test to see if the first token is actually random
df = utils.load_csv_df(csv_file)
rows = utils.load_csv_rows(csv_file, header=False)
df["FIRST_TOKEN_TEST_ROW"] = [r[:num_digits] for r in rows]
df["FIRST_TOKEN_TEST_ROW"] = df["FIRST_TOKEN_TEST_ROW"].astype(str)
tmp_csv_file = utils.tmp_csv_file(
df, utils.get_dataset_name(csv_file) + ".csv"
) # save the df to a tmp csv file
|
# Datasets shipped with the package that are used as few-shot examples in the
# tests when the caller does not provide their own. If the dataset under test
# is one of these, __validate_few_shot_files swaps it for openml-diabetes.csv.
DEFAULT_FEW_SHOT_CSV_FILES = [
    "iris.csv",
    "adult-train.csv",
    "titanic-train.csv",
    "uci-wine.csv",
    "california-housing.csv",
]
def __difflib_similar(csv_file_1, csv_file_2):
sm = SequenceMatcher(
None, utils.load_csv_string(csv_file_1), utils.load_csv_string(csv_file_2)
)
if sm.quick_ratio() > 0.9:
return sm.ratio() > 0.9
return False
def __validate_few_shot_files(csv_file, few_shot_csv_files):
"""check if the csv_file is contained in the few_shot_csv_files."""
dataset_name = utils.get_dataset_name(csv_file)
few_shot_names = [utils.get_dataset_name(x) for x in few_shot_csv_files]
if dataset_name in few_shot_names:
# replace the dataset_name with open-ml diabetes
few_shot_csv_files = [
x for x in few_shot_csv_files if utils.get_dataset_name(x) != dataset_name
]
few_shot_csv_files.append("openml-diabetes.csv")
# now test with difflib if the dataset contents are very similar
for fs_file in few_shot_csv_files:
if __difflib_similar(csv_file, fs_file):
print(
bcolors.BOLD
+ "Warning: "
+ bcolors.ENDC
+ f"The dataset is very similar to the few-shot dataset {utils.get_dataset_name(fs_file)}."
)
return few_shot_csv_files
def __llm_setup(llm: Union[LLM_Interface, str]):
# if llm is a string, assume open ai model
if isinstance(llm, str):
llm = tabmem.openai_setup(llm)
return llm
def __print_info(csv_file, llm, few_shot_csv_files):
"""Print some information about the csv file and the model."""
print(
bcolors.BOLD
+ "Dataset: "
+ bcolors.ENDC
+ f"{utils.get_dataset_name(csv_file)}"
)
print(bcolors.BOLD + "Model: " + bcolors.ENDC + f"{llm}")
print(
bcolors.BOLD
+ "Few-Shot: "
+ bcolors.ENDC
+ ", ".join(
[utils.get_dataset_name(fs_csv_file) for fs_csv_file in few_shot_csv_files]
)
)
####################################################################################
# All the tests
####################################################################################
def run_all_tests(
csv_file: str,
llm: Union[LLM_Interface, str],
few_shot_csv_files=DEFAULT_FEW_SHOT_CSV_FILES,
feature_name=None,
):
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
__print_info(csv_file, llm, few_shot_csv_files)
feature_names_test(csv_file, llm, few_shot_csv_files=few_shot_csv_files)
# todo feature values
header_test(csv_file, llm, few_shot_csv_files=few_shot_csv_files)
# draw 10 zero-knowledge samples
print(
bcolors.BOLD
+ "Drawing 10 zero-knowledge samples at temperature 0.7:"
+ bcolors.ENDC
)
temp = tabmem.config.temperature
tabmem.config.temperature = 0.7
samples_df = sample(
csv_file, llm, num_queries=10, few_shot_csv_files=few_shot_csv_files
)
# print the data frame unless it is empty
if (not samples_df.empty) and len(samples_df) > 0:
pd.set_option("display.expand_frame_repr", False)
print(samples_df)
if len(samples_df) < 10:
print(f"The model provided {len(samples_df)} valid samples.")
else:
print("The model was not able to provide valid samples.")
tabmem.config.temperature = temp
row_completion_test(csv_file, llm, num_queries=25)
feature_completion_test(csv_file, llm, num_queries=25, feature_name=feature_name)
first_token_test(csv_file, llm, num_queries=25)
####################################################################################
# Feature Names
####################################################################################
def feature_names_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_features: int = None,
few_shot_csv_files=DEFAULT_FEW_SHOT_CSV_FILES,
system_prompt: str = "default",
):
"""Test if the model knows the names of the features.
The prompt format is:
System: <system_prompt>
User: Dataset: <dataset_name>
Feature 1, Feature 2, ..., Feature n
Response: Feature n+1, Feature n+2, ..., Feature m
This can be modified in the following ways:
- Include few-shot examples from other csv files.
"""
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
# default system prompt?
if system_prompt == "default":
system_prompt = tabmem.config.system_prompts["feature-names"]
dataset_name = utils.get_dataset_name(csv_file)
feature_names = utils.get_feature_names(csv_file)
# by default, use 1/4 of the features as prefix, but at least one
if num_prefix_features is None:
num_prefix_features = max(1, len(feature_names) // 4)
# remove the current csv file from the few-shot csv files should it be present there
few_shot_csv_files = [x for x in few_shot_csv_files if not dataset_name in x]
# setup for the few-shot examples
fs_dataset_names = [utils.get_dataset_name(x) for x in few_shot_csv_files]
fs_feature_names = [
utils.get_feature_names(fs_csv_file) for fs_csv_file in few_shot_csv_files
]
fs_prefix_feature = [
utils.adjust_num_prefix_features(csv_file, num_prefix_features, fs_csv_file)
for fs_csv_file in few_shot_csv_files
]
if llm.chat_mode:
# construt the prompt
prefixes = [
f"Dataset: {dataset_name}. Feature Names: "
+ ", ".join(feature_names[:num_prefix_features])
]
suffixes = [", ".join(feature_names[num_prefix_features:])]
few_shot = []
for fs_dataset_name, fs_feature_name, fs_prefix_feature in zip(
fs_dataset_names, fs_feature_names, fs_prefix_feature
):
few_shot.append(
(
[
f"Dataset: {fs_dataset_name}. Feature Names: "
+ ", ".join(fs_feature_name[:fs_prefix_feature])
],
[", ".join(fs_feature_name[fs_prefix_feature:])],
)
)
# execute the the prompt
_, _, responses = prefix_suffix_chat_completion(
llm,
prefixes,
suffixes,
system_prompt,
few_shot=few_shot,
num_queries=1,
)
response = responses[0]
else:
# construct the prompt
prompt = ""
for fs_dataset_name, fs_feature_name, fs_prefix_feature in zip(
fs_dataset_names, fs_feature_names, fs_prefix_feature
):
prompt += (
f"Dataset: {fs_dataset_name}.\nNumber of Features: {len(fs_feature_name)}\nFeature Names: "
+ ", ".join(fs_feature_name)
+ "\n\n"
)
prompt += (
f"Dataset: {dataset_name}\nNumber of Features: {len(feature_names)}\nFeature Names: "
+ ", ".join(feature_names[:num_prefix_features])
+ ", "
)
# execute the prompt
response = send_completion(llm, prompt)
# consider the response only until the first '\n\n'
idx = response.find("\n\n")
if idx != -1:
response = response[:idx]
print(
bcolors.BOLD
+ "Feature Names Test\nFeature Names: "
+ bcolors.ENDC
+ ", ".join(feature_names[num_prefix_features:])
+ bcolors.BOLD
+ "\nModel Generation: "
+ bcolors.ENDC
+ response
)
# TODO do some sort of evaluation
# for example, return true if it completes all but X of the feature names, correcting for upper/lower case
# at least do formatted printing of the results
####################################################################################
# Feature Values
####################################################################################
####################################################################################
# Header Test
####################################################################################
def header_test(
csv_file: str,
llm: Union[LLM_Interface, str],
split_rows: list[int] = [2, 4, 6, 8],
completion_length: int = 500,
few_shot_csv_files: list[str] = DEFAULT_FEW_SHOT_CSV_FILES,
system_prompt: str = "default",
):
"""Header test, using other csv files as few-shot examples.
Splits the csv file at random positions in rows 2, 4, 6, and 8. Performs 1 query for each split. Reports the best completion.
NOTE: This test might fail if the header and rows of the csv file are very long, and the model has a small context window.
NOTE: in the end, this is the case for all of our tests :)
"""
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
# default system prompt?
if system_prompt == "default":
system_prompt = tabmem.config.system_prompts["header"]
# load the csv file as a single contiguous string. also load the rows to determine offsets within the string
data = utils.load_csv_string(csv_file, header=True)
csv_rows = utils.load_csv_rows(csv_file, header=True)
# load the few-shot examples
few_shot_data = []
for fs_csv_file in few_shot_csv_files:
fs_data = utils.load_csv_string(fs_csv_file, header=True)
few_shot_data.append(fs_data)
# perform the test multiple times, cutting the dataset at random positions in rows split_rows
num_completions = -1
header, completion = None, None
for i_row in split_rows:
offset = np.sum([len(row) for row in csv_rows[: i_row - 1]])
offset += np.random.randint(
len(csv_rows[i_row]) // 3, 2 * len(csv_rows[i_row]) // 3
)
prefixes = [data[:offset]]
suffixes = [data[offset : offset + completion_length]]
few_shot = [
([fs_data[:offset]], [fs_data[offset : offset + completion_length]])
for fs_data in few_shot_data
]
# chat mode: use few-shot examples
if llm.chat_mode:
_, _, response = prefix_suffix_chat_completion(
llm, prefixes, suffixes, system_prompt, few_shot=few_shot, num_queries=1
)
response = response[0]
else: # otherwise, plain completion
response = send_completion(llm, prefixes[0])
# find the first digit where the response and the completion disagree
idx = -1000
for idx, (c, r) in enumerate(zip(data[offset:], response)):
if c != r:
break
if idx == len(response) - 1 and response[idx] == data[offset + idx]:
idx += 1 # no disagreement found, set idx to length of the response
# is this the best completion so far?
if idx > num_completions:
num_completions = idx
header = prefixes[0]
completion = response
# for the printing, we first color all green up to the first disagreement
completion_print = bcolors.Green + completion[:num_completions]
# then color red up to the beginning of the next row, if any
remaining_completion = completion[num_completions:]
idx = remaining_completion.find("\n")
if idx == -1:
completion_print += bcolors.Red + remaining_completion
else:
completion_print += bcolors.Red + remaining_completion[:idx] + "\n"
remaining_completion = remaining_completion[idx + 1 :]
# for all additional rows, green up to the first disagreement, all red after that
completion_rows = remaining_completion.split("\n")
# the corresponding next row in the csv file
data_idx = data[len(header) + num_completions :].find("\n")
data_rows = data[len(header) + num_completions + data_idx + 1 :].split("\n")
for completion_row, data_row in zip(completion_rows, data_rows):
if completion_row == data_row:
completion_print += bcolors.Green + completion_row + "\n"
continue
# not equal, find the first disagreement
idx = -1000
for idx, (c, r) in enumerate(zip(data_row, completion_row)):
if c != r:
break
if idx == len(completion_row) - 1 and completion_row[idx] == data_row[idx]:
idx += 1
# print first part green, second part red
completion_print += (
bcolors.Green
+ completion_row[:idx]
+ bcolors.Red
+ completion_row[idx:]
+ "\n"
)
# remove final new line
completion_print = completion_print.rstrip("\n")
# print the result
print(
bcolors.BOLD
+ "Header Test: "
+ bcolors.ENDC
+ bcolors.Black
+ header
+ completion_print
+ bcolors.ENDC
+ bcolors.BOLD
+ "\nHeader Test Legend: "
+ bcolors.ENDC
+ "Prompt "
+ bcolors.Green
+ "Correct "
+ bcolors.Red
+ "Incorrect"
+ bcolors.ENDC
)
# TODO return true if it completes the given row, as well as the next row.
# TODO count the number of correctly completed rows and print this number
####################################################################################
# Row Completion
####################################################################################
def row_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_rows=10,
num_queries=50,
few_shot=7,
out_file=None,
system_prompt: str = "default",
):
"""Row completion test: Complete the next row of the csv file, given the previous rows."""
llm = __llm_setup(llm)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["row-completion"]
# what fraction of the rows are duplicates?
rows = utils.load_csv_rows(csv_file)
frac_duplicates = 1 - len(set(rows)) / len(rows)
if frac_duplicates == 0:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ "All the rows in the dataset are unique."
)
else:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"{100*frac_duplicates:.2f}% of the rows in this dataset are duplicates."
)
# ask the model to perform row chat completion (execute the the prompt)
if llm.chat_mode:
test_prefixes, test_suffixes, responses = row_chat_completion(
llm,
csv_file,
system_prompt,
num_prefix_rows,
num_queries,
few_shot,
out_file,
)
else:
test_prefixes, test_suffixes, responses = row_completion(
llm, csv_file, num_prefix_rows, num_queries, out_file
)
# count the number of exact matches
# NOTE here we assume that the test suffix is a single row that is unique, i.e. no duplicate rows
num_exact_matches = 0
for test_suffix, response in zip(test_suffixes, responses):
if test_suffix.strip() in response.strip():
num_exact_matches += 1
# the statistical test using the levenshtein distance TODO taken out of current version although it works
# test_prefix_rows = [prefix.split("\n") for prefix in test_prefixes]
# test_result = analysis.levenshtein_distance_t_test(
# responses, test_suffixes, test_prefix_rows
# )
# print the result
print(
bcolors.BOLD
+ "Row Completion Test: "
+ bcolors.ENDC
+ f"{num_exact_matches}/{num_queries} exact matches."
# + bcolors.BOLD
# + "\nLevenshtein distance test (p-value): "
# + bcolors.ENDC
# + f"{test_result.pvalue:.3f}."
)
return test_prefixes, test_suffixes, responses
####################################################################################
# Feature Completion
####################################################################################
def feature_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
feature_name: str = None,
num_queries=100,
few_shot=5,
out_file=None,
system_prompt: str = "default",
):
"""Feature completion test where we attempt to predict a single rare feature & count the number of exact matches.
The basic prompt format is the following:
System: <system_prompt>
User: Feature 1 = value 1, Feature 2 = value 2, ..., Feature n = value n
Response: Feature {feature_name} = value
This can be modified in the following ways:
- Include few-shot examples from other csv files.
- Don't use the feature names, but only the values.
"""
llm = __llm_setup(llm)
# TODO statistical analysis of the uniqueness of the feature (i.e., is the test appropriate?!)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["feature-completion"]
# if no feature value is provided, automatically select the most unique feature
if feature_name is None:
feature_name, frac_unique_values = analysis.find_most_unique_feature(csv_file)
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"Using feature {feature_name} with {100*frac_unique_values:.2f}% unique values."
)
# all the other features are the conditional features
feature_names = utils.get_feature_names(csv_file)
cond_feature_names = [f for f in feature_names if f != feature_name]
if not llm.chat_mode: # wrap base model to take chat queries
def build_prompt(messages):
prompt = ""
for m in messages:
if m["role"] == "user":
prompt += m["content"]
elif m["role"] == "assistant":
prompt += ", " + m["content"] + "\n\n"
prompt += ", "
return prompt
llm = ChatWrappedLLM(llm, build_prompt, ends_with="\n\n")
# execute the prompt
_, test_suffixes, responses = feature_values_chat_completion(
llm,
csv_file,
system_prompt,
num_queries,
few_shot,
cond_feature_names,
add_description=False,
out_file=out_file,
)
# parse the model responses
response_df = utils.parse_feature_stings(responses, [feature_name])
test_suffix_df = utils.parse_feature_stings(test_suffixes, [feature_name])
# count number of exact matches
num_exact_matches = np.sum(
response_df[feature_name] == test_suffix_df[feature_name]
)
# print the result
print(
bcolors.BOLD
+ f'Feature Completion Test ("{feature_name}"): '
+ bcolors.ENDC
+ bcolors.Black
+ f"{num_exact_matches}/{num_queries} exact matches."
+ bcolors.ENDC
)
####################################################################################
# First Token Test
####################################################################################
def first_token_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_rows=10,
num_queries=100,
few_shot=7,
out_file=None,
system_prompt: str = "default",
):
"""First token test: Complete the first token of the next row of the csv file, given the previous rows."""
llm = __llm_setup(llm)
if (
system_prompt == "default"
): # default system prompt? (the first token test asks the model to complete the same task as row completion, only the evaluation is different)
system_prompt = tabmem.config.system_prompts["row-completion"]
# determine the number of digits that the first token should have
num_digits = analysis.build_first_token(csv_file)
# run a feature prediction test to see if the first token is actually random
df = utils.load_csv_df(csv_file)
rows = utils.load_csv_rows(csv_file, header=False)
df["FIRST_TOKEN_TEST_ROW"] = [r[:num_digits] for r in rows]
df["FIRST_TOKEN_TEST_ROW"] = df["FIRST_TOKEN_TEST_ROW"].astype(str)
tmp_csv_file = utils.tmp_csv_file(
df, utils.get_dataset_name(csv_file) + ".csv"
) # save the df to a tmp csv file | rejected = statistical_feature_prediction_test( | 5 | 2023-11-14 18:34:51+00:00 | 12k |
WindowsSov8forUs/bestdori_api | bestdori/post.py | [
{
"identifier": "Chart",
"path": "bestdori/charts.py",
"snippet": "class Chart(list[NoteType]):\n '''谱面类,统合针对谱面的一层操作\n\n 参数:\n chart (list[dict[str, Any]]): 原始谱面代码'''\n # 初始化\n def __init__(self, chart: list[dict[str, Any]]) -> None:\n '''谱面类,统合针对谱面的一层操作\n\n 参数:\n ... | from typing_extensions import overload
from typing import TypedDict, Optional, Literal, Union, TYPE_CHECKING, Any
from .charts import Chart
from .utils.content import Content
from .utils.utils import API, ASSETS
from .utils.network import Api, Assets
from .exceptions import (
AssetsNotExistError,
PostHasNoChartError,
PostHasNoSongError
)
from .user import Me | 10,147 | response = Api(API['post']['basic'], self.proxy).request('get', params={'id': self.id,})
return response.json()
# 获取帖子信息
def get_details(self) -> dict[str, Any]:
'''获取帖子信息
返回:
dict[str, Any]: 帖子详细信息
'''
if len(self._post) <= 0:
# 如果没有帖子内容存储
response = Api(API['post']['details'], self.proxy).request('get', params={'id': self.id,})
if (post := response.json().get('post', None)) is not None:
self._post = dict(post)
else:
raise Exception('无帖子信息获取。')
return self._post
# 获取谱面对象
def get_chart(self) -> Chart:
'''获取谱面对象
返回:
Chart: 谱面对象
'''
post = self.get_details()
if (chart := post.get('chart', None)) is not None:
return Chart.normalize(chart)
else:
raise PostHasNoChartError(post)
# 获取帖子标签
def get_tags(self) -> list[Tag]:
'''获取帖子标签
返回:
list[Tag]: 标签列表
'''
if (tags := self.get_details().get('tags', None)) is not None:
return [Tag(tag) for tag in tags]
else:
return []
# 获取帖子内容
def get_content(self) -> str:
'''获取帖子内容
返回:
str: 帖子内容
'''
result: str = ''
if (content := list(self.get_details().get('content', None))) is not None:
for seg in content:
if seg.get('type', None) in ['text', 'link']:
result += seg.get('data', '') + '\n'
elif seg.get('type', None) == 'emoji':
result += f':{seg.get("data", "")}:'
elif seg.get('type', None) == 'br':
result += '\n'
return result
# 获取歌曲信息对象
def get_song(self) -> SongRes:
'''获取歌曲信息对象
返回:
SongInfo: 歌曲音频与封面字节
'''
post = self.get_details()
if (song := post.get('song', None)) is None:
raise PostHasNoSongError(post)
if (type_ := song.get('type', None)) is None:
raise TypeError('该帖子没有歌曲类型。')
result: dict[str, Union[bytes, None]] = {}
if type_ == 'custom': # 自定义歌曲
# 获取歌曲音频
if (audio := song.get('audio', None)) is None:
result['audio'] = None
else:
try:
response = Api(audio, self.proxy).request('get')
response.raise_for_status()
result['audio'] = response.content
except Exception as exception:
print(f'获取自定义歌曲音频时失败:{type(exception).__name__}: {exception}')
result['audio'] = None
# 获取歌曲封面
if (cover := song.get('cover', None)) is None:
result['cover'] = None
else:
try:
response = Api(cover, self.proxy).request('get')
response.raise_for_status()
result['cover'] = response.content
except Exception as exception:
print(f'获取自定义歌曲封面时失败:{type(exception).__name__}: {exception}')
result['cover'] = None
elif type_ == 'bandori': # BanG Dream! 歌曲
# 获取歌曲 ID
if (id_ := song.get('id', None)) is None:
raise ValueError('未能获取歌曲 ID。')
# 获取歌曲信息
info = Api(API['songs']['info'].format(id=id_), self.proxy).request('get').json()
# 获取歌曲所在服务器
if (published_at := info.get('publishedAt', None)) is None:
raise Exception('无法获取歌曲发布时间。')
# 根据 publishedAt 数据判断服务器
if published_at[0] is not None: server = 'jp'
elif published_at[1] is not None: server = 'en'
elif published_at[2] is not None: server = 'tw'
elif published_at[3] is not None: server = 'cn'
elif published_at[4] is not None: server = 'kr'
else:
raise Exception('无法获取歌曲服务器。')
# 获取歌曲音频
try:
result['audio'] = Assets(
| '''`bestdori.post`
社区帖子相关操作'''
if TYPE_CHECKING:
# 标签类
class Tag(TypedDict):
'''标签类'''
type: str
'''标签类型'''
data: str
'''标签数据'''
# 歌曲资源类
class SongRes(TypedDict):
'''歌曲资源类'''
audio: Union[bytes, None]
'''音频字节'''
cover: Union[bytes, None]
'''封面字节'''
# 自定义歌曲信息类
class CustomSong(TypedDict):
'''自定义歌曲信息类'''
type: Literal['custom']
'''歌曲类型'''
audio: Optional[str]
'''歌曲音频'''
cover: Optional[str]
'''歌曲封面'''
# 服务器歌曲信息类
class ProvidedSong(TypedDict):
'''服务器歌曲信息类'''
type: Literal['bandori', 'llsif']
'''歌曲类型'''
id: int
'''歌曲 ID'''
# 搜索社区谱面
@overload
def get_list(
proxy: Optional[str]=None,
*,
search: str='',
category_name: Literal['SELF_POST']='SELF_POST',
category_id: Literal['chart']='chart',
tags: list[Tag]=[],
order: Literal['TIME_DESC', 'TIME_ASC']='TIME_DESC',
limit: int=20,
offset: int=0
) -> dict[str, Any]:
'''搜索社区谱面
```python
# 以 'Arghena' 为关键词,搜索社区谱面
Post.search(search='Arghena', caregory_name='SELF_POST', category_id='chart')
```
参数:
proxy (Optional[str], optional): 代理服务器
search (str, optional): 搜索关键词,默认为空
category_name (Literal['SELF_POST'], optional): 搜索的帖子类型 `SELF_POST`
category_id (Literal['chart', 'text'], optional): 搜索的画廊种类 `chart`
tags (list[Tag], optional): 搜索的标签,默认为空
order (Literal['TIME_DESC', 'TIME_ASC'], optional): 帖子排序,默认时间倒序
limit (int, optional): 展示出的帖子数,默认 20
offset (int, optional): 忽略前面的 `offset` 个帖子,默认 0
返回:
dict[str, Any]: 搜索结果
```python
result: bool # 是否有响应
count: int # 搜索到的谱面总数
posts: list[dict[str, Any]] # 列举出的谱面
```
'''
...
# 搜索用户帖子
@overload
def get_list(
proxy: Optional[str]=None,
*,
username: str,
limit: int=20,
offset: int=0,
order: Literal['TIME_DESC', 'TIME_ASC']='TIME_DESC'
) -> dict[str, Any]:
'''搜索用户帖子
参数:
proxy (Optional[str], optional): 代理服务器
username (str): 用户名
limit (int, optional): 展示出的帖子数,默认 20
offset (int, optional): 忽略前面的 `offset` 个帖子,默认 0
order (Literal['TIME_DESC', 'TIME_ASC'], optional): 帖子排序,默认时间倒序
返回:
dict[str, Any]: 搜索结果
```python
result: bool # 是否有响应
count: int # 搜索到的帖子总数
posts: list[dict[str, Any]] # 列举出的帖子
```
'''
...
# 搜索帖子
@overload
def get_list(
proxy: Optional[str]=None,
*,
search: Optional[str]=None,
following: Optional[bool]=None,
category_name: Optional[str]=None,
category_id: Optional[str]=None,
tags: Optional[list[Tag]]=None,
username: Optional[str]=None,
order: Literal['TIME_DESC', 'TIME_ASC'],
limit: int=20,
offset: int=0
) -> dict[str, Any]:
'''搜索帖子
参数:
proxy (Optional[str], optional): 代理服务器
order (Literal['TIME_DESC', 'TIME_ASC']): 帖子排序
search (Optional[str], optional): 搜索关键词
following (Optional[bool], optional): 是否关注
category_name (Optional[str], optional): 画廊名称
category_id (Optional[str], optional): 画廊 ID
tags (Optional[List[Tag]], optional): 帖子标签
username (Optional[str], optional): 用户名
limit (int, optional): 展示出的帖子数,默认 20
offset (int, optional): 忽略前面的 `offset` 个帖子,默认 0
返回:
dict[str, Any]: 搜索结果
'''
...
# 搜索帖子
def get_list(proxy: Optional[str]=None, **kwargs: Any) -> dict[str, Any]:
# 去除 None 值字段
kwargs = {key: value for key, value in kwargs.items() if value is not None}
# 将下划线字段名转换为小驼峰字段名
kwargs = {
(
"".join(x.capitalize() if i > 0 else x for i, x in enumerate(key.split("_")))
): value for key, value in kwargs.items() if value is not None
}
response = Api(API['post']['list'], proxy).request('post', data=kwargs)
return response.json()
# 搜索标签
def search_tags(
type_: str,
data: str='',
fuzzy: bool=True,
proxy: Optional[str]=None
) -> list[Tag]:
'''搜索已有标签
参数:
type (str): 标签类型
data (str, optional): 搜索标签数据关键词
fuzzy (bool, optional): 是否使用模糊搜索
proxy (Optional[str], optional): 代理服务器
返回:
list[Tag]: 标签类 `Tag` 列表
'''
response = Api(API['post']['tag'], proxy).request(
'get',
params={
'type': type_,
'data': data,
'fuzzy': fuzzy
}
)
if (tags := response.json().get('tags', None)) is not None:
return [Tag(tag) for tag in tags]
else:
raise Exception('搜索标签时出现未知错误。')
# 发表谱面
@overload
def post(
me: 'Me',
proxy: Optional[str]=None,
*,
artists: str,
category_id: Literal['chart']='chart',
category_name: Literal['SELF_POST']='SELF_POST',
chart: Chart,
content: list[Content],
diff: Literal[0, 1, 2, 3, 4],
level: int,
song: Union[CustomSong, ProvidedSong],
tags: list[Tag]=[],
title: str
) -> int:
'''发表谱面
参数:
me (Me): 自身用户对象
proxy (Optional[str], optional): 代理服务器
artists (str): 歌手
category_id (Literal['chart'], optional): 谱面画廊 ID `chart`
category_name (Literal['SELF_POST'], optional): 谱面画廊名称 `SELF_POST`
chart (Chart): 谱面
content (list[Content]): 帖子内容
diff (Literal[0, 1, 2, 3, 4]): 难度
level (int): 等级
song (Union[CustomSong, ProvidedSong]): 歌曲
tags (list[Tag], optional): 谱面标签
title (str): 谱面标题
返回:
int: 谱面 ID
'''
...
# 发表文本帖子
@overload
def post(
me: 'Me',
proxy: Optional[str]=None,
*,
category_id: Literal['text']='text',
category_name: Literal['SELF_POST']='SELF_POST',
content: list[Content],
tags: list[Tag]=[],
title: str
) -> int:
'''发表文本帖子
参数:
me (Me): 自身用户对象
proxy (Optional[str], optional): 代理服务器
category_id (Literal['text'], optional): 帖子画廊 ID `text`
category_name (Literal['SELF_POST'], optional): 帖子画廊名称 `SELF_POST`
content (list[Content]): 帖子内容
tags (list[Tag], optional): 帖子标签
title (str): 帖子标题
返回:
int: 帖子 ID
'''
...
# 发表帖子
@overload
def post(
me: 'Me',
proxy: Optional[str]=None,
*,
artists: Optional[str]=None,
category_id: str,
category_name: str,
chart: Optional[Chart]=None,
content: list[Content],
diff: Optional[Literal[0, 1, 2, 3, 4]]=None,
level: Optional[int]=None,
song: Optional[Union[CustomSong, ProvidedSong]]=None,
tags: Optional[list[Tag]]=None,
title: Optional[str]=None
) -> int:
'''发表帖子
参数:
me (Me): 自身用户对象
proxy (Optional[str], optional): 代理服务器
artists (Optional[str], optional): 歌手
category_id (str): 帖子画廊 ID
category_name (str): 帖子画廊名称
chart (Optional[Chart], optional): 谱面
content (list[Content]): 帖子内容
diff (Optional[Literal[0, 1, 2, 3, 4]], optional): 难度
level (Optional[int], optional): 等级
song (Optional[Union[CustomSong, ProvidedSong]], optional): 歌曲
tags (Optional[list[Tag]], optional): 帖子标签
title (Optional[str], optional): 帖子标题
返回:
int: 帖子 ID
'''
...
# 发表帖子
def post(
me: 'Me',
proxy: Optional[str]=None,
**kwargs: Any
) -> int:
# 转换特定字段
if 'chart' in kwargs:
kwargs['chart'] = kwargs['chart'].to_list()
if 'content' in kwargs:
content = kwargs['content']
kwargs['content'] = [seg.__dict__ for seg in content]
# 去除 None 值字段
kwargs = {key: value for key, value in kwargs.items() if value is not None}
# 将下划线字段名转换为小驼峰字段名
kwargs = {
(
"".join(x.capitalize() if i > 0 else x for i, x in enumerate(key.split("_")))
): value for key, value in kwargs.items() if value is not None
}
response = Api(API['post']['post'], proxy).request(
'post',
cookies=me.cookies,
data=kwargs
)
if (id_ := response.json().get('id', None)) is None:
raise ValueError('发表帖子时出现未知错误。')
return id_
# 查询帖子顺序
def find_post(category_name: str, category_id: str, id_: int, proxy: Optional[str]=None) -> int:
'''查询帖子顺序
参数:
category_name (str): 画廊名称
category_id (str): 画廊 ID
id (int): 查询的帖子 ID
proxy (Optional[str], optional): 代理服务器
Returns:
int: 帖子在该画廊的时间顺序
'''
params = {
'categoryName': category_name,
'categoryId': category_id,
'id': id_
}
response = Api(API['post']['find'], proxy).request('get', params=params)
if (position := response.json().get('position', None)) is None:
raise ValueError('查询帖子顺序时出现未知错误。')
return position
# 社区帖子类
class Post:
'''社区帖子类
参数:
id_ (str): 社区帖子 ID
proxy (Optional[str], optional): 代理服务器
'''
# 初始化
def __init__(self, id_: int, proxy: Optional[str]=None) -> None:
'''社区帖子类
参数:
id_ (int): 社区帖子 ID
proxy (Optional[str], optional): 代理服务器
'''
self.id: int = id_
'''社区帖子 ID'''
self.proxy: Optional[str] = proxy
'''代理服务器'''
self._post: dict[str, Any] = {}
'''社区帖子内容'''
return
# 获取帖子基础信息
def get_basic(self) -> dict[str, Any]:
'''获取帖子基础信息
返回:
dict[str, Any]: 基础信息
'''
response = Api(API['post']['basic'], self.proxy).request('get', params={'id': self.id,})
return response.json()
# 获取帖子信息
def get_details(self) -> dict[str, Any]:
'''获取帖子信息
返回:
dict[str, Any]: 帖子详细信息
'''
if len(self._post) <= 0:
# 如果没有帖子内容存储
response = Api(API['post']['details'], self.proxy).request('get', params={'id': self.id,})
if (post := response.json().get('post', None)) is not None:
self._post = dict(post)
else:
raise Exception('无帖子信息获取。')
return self._post
# 获取谱面对象
def get_chart(self) -> Chart:
'''获取谱面对象
返回:
Chart: 谱面对象
'''
post = self.get_details()
if (chart := post.get('chart', None)) is not None:
return Chart.normalize(chart)
else:
raise PostHasNoChartError(post)
# 获取帖子标签
def get_tags(self) -> list[Tag]:
'''获取帖子标签
返回:
list[Tag]: 标签列表
'''
if (tags := self.get_details().get('tags', None)) is not None:
return [Tag(tag) for tag in tags]
else:
return []
# 获取帖子内容
def get_content(self) -> str:
'''获取帖子内容
返回:
str: 帖子内容
'''
result: str = ''
if (content := list(self.get_details().get('content', None))) is not None:
for seg in content:
if seg.get('type', None) in ['text', 'link']:
result += seg.get('data', '') + '\n'
elif seg.get('type', None) == 'emoji':
result += f':{seg.get("data", "")}:'
elif seg.get('type', None) == 'br':
result += '\n'
return result
# 获取歌曲信息对象
def get_song(self) -> SongRes:
'''获取歌曲信息对象
返回:
SongInfo: 歌曲音频与封面字节
'''
post = self.get_details()
if (song := post.get('song', None)) is None:
raise PostHasNoSongError(post)
if (type_ := song.get('type', None)) is None:
raise TypeError('该帖子没有歌曲类型。')
result: dict[str, Union[bytes, None]] = {}
if type_ == 'custom': # 自定义歌曲
# 获取歌曲音频
if (audio := song.get('audio', None)) is None:
result['audio'] = None
else:
try:
response = Api(audio, self.proxy).request('get')
response.raise_for_status()
result['audio'] = response.content
except Exception as exception:
print(f'获取自定义歌曲音频时失败:{type(exception).__name__}: {exception}')
result['audio'] = None
# 获取歌曲封面
if (cover := song.get('cover', None)) is None:
result['cover'] = None
else:
try:
response = Api(cover, self.proxy).request('get')
response.raise_for_status()
result['cover'] = response.content
except Exception as exception:
print(f'获取自定义歌曲封面时失败:{type(exception).__name__}: {exception}')
result['cover'] = None
elif type_ == 'bandori': # BanG Dream! 歌曲
# 获取歌曲 ID
if (id_ := song.get('id', None)) is None:
raise ValueError('未能获取歌曲 ID。')
# 获取歌曲信息
info = Api(API['songs']['info'].format(id=id_), self.proxy).request('get').json()
# 获取歌曲所在服务器
if (published_at := info.get('publishedAt', None)) is None:
raise Exception('无法获取歌曲发布时间。')
# 根据 publishedAt 数据判断服务器
if published_at[0] is not None: server = 'jp'
elif published_at[1] is not None: server = 'en'
elif published_at[2] is not None: server = 'tw'
elif published_at[3] is not None: server = 'cn'
elif published_at[4] is not None: server = 'kr'
else:
raise Exception('无法获取歌曲服务器。')
# 获取歌曲音频
try:
result['audio'] = Assets( | ASSETS['songs']['sound'].format(id=str(id_)), server, self.proxy | 3 | 2023-11-16 13:09:20+00:00 | 12k |
kampta/asic | commons/logger.py | [
{
"identifier": "images2grid",
"path": "commons/utils.py",
"snippet": "def images2grid(images, **grid_kwargs):\n # images should be (N, C, H, W)\n grid = make_grid(images, **grid_kwargs)\n out = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n return o... | from torch.utils.tensorboard.writer import SummaryWriter
from PIL import Image
from commons.utils import images2grid, map_minmax, compute_pck, sample_tuples, \
pck_loop
from commons.draw import splat_points, load_fg_points, \
concat_v, get_colors, get_dense_colors, load_text_points
from thirdparty.colormap.colormap_flow import color_wheel_fast_smooth
import torch
import torch.nn.functional as F
import wandb
import numpy as np | 7,507 | # flatten(0, 1)
# writer.log_image_grid(
# stacked, 'kp_pred_text', train_idx, 3*vis_sample,
# log_mean_img=False, nrow=3)
# Log dense mapping from canonical space to Image space
wheel = color_wheel_fast_smooth(res).permute(2, 0, 1).unsqueeze(0).to(device)
colors = wheel.expand(vis_sample, -1, -1, -1)
flow, _ = stn(all_imgs[src_idx])
colors = F.grid_sample(colors, flow, padding_mode='border',
align_corners=True)
colors = map_minmax(colors, 0, 1, -1, 1)
alpha = 0.5
blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \
(all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]
blend_img = torch.cat([wheel, blend_img, wheel, colors* all_masks[src_idx]])
writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),
log_mean_img=False, nrow=len(blend_img)//2)
# Log keypoints from Image space to canonical space
if has_gt_kp:
canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)
canon_corrs = stn.unnormalize(canon_corrs, res, res)
canon_vis = all_kps[..., -1]
num_kp = canon_vis.size(-1)
N = canon_vis.size(0)
colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)
heatmaps = splat_points(
torch.ones(num_kp, 3, res, res, device=device) * -1,
canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,
colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))
writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,
num_kp, padding=2, pad_value=1.)
# Log parts from Image space to canonical space
# Splat one part at a time to canonical
# TODO: splat all at once
num_parts = dset.num_parts
part_kp_canons = []
part_kp_vis = []
for part in range(num_parts):
part_masks = (parts == part).float().unsqueeze(1)
kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)
kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)
kp_canon = stn.unnormalize(kp_canon, res, res)
part_kp_canons.append(kp_canon.reshape(-1, 2))
part_kp_vis.append(kp_vis.reshape(-1))
part_kp_canons = torch.stack(part_kp_canons)
part_kp_vis = torch.stack(part_kp_vis)
colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)
heatmaps = splat_points(
torch.ones(num_parts, 3, res, res, device=device) * -1,
part_kp_canons, sigma=2., opacity=1.,
colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))
writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,
num_parts, padding=2, pad_value=1.)
# Compute PCKs
N = all_imgs.size(0)
transfer_fn = stn.transfer_points
pck_pairs = None
if has_gt_kp:
# First compute PCK for all 2-pairs
if has_fixed_pairs:
tuples = dset.fixed_pairs
if dset.thresholds is not None:
thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]
else:
thresholds = None
else:
tuples = sample_tuples(N)
thresholds = None
print(f"First computing 2-point PCK for {len(tuples)} pairs")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True)
pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,
img_size=res)
# Compute k-cycle PCK
pck_cycles = []
if not has_gt_kp:
kp, kp_vis, kp_col_dense = load_fg_points(all_masks,
resolution=vis_denseres)
ignore_idx = kp_vis.sum(dim=0) == 0
all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)
ignore_interim = True
else:
ignore_interim = False
for k in [2, 3, 4]:
tuples = sample_tuples(N, k=k, count=200)
if has_fixed_pairs and dset.thresholds is not None:
thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])
thresholds = thresholds.reshape(-1)
else:
thresholds = None
print(f"Next computing {k}-cycle PCK for {len(tuples)} tuples")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True, ignore_interim=ignore_interim)
pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)
pck_cycles.append(pck)
return pck_pairs, pck_cycles
class Logger(SummaryWriter):
def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):
super().__init__(results_path)
self.results_path = results_path
self.log_to_tb = log_to_tb
self.log_to_wandb = log_to_wandb
def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),
scale_each=False, nrow=None, **kwargs):
nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow
if type(images[0]) is torch.Tensor:
|
@torch.inference_mode()
def log_visuals(canon, stn, dset, train_idx, writer, vis_sample=2,
vis_denseres=32):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
pseudo_kps = dset.pseudo_kps
parts = dset.parts
vis_sample = min(vis_sample, len(dset))
res = dset.img_size
has_gt_kp = dset.kps is not None
has_fixed_pairs = dset.fixed_pairs is not None # SPair
# Run full test dataloader (assuming small dataset)
all_imgs = dset.imgs
all_masks = dset.masks
all_kps = dset.kps
all_flows, _ = stn(all_imgs)
if has_gt_kp:
kps_cols = torch.from_numpy(get_colors(all_kps.size(1))).float()
kps_cols = map_minmax(kps_cols, 0, 1, -1, 1).to(device).unsqueeze(0)
parts_cols = torch.from_numpy(get_colors(dset.num_parts+1)).float()
parts_cols = map_minmax(parts_cols, 0, 1, -1, 1).to(device)
parts_cols[-1] = 0
# Text logging
text_kp, text_kp_col = load_text_points('CVPR')
text_kp = text_kp.to(device).unsqueeze(0)
text_kp_col = text_kp_col.to(device).unsqueeze(0)
pairs = sample_tuples(len(dset), count=vis_sample, seed=0)
src_idx, trg_idx = pairs[:, 0], pairs[:, 1]
# Log only once during the training
if train_idx == 0:
# Log images and the mask
writer.log_image_grid(all_imgs[:vis_sample], 'img', train_idx,
vis_sample, nrow=vis_sample)
writer.log_image_grid(all_imgs[:vis_sample]*all_masks[:vis_sample],
'img_mask', train_idx, vis_sample, nrow=vis_sample)
# Log neural best buddies (sparse)
kp1 = pseudo_kps[src_idx, trg_idx]
kp2 = pseudo_kps[trg_idx, src_idx]
kp_vis = kp1[..., -1] * kp2[..., -1]
kp1, kp2 = kp1[..., :2], kp2[..., :2]
colors = map_minmax(get_dense_colors(kp1), 0, 1, -1, 1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_pseudo_gt', train_idx, 2*vis_sample,
log_mean_img=False, nrow=2)
# Log parts
parts_img = parts_cols[parts[:vis_sample]].permute(0, 3, 1, 2)
writer.log_image_grid(parts_img, 'parts', train_idx, vis_sample,
nrow=vis_sample, log_mean_img=False)
# Log groundtruth kp
if has_gt_kp:
kp1, kp2 = all_kps[src_idx], all_kps[trg_idx]
kp_vis = kp1[..., -1] * kp2[..., -1]
kp1, kp2 = kp1[..., :2], kp2[..., :2]
colors = kps_cols.expand(vis_sample, -1, -1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_gt', train_idx, 2*vis_sample,
log_mean_img=False, nrow=2)
# Log kp and top predictions by STN (if kp are available)
if has_gt_kp:
kp1 = all_kps[src_idx][..., :2]
kp_vis = all_kps[src_idx][..., 2]
kp_pred = stn.transfer_points(
kp1, src_idx, trg_idx, all_flows, mask=all_masks, res=res, is_flow=True)
colors = kps_cols.expand(vis_sample, -1, -1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp_pred.float(), sigma=3., opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_pred_sparse', train_idx,
2*vis_sample, log_mean_img=False, nrow=2)
# Log current canon image
canon_grid = canon.get_grid(vis_sample)
if canon_grid.size(1) > 3:
canon_grid = canon_grid[:, :3]
scale_factor = res / canon_grid.size(-1)
canon_grid = F.interpolate(
canon_grid, scale_factor=scale_factor, mode='bilinear')
writer.log_image_grid(canon_grid, 'canon', train_idx, 1, log_mean_img=False)
# Log dense correspondences
kp, kp_vis, kp_col_dense = load_fg_points(all_masks[src_idx],
resolution=vis_denseres)
kp_pred, kp_canon = stn.transfer_points(
kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,
return_canon=True, is_flow=True)
colors = map_minmax(kp_col_dense, 0, 1, -1, 1)
blend_src = splat_points(
all_imgs[src_idx], kp, sigma=4., opacity=0.75,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp_pred.float(), sigma=4., opacity=0.75,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_canon = splat_points(
torch.ones_like(canon_grid) * -1, kp_canon, sigma=1.3, opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\
flatten(0, 1)
writer.log_image_grid(
stacked, 'kp_pred_dense', train_idx, 3*vis_sample,
log_mean_img=False, nrow=3)
# # Log dense correspondences with text
# text_kp = text_kp.expand(vis_sample, -1, -1)
# text_kp_col = text_kp_col.expand(vis_sample, -1, -1)
# kp_pred, kp_canon = stn.transfer_points(
# text_kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,
# return_canon=True, is_flow=True)
# blend_src = splat_points(all_imgs[src_idx], text_kp, sigma=0.7, opacity=1.,
# colors=text_kp_col)
# blend_trg = splat_points(all_imgs[trg_idx], kp_pred.float(), sigma=0.7,
# opacity=1., colors=text_kp_col)
# blend_canon = splat_points(torch.ones_like(canon_grid) * -1, kp_canon,
# sigma=0.7, opacity=1., colors=text_kp_col)
# stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\
# flatten(0, 1)
# writer.log_image_grid(
# stacked, 'kp_pred_text', train_idx, 3*vis_sample,
# log_mean_img=False, nrow=3)
# Log dense mapping from canonical space to Image space
wheel = color_wheel_fast_smooth(res).permute(2, 0, 1).unsqueeze(0).to(device)
colors = wheel.expand(vis_sample, -1, -1, -1)
flow, _ = stn(all_imgs[src_idx])
colors = F.grid_sample(colors, flow, padding_mode='border',
align_corners=True)
colors = map_minmax(colors, 0, 1, -1, 1)
alpha = 0.5
blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \
(all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]
blend_img = torch.cat([wheel, blend_img, wheel, colors* all_masks[src_idx]])
writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),
log_mean_img=False, nrow=len(blend_img)//2)
# Log keypoints from Image space to canonical space
if has_gt_kp:
canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)
canon_corrs = stn.unnormalize(canon_corrs, res, res)
canon_vis = all_kps[..., -1]
num_kp = canon_vis.size(-1)
N = canon_vis.size(0)
colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)
heatmaps = splat_points(
torch.ones(num_kp, 3, res, res, device=device) * -1,
canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,
colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))
writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,
num_kp, padding=2, pad_value=1.)
# Log parts from Image space to canonical space
# Splat one part at a time to canonical
# TODO: splat all at once
num_parts = dset.num_parts
part_kp_canons = []
part_kp_vis = []
for part in range(num_parts):
part_masks = (parts == part).float().unsqueeze(1)
kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)
kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)
kp_canon = stn.unnormalize(kp_canon, res, res)
part_kp_canons.append(kp_canon.reshape(-1, 2))
part_kp_vis.append(kp_vis.reshape(-1))
part_kp_canons = torch.stack(part_kp_canons)
part_kp_vis = torch.stack(part_kp_vis)
colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)
heatmaps = splat_points(
torch.ones(num_parts, 3, res, res, device=device) * -1,
part_kp_canons, sigma=2., opacity=1.,
colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))
writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,
num_parts, padding=2, pad_value=1.)
# Compute PCKs
N = all_imgs.size(0)
transfer_fn = stn.transfer_points
pck_pairs = None
if has_gt_kp:
# First compute PCK for all 2-pairs
if has_fixed_pairs:
tuples = dset.fixed_pairs
if dset.thresholds is not None:
thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]
else:
thresholds = None
else:
tuples = sample_tuples(N)
thresholds = None
print(f"First computing 2-point PCK for {len(tuples)} pairs")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True)
pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,
img_size=res)
# Compute k-cycle PCK
pck_cycles = []
if not has_gt_kp:
kp, kp_vis, kp_col_dense = load_fg_points(all_masks,
resolution=vis_denseres)
ignore_idx = kp_vis.sum(dim=0) == 0
all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)
ignore_interim = True
else:
ignore_interim = False
for k in [2, 3, 4]:
tuples = sample_tuples(N, k=k, count=200)
if has_fixed_pairs and dset.thresholds is not None:
thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])
thresholds = thresholds.reshape(-1)
else:
thresholds = None
print(f"Next computing {k}-cycle PCK for {len(tuples)} tuples")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True, ignore_interim=ignore_interim)
pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)
pck_cycles.append(pck)
return pck_pairs, pck_cycles
class Logger(SummaryWriter):
def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):
super().__init__(results_path)
self.results_path = results_path
self.log_to_tb = log_to_tb
self.log_to_wandb = log_to_wandb
def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),
scale_each=False, nrow=None, **kwargs):
nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow
if type(images[0]) is torch.Tensor: | ndarr = images2grid(images, return_as_PIL=True, nrow=nrow, | 0 | 2023-11-14 16:43:16+00:00 | 12k |
AnonymGiant/ViLaM | evaluate.py | [
{
"identifier": "Config",
"path": "lavis/common/config.py",
"snippet": "class Config:\n def __init__(self, args):\n self.config = {}\n\n self.args = args\n\n # Register the config and configuration for setup\n registry.register(\"configuration\", self)\n\n user_conf... | import argparse
import random
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import lavis.tasks as tasks
from lavis.common.config import Config
from lavis.common.dist_utils import get_rank, init_distributed_mode
from lavis.common.logger import setup_logger
from lavis.common.optims import (
LinearWarmupCosineLRScheduler,
LinearWarmupStepLRScheduler,
)
from lavis.common.utils import now
from lavis.datasets.builders import *
from lavis.models import *
from lavis.processors import *
from lavis.runners.runner_base import RunnerBase
from lavis.tasks import * | 7,644 | """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
# imports modules for registration
def parse_args():
parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--cfg-path",
default='',
help="path to configuration file.")
parser.add_argument(
"--options",
nargs="+",
help="override some settings in the used config, the key-value pair "
"in xxx=yyy format will be merged into config file (deprecate), "
"change to --cfg-options instead.",
)
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def setup_seeds(config):
seed = config.run_cfg.seed + get_rank()
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark = False
cudnn.deterministic = True
def main():
# allow auto-dl completes on main process without timeout when using NCCL backend.
# os.environ["NCCL_BLOCKING_WAIT"] = "1"
# set before init_distributed_mode() to ensure the same job_id shared across all ranks.
job_id = now()
cfg = Config(parse_args())
init_distributed_mode(cfg.run_cfg)
setup_seeds(cfg)
# set after init_distributed_mode() to only log on master.
| """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
# imports modules for registration
def parse_args():
parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--cfg-path",
default='',
help="path to configuration file.")
parser.add_argument(
"--options",
nargs="+",
help="override some settings in the used config, the key-value pair "
"in xxx=yyy format will be merged into config file (deprecate), "
"change to --cfg-options instead.",
)
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def setup_seeds(config):
seed = config.run_cfg.seed + get_rank()
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
cudnn.benchmark = False
cudnn.deterministic = True
def main():
# allow auto-dl completes on main process without timeout when using NCCL backend.
# os.environ["NCCL_BLOCKING_WAIT"] = "1"
# set before init_distributed_mode() to ensure the same job_id shared across all ranks.
job_id = now()
cfg = Config(parse_args())
init_distributed_mode(cfg.run_cfg)
setup_seeds(cfg)
# set after init_distributed_mode() to only log on master. | setup_logger() | 3 | 2023-11-14 08:57:59+00:00 | 12k |
ml4bio/USPNet | Net/New_ComModel.py | [
{
"identifier": "MultiHeadAttention",
"path": "Net/SelfAttentionTorch.py",
"snippet": "class MultiHeadAttention(nn.Module):\n\n def __init__(self,\n config\n ):\n \"\"\"Multi-head attention.\n :param in_features: Size of each input sample.\n :param... | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from Net.LSTM import *
from Net.CNN import *
from Net.SelfAttentionTorch import MultiHeadAttention
from Net.transformer import TransformerEncoder
from torch.autograd import Variable
from torch.nn import Parameter
from Net.CRF import CRF
from Net.LSTM_Attention import LSTM_attention | 10,357 |
embedding_feature_dim_msa = 768
embedding_feature_dim_pro = 1024
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
class Attention_CRF(nn.Module):
def __init__(self, config, config1, cnn_configs, lstm_lan_config, lstm_config,
use_CRF=False, use_attention=True, reweight_ratio=None):
super(Attention_CRF, self).__init__()
self.num_classes = 20
self.max_len = config1['max_text_len']
self.embedding = nn.Embedding(num_embeddings=config['vocab_size'], embedding_dim=config['embedding_size'])
self.ef1 = 512
self.ef2 = 144
self.ef3 = 32
self.csef = 11
self.ef4 = 256
self.ef5 = 256
self.ef6 = 64
if (use_CRF):
self.crf = CRF(num_tags=11, reweight_ratio=reweight_ratio)#original: num_tags=9
self.use_CRF = True
else:
self.use_CRF = False
self.linear = nn.Sequential(nn.Linear(config['embedding_size'], config1['input_dim'] - 4), nn.ReLU())
self.linear2 = nn.Linear(self.max_len, self.max_len)
self.lstm = BLSTM(config1)
self.cnn = TextCNN(cnn_configs[0])
self.cnn1 = TextCNN(cnn_configs[1])
self.lstm2 = BLSTM(lstm_config)
|
embedding_feature_dim_msa = 768
embedding_feature_dim_pro = 1024
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
class Attention_CRF(nn.Module):
def __init__(self, config, config1, cnn_configs, lstm_lan_config, lstm_config,
use_CRF=False, use_attention=True, reweight_ratio=None):
super(Attention_CRF, self).__init__()
self.num_classes = 20
self.max_len = config1['max_text_len']
self.embedding = nn.Embedding(num_embeddings=config['vocab_size'], embedding_dim=config['embedding_size'])
self.ef1 = 512
self.ef2 = 144
self.ef3 = 32
self.csef = 11
self.ef4 = 256
self.ef5 = 256
self.ef6 = 64
if (use_CRF):
self.crf = CRF(num_tags=11, reweight_ratio=reweight_ratio)#original: num_tags=9
self.use_CRF = True
else:
self.use_CRF = False
self.linear = nn.Sequential(nn.Linear(config['embedding_size'], config1['input_dim'] - 4), nn.ReLU())
self.linear2 = nn.Linear(self.max_len, self.max_len)
self.lstm = BLSTM(config1)
self.cnn = TextCNN(cnn_configs[0])
self.cnn1 = TextCNN(cnn_configs[1])
self.lstm2 = BLSTM(lstm_config)
| self.lstm_lan = LSTM_attention(lstm_lan_config) | 3 | 2023-11-14 08:19:42+00:00 | 12k |
doodledood/chat-flock | examples/manual_hierarchical_participant.py | [
{
"identifier": "InMemoryChatDataBackingStore",
"path": "chatflock/backing_stores/in_memory.py",
"snippet": "class InMemoryChatDataBackingStore(ChatDataBackingStore):\n messages: List[ChatMessage]\n participants: Dict[str, ChatParticipant]\n last_message_id: Optional[int] = None\n\n def __in... | import typer
from dotenv import load_dotenv
from halo import Halo
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors import LangChainBasedAIChatConductor, RoundRobinChatConductor
from chatflock.participants.group import GroupBasedChatParticipant
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import TerminalChatRenderer
from examples.common import create_chat_model | 8,440 |
def manual_hierarchical_participant(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
comedy_team = GroupBasedChatParticipant(
group_name="Comedy Team",
mission="Collaborate on funny humour-filled responses based on the original request for the user",
|
def manual_hierarchical_participant(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
comedy_team = GroupBasedChatParticipant(
group_name="Comedy Team",
mission="Collaborate on funny humour-filled responses based on the original request for the user", | chat=Chat( | 1 | 2023-11-12 11:10:58+00:00 | 12k |
CryptoFuzzPy/cryptofuzz | cryptofuzz/Wallet.py | [
{
"identifier": "Convertor",
"path": "cryptofuzz/utils.py",
"snippet": "class Convertor:\n def __init__(self):\n super().__init__()\n self.gen = Generator()\n \n def double_sha256(self, data):\n return hashlib.sha256(hashlib.sha256(data).digest()).digest()\n \n def mn... | import os
from . import Generator, Convertor
from . import (
Bitcoin, BitcoinGold, Dash, DigiByte, Dogecoin, Ethereum, Litecoin, Qtum, Ravencoin, Tron, Zcash, Axe
) | 9,593 | def XPRV_To_Decimal(xprv: str) -> int: return convertor.bytes_to_int(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def PrivateKey_To_Bitcoin_Addr(privatekey: str, Type: str = 'p2pkh') -> str:
"""
Convert Private Key To Bitcoin All Type Address, Type: p2pkh, p2sh, p2wpkh, p2wsh, p2wpkh_p2sh, p2wsh_p2sh.
:param privatekey:
:type privatekey: str
:param Type:
:type Type: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Bitcoin_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> p2pkh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2pkh')
>>> p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2sh')
>>> p2wpkh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wpkh')
>>> p2wsh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh')
>>> p2wpkh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wpkh_p2sh')
>>> p2wsh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh_p2sh')
--------------------------------------------------------
"""
BTC = Bitcoin()
if Type == 'p2pkh':
return BTC.hex_addr(privatekey, 'p2pkh')
elif Type == 'p2sh':
return BTC.hex_addr(privatekey, 'p2sh')
elif Type == 'p2wpkh':
return BTC.hex_addr(privatekey, 'p2wpkh')
elif Type == 'p2wsh':
return BTC.hex_addr(privatekey, 'p2wsh')
elif Type == 'p2wpkh_p2sh':
return BTC.hex_addr(privatekey, 'p2wpkh_p2sh')
elif Type == 'p2wsh_p2sh':
return BTC.hex_addr(privatekey, 'p2wsh_p2sh')
else:
return BTC.hex_addr(privatekey, 'p2pkh')
# ----------------------------------------------------------
def PrivateKey_To_Ethereum_Addr(privatekey: str) -> str:
"""
Convert Private Key To Ethereum Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Ethereum_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Ethereum_Addr(Privatekey)
--------------------------------------------------------
"""
ETH = Ethereum()
return ETH.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_BitcoinGold_Addr(privatekey: str) -> str:
"""
Convert Private Key To Bitcoin Gold Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_BitcoinGold_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_BitcoinGold_Addr(Privatekey)
--------------------------------------------------------
"""
BTG = BitcoinGold()
return BTG.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_Dash_Addr(privatekey: str) -> str:
"""
Convert Private Key To Dash Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Dash_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Dash_Addr(Privatekey)
--------------------------------------------------------
"""
| # programmer and owner mmdrza.com
# ----------------------------------------------------------
convertor = Convertor()
generator = Generator()
# ----------------------------------------------------------
def getPrivateKey() -> str:
"""
Generate a private key without repeating.
:return private key:
:rtype str:
---------------------------------------------------
>>> Privatekey = getPrivateKey()
---------------------------------------------------
"""
return generator.generate_private_key()
# ----------------------------------------------------------
def getMnemonic(size: int = 12) -> str:
"""
Generate Random Standard Mnemonic BIP39.
:param size:
:type size: Int
:return mnemonic:
:rtype str:
--------------------------------------------------
>>> Mnemonic = getMnemonic()
--------------------------------------------------
"""
return generator.generate_mnemonic(size=size)
# ----------------------------------------------------------
def getBinary() -> str:
"""
Generate random Binary With Length 256 (256 bits).
:rtype str:
:return binary:
-------------------------------------------------
>>> Binary = getBinary()
------------------------------------------------
"""
return generator.generate_binary()
# ----------------------------------------------------------
def getRootKey() -> str:
"""
Generate Root Key.
:rtype str:
:return root key:
------------------------------------------------
>>> RootKey = getRootKey()
------------------------------------------------
"""
return generator.generate_xprv()
# -------------------------------------------------------------------
def getBytes() -> bytes: return os.urandom(32)
# -------------------------------------------------------------------
def getDecimal() -> int: return generator.generate_decimal()
# -------------------------------------------------------------------
def PrivateKey_To_Addr(hexed: str, compress: bool = False) -> str:
"""
Convert Private key Hex To Compress and UnCompress Address.
:param hexed:
:type hexed: str
:param compress:
:type compress: bool
:return address:
:rtype str:
----------------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> address_compress = PrivateKey_To_Addr(privatekey, True)
>>> address_uncompress = PrivateKey_To_Addr(privatekey, False)
----------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
if compress:
return convertor.bytes_to_addr(seed, True)
else:
return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def PrivateKey_To_Wif(hexed: str, compress: bool = False) -> str:
"""
Convert Private key Hex To Compress and UnCompress WIF.
:param hexed:
:type hexed: str
:param compress:
:type compress: bool
:return wif:
:rtype str:
------------------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> wif_compress = PrivateKey_To_Wif(privatekey, True)
>>> wif_uncompress = PrivateKey_To_Wif(privatekey, False)
------------------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
if compress:
return convertor.bytes_to_wif(seed, True)
else:
return convertor.bytes_to_wif(seed, False)
# ----------------------------------------------------------
def PrivateKey_To_PublicKey(hexed: str, compress: bool = False) -> str:
"""
Convert Private key Hex To compress and uncompress Public Key.
:param hexed:
:type hexed: str
:param compress:
:type compress: bool
:return public key:
:rtype str:
------------------------------------------------
>>> privatekey = "0A97965...A45517" # example Private Key
>>> publickey_compress = PrivateKey_To_PublicKey(privatekey, True)
>>> publickey_uncompress = PrivateKey_To_PublicKey(privatekey, False)
------------------------------------------------
"""
seed = convertor.hex_to_bytes(hexed)
if compress:
return convertor.bytes_to_public(seed, True).hex()
else:
return convertor.bytes_to_public(seed, False).hex()
# ----------------------------------------------------------
def PrivateKey_To_Mnemonic(hexed: str) -> str:
    """Derive the BIP39 mnemonic phrase from a private-key hex string."""
    return convertor.bytes_to_mne(convertor.hex_to_bytes(hexed))
# ----------------------------------------------------------
def PrivateKey_To_Byte(hexed: str) -> bytes:
    """Decode a private-key hex string into its raw seed bytes."""
    return convertor.hex_to_bytes(hexed)
# ----------------------------------------------------------
def PrivateKey_To_Binary(hexed: str) -> str:
    """Render a private-key hex string as a binary (bit) string."""
    return convertor.bytes_to_binary(convertor.hex_to_bytes(hexed))
# ----------------------------------------------------------
def PrivateKey_To_Decimal(hexed: str) -> int:
    """Interpret a private-key hex string as an integer."""
    return convertor.bytes_to_int(convertor.hex_to_bytes(hexed))
# ----------------------------------------------------------
def PrivateKey_To_XPRV(hexed: str) -> str:
    """Build an extended private key (xprv) from a private-key hex string."""
    return convertor.bytes_to_xprv(convertor.hex_to_bytes(hexed))
# ----------------------------------------------------------
def PrivateKey_To_CompressAddr(hexed: str) -> str:
    """Derive the compressed-key address for a private-key hex string."""
    return convertor.bytes_to_addr(convertor.hex_to_bytes(hexed), True)
# ----------------------------------------------------------
def PrivateKey_To_UncompressAddr(hexed: str) -> str:
    """Derive the uncompressed-key address for a private-key hex string."""
    return convertor.bytes_to_addr(convertor.hex_to_bytes(hexed), False)
# ----------------------------------------------------------
def PrivateKey_To_XPUB(hexed: str) -> str:
    """Build an extended public key (xpub) from a private-key hex string."""
    return convertor.bytes_to_xpub(convertor.hex_to_bytes(hexed))
# ----------------------------------------------------------
def Bytes_To_PrivateKey(byte: bytes) -> str:
    """Hex-encode raw seed bytes into a private-key string."""
    return convertor.bytes_to_hex(byte)
# ----------------------------------------------------------
def Bytes_To_Address(seed: bytes, compress: bool = False):
    """Derive an address from seed bytes; compressed form when `compress` is True."""
    return convertor.bytes_to_addr(seedBytes=seed, compress=bool(compress))
# ----------------------------------------------------------
def Bytes_To_Mnemonic(seed: bytes) -> str:
    """Encode seed bytes as a BIP39 mnemonic phrase."""
    return convertor.bytes_to_mne(seed)
# ----------------------------------------------------------
def Bytes_To_XPRV(seed: bytes) -> str:
    """Build an extended private key (xprv) from seed bytes."""
    return convertor.bytes_to_xprv(seed)
# ----------------------------------------------------------
def Bytes_To_Binary(seed: bytes):
    """Render seed bytes as a binary (bit) string."""
    return convertor.bytes_to_binary(seed)
# ----------------------------------------------------------
def Bytes_To_PublicKey(seed: bytes, compress: bool = False):
    """Derive the hex-encoded public key from seed bytes (compressed when requested)."""
    return convertor.bytes_to_public(seed, bool(compress)).hex()
# ----------------------------------------------------------
def Bytes_To_Compress_Addr(seed: bytes) -> str:
    """Derive the compressed-key address from seed bytes."""
    return convertor.bytes_to_addr(seed, True)
# ----------------------------------------------------------
def Bytes_To_Uncompress_Addr(seed: bytes) -> str:
    """Derive the uncompressed-key address from seed bytes."""
    return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def Bytes_To_Decimal(seed: bytes):
    """Interpret seed bytes as an integer."""
    return convertor.bytes_to_int(seed)
# ----------------------------------------------------------
def Bytes_To_XPUB(seed: bytes) -> str:
    """Build an extended public key (xpub) from seed bytes."""
    return convertor.bytes_to_xpub(seed)
# ----------------------------------------------------------
def Bytes_To_Wif(seed: bytes, compress: bool = False) -> str:
    """Encode seed bytes as WIF; compressed form when `compress` is True."""
    return convertor.bytes_to_wif(seed, bool(compress))
# ----------------------------------------------------------
def Mnemonic_To_Bytes(mnemonic: str) -> bytes:
    """Derive the seed bytes for a BIP39 mnemonic phrase."""
    return convertor.mne_to_seed(mnemonic=mnemonic)
# ----------------------------------------------------------
def Mnemonic_To_PrivateKey(mnemonic: str) -> str:
    """Derive the private-key hex string for a BIP39 mnemonic phrase."""
    return convertor.bytes_to_hex(seed=convertor.mne_to_seed(mnemonic=mnemonic))
# ----------------------------------------------------------
def Mnemonic_To_PublicKey(mnemonic: str, compress: bool = False) -> str:
    """
    Derive the hex-encoded public key for a BIP39 mnemonic phrase.

    Bug fix: this previously wrapped the public key in
    ``convertor.pub_to_addr`` and therefore returned an *address*,
    contradicting both its name and the sibling helpers
    ``Passphrase_To_PublicKey`` / ``Wif_To_PublicKey`` (which return the
    hex public key).  Use ``Mnemonic_To_Addr`` when an address is wanted.

    :param mnemonic: BIP39 mnemonic phrase.
    :param compress: return the compressed public key when True.
    :return: hex-encoded public key.
    """
    seed = convertor.mne_to_seed(mnemonic=mnemonic)
    return convertor.bytes_to_public(seed, bool(compress)).hex()
# ----------------------------------------------------------
def Mnemonic_To_Decimal(mnemonic: str):
    """Interpret a mnemonic's seed bytes as an integer."""
    return convertor.bytes_to_int(convertor.mne_to_seed(mnemonic=mnemonic))
# ----------------------------------------------------------
def Mnemonic_To_Binary(mnemonic: str):
    """Render a mnemonic's seed bytes as a binary (bit) string."""
    return convertor.bytes_to_binary(convertor.mne_to_seed(mnemonic=mnemonic))
# ----------------------------------------------------------
def Mnemonic_To_XPRV(mnemonic: str):
    """Build an extended private key (xprv) from a mnemonic phrase."""
    return convertor.bytes_to_xprv(convertor.mne_to_seed(mnemonic))
# ----------------------------------------------------------
def Mnemonic_To_Addr(mnemonic: str, compress: bool = False) -> str:
    """Derive an address from a mnemonic; compressed form when `compress` is True."""
    return convertor.bytes_to_addr(convertor.mne_to_seed(mnemonic), bool(compress))
# ----------------------------------------------------------
def Mnemonic_To_XPUB(mnemonic: str):
    """Build an extended public key (xpub) from a mnemonic phrase."""
    return convertor.bytes_to_xpub(convertor.mne_to_seed(mnemonic))
# ----------------------------------------------------------
def Mnemonic_To_Wif(mnemonic: str, compress: bool = False) -> str:
    """Encode a mnemonic's seed as WIF; compressed form when `compress` is True."""
    return convertor.bytes_to_wif(convertor.mne_to_seed(mnemonic), bool(compress))
# ----------------------------------------------------------
def Passphrase_To_Addr(passphrase: str, compress: bool = False) -> str:
    """Derive an address from a passphrase; compressed form when `compress` is True."""
    return convertor.pass_to_addr(passphrase, bool(compress))
# ----------------------------------------------------------
def Passphrase_To_Bytes(passphrase: str) -> bytes:
    """Convert a passphrase to seed bytes (via convertor.pass_to_bytes)."""
    return convertor.pass_to_bytes(passphrase)
# ----------------------------------------------------------
def Passphrase_To_PrivateKey(passphrase: str) -> str:
    """Derive the private-key hex string for a passphrase."""
    return convertor.bytes_to_hex(convertor.pass_to_bytes(passphrase))
# ----------------------------------------------------------
def Passphrase_To_PublicKey(passphrase: str, compress: bool = False) -> str:
    """Derive the hex public key for a passphrase (compressed when requested)."""
    seed = convertor.pass_to_bytes(passphrase)
    return convertor.bytes_to_public(seed, bool(compress)).hex()
# ----------------------------------------------------------
def Passphrase_To_Wif(passphrase: str, compress: bool = False) -> str:
    """Encode a passphrase-derived seed as WIF (compressed when requested)."""
    return convertor.bytes_to_wif(convertor.pass_to_bytes(passphrase), bool(compress))
# ----------------------------------------------------------
def Passphrase_To_RootKey(passphrase: str) -> str:
    """Build the root extended private key (xprv) for a passphrase."""
    return convertor.bytes_to_xprv(convertor.pass_to_bytes(passphrase))
# ----------------------------------------------------------
def Passphrase_To_XPUB(passphrase: str) -> str:
    """Build an extended public key (xpub) for a passphrase."""
    return convertor.bytes_to_xpub(convertor.pass_to_bytes(passphrase))
# ----------------------------------------------------------
def Passphrase_To_Decimal(passphrase: str) -> int:
    """Interpret a passphrase-derived seed as an integer."""
    return convertor.bytes_to_int(convertor.pass_to_bytes(passphrase))
# ----------------------------------------------------------
def Wif_To_Bytes(wif: str) -> bytes:
    """Decode a WIF string to its raw seed bytes."""
    return convertor.wif_to_bytes(wif)
# ----------------------------------------------------------
def Wif_To_Addr(wif: str, compress: bool = False) -> str:
    """Derive an address from a WIF string; compressed form when `compress` is True."""
    return convertor.wif_to_addr(wif, compress)
# ----------------------------------------------------------
def Wif_To_PrivateKey(wif: str) -> str:
    """Recover the private-key hex string from a WIF string."""
    return convertor.bytes_to_hex(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Mnemonic(wif: str) -> str:
    """Encode a WIF-decoded seed as a BIP39 mnemonic phrase."""
    return convertor.bytes_to_mne(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Decimal(wif: str) -> int:
    """Interpret a WIF-decoded seed as an integer."""
    return convertor.bytes_to_int(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Binary(wif: str) -> str:
    """Render a WIF-decoded seed as a binary (bit) string."""
    return convertor.bytes_to_binary(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_XPRV(wif: str) -> str:
    """Build an extended private key (xprv) from a WIF string."""
    return convertor.bytes_to_xprv(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_XPUB(wif: str) -> str:
    """Build an extended public key (xpub) from a WIF string."""
    return convertor.bytes_to_xpub(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_RootKey(wif: str) -> str:
    """Build the root key from a WIF string (alias of Wif_To_XPRV)."""
    return Wif_To_XPRV(wif)
# ----------------------------------------------------------
def Wif_To_PublicKey(wif: str, compress: bool = False):
    """Derive the hex public key from a WIF string (compressed when requested)."""
    return convertor.bytes_to_public(convertor.wif_to_bytes(wif), bool(compress)).hex()
# ----------------------------------------------------------
def Decimal_To_PrivateKey(dec: int) -> str:
    """Format an integer as a 64-character zero-padded lowercase hex private key."""
    return f"{dec:064x}"
# ----------------------------------------------------------
def Decimal_To_Bytes(dec: int) -> bytes:
    """Convert an integer seed to its raw bytes."""
    return convertor.int_to_bytes(dec)
# ----------------------------------------------------------
def Decimal_To_PublicKey(dec: int, compress: bool = False) -> str:
    """Derive the hex public key for an integer seed (compressed when requested)."""
    return convertor.bytes_to_public(Decimal_To_Bytes(dec), bool(compress)).hex()
# ----------------------------------------------------------
def Decimal_To_Address(dec: int, compress: bool = False) -> str:
    """Derive an address for an integer seed; compressed form when `compress` is True."""
    return convertor.bytes_to_addr(Decimal_To_Bytes(dec), bool(compress))
# ----------------------------------------------------------
def Decimal_To_Mnemonic(dec: int) -> str:
    """Encode an integer seed as a BIP39 mnemonic phrase."""
    return convertor.bytes_to_mne(convertor.int_to_bytes(dec))
# ----------------------------------------------------------
def Decimal_To_XPRV(dec: int) -> str:
    """Build an extended private key (xprv) from an integer seed."""
    return convertor.bytes_to_xprv(convertor.int_to_bytes(dec))
# ----------------------------------------------------------
def Decimal_To_XPUB(dec: int) -> str:
    """Build an extended public key (xpub) from an integer seed."""
    return convertor.bytes_to_xpub(convertor.int_to_bytes(dec))
# ----------------------------------------------------------
def Decimal_To_Binary(dec: int) -> str:
    """Render an integer seed as a binary (bit) string."""
    return convertor.bytes_to_binary(convertor.int_to_bytes(dec))
# ----------------------------------------------------------
def Decimal_To_Wif(dec: int, compress: bool = False) -> str:
    """Encode an integer seed as WIF; compressed form when `compress` is True."""
    return convertor.bytes_to_wif(convertor.int_to_bytes(dec), bool(compress))
# ----------------------------------------------------------
def Binary_To_Bytes(binary_str: str) -> bytes:
    """Decode a binary (bit) string to raw seed bytes."""
    return convertor.binary_to_bytes(binary_str)
# ----------------------------------------------------------
def Binary_To_Address(binary_str: str, compress: bool = False) -> str:
    """Derive an address from a binary string; compressed form when `compress` is True."""
    return convertor.bytes_to_addr(convertor.binary_to_bytes(binary_str), bool(compress))
# ----------------------------------------------------------
def Binary_To_PrivateKey(binary_str: str) -> str:
    """Derive the private-key hex string from a binary string."""
    return convertor.bytes_to_hex(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_Mnemonic(binary_str: str) -> str:
    """Encode a binary-string seed as a BIP39 mnemonic phrase."""
    return convertor.bytes_to_mne(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_XPRV(binary_str: str) -> str:
    """Build an extended private key (xprv) from a binary string."""
    return convertor.bytes_to_xprv(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_XPUB(binary_str: str) -> str:
    """Build an extended public key (xpub) from a binary string."""
    return convertor.bytes_to_xpub(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_Wif(binary_str: str, compress: bool = False) -> str:
    """Encode a binary-string seed as WIF (compressed when requested)."""
    return convertor.bytes_to_wif(convertor.binary_to_bytes(binary_str), compress)
# ----------------------------------------------------------
def Binary_To_PublicKey(binary_str: str, compress: bool = False) -> str:
    """Derive the hex public key from a binary string (compressed when requested)."""
    return convertor.bytes_to_public(convertor.binary_to_bytes(binary_str), compress).hex()
# ----------------------------------------------------------
def Binary_To_Decimal(binary_str: str) -> int:
    """Interpret a binary-string seed as an integer."""
    return convertor.bytes_to_int(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def XPRV_To_Bytes(xprv: str) -> bytes:
    """Decode an extended private key (xprv) to raw seed bytes."""
    return convertor.xprv_to_bytes(xprv)
# ----------------------------------------------------------
def XPRV_To_PrivateKey(xprv: str) -> str:
    """Recover the private-key hex string from an xprv."""
    return convertor.bytes_to_hex(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def XPRV_To_PublicKey(xprv: str, compress: bool = False) -> str:
    """Derive the hex public key from an xprv (compressed when requested)."""
    return convertor.bytes_to_public(convertor.xprv_to_bytes(xprv), compress).hex()
# ----------------------------------------------------------
def XPRV_To_Wif(xprv: str, compress: bool = False) -> str:
    """Encode an xprv-derived seed as WIF (compressed when requested)."""
    return convertor.bytes_to_wif(convertor.xprv_to_bytes(xprv), compress)
# ----------------------------------------------------------
def XPRV_To_Address(xprv: str, compress: bool = False) -> str:
    """Derive an address from an xprv; compressed form when `compress` is True."""
    return convertor.bytes_to_addr(convertor.xprv_to_bytes(xprv), compress)
# ----------------------------------------------------------
def XPRV_To_Mnemonic(xprv: str) -> str:
    """Encode an xprv-derived seed as a BIP39 mnemonic phrase."""
    return convertor.bytes_to_mne(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def XPRV_To_XPUB(xprv: str) -> str:
    """Convert an extended private key (xprv) to its extended public key (xpub)."""
    return convertor.bytes_to_xpub(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def XPRV_To_Decimal(xprv: str) -> int:
    """Interpret an xprv-derived seed as an integer."""
    return convertor.bytes_to_int(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def PrivateKey_To_Bitcoin_Addr(privatekey: str, Type: str = 'p2pkh') -> str:
    """
    Convert a private key to a Bitcoin address of the given type.

    Supported types: ``p2pkh``, ``p2sh``, ``p2wpkh``, ``p2wsh``,
    ``p2wpkh_p2sh``, ``p2wsh_p2sh``.  An unrecognised type falls back to
    ``p2pkh`` (matching the original ``else`` branch).

    :param privatekey: private key as a hex string.
    :param Type: address type name (see above).
    :return: Bitcoin address string.
    --------------------------------------------------------
    >>> from cryptofuzz.Wallet import PrivateKey_To_Bitcoin_Addr
    >>> Privatekey = "e3bfc1c...ca52b8"  # example key
    >>> p2pkh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2pkh')
    >>> p2wsh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh')
    --------------------------------------------------------
    """
    # Every branch of the old if/elif chain forwarded `Type` verbatim to
    # BTC.hex_addr, so dispatch directly instead of enumerating branches.
    valid_types = {"p2pkh", "p2sh", "p2wpkh", "p2wsh", "p2wpkh_p2sh", "p2wsh_p2sh"}
    BTC = Bitcoin()
    return BTC.hex_addr(privatekey, Type if Type in valid_types else 'p2pkh')
# ----------------------------------------------------------
def PrivateKey_To_Ethereum_Addr(privatekey: str) -> str:
    """
    Derive the Ethereum address for a private-key hex string.

    >>> addr = PrivateKey_To_Ethereum_Addr("e3bfc1c...ca52b8")  # example key
    """
    return Ethereum().hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_BitcoinGold_Addr(privatekey: str) -> str:
    """
    Derive the Bitcoin Gold address for a private-key hex string.

    >>> addr = PrivateKey_To_BitcoinGold_Addr("e3bfc1c...ca52b8")  # example key
    """
    return BitcoinGold().hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_Dash_Addr(privatekey: str) -> str:
"""
Convert Private Key To Dash Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Dash_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Dash_Addr(Privatekey)
--------------------------------------------------------
""" | DASH = Dash() | 5 | 2023-11-10 14:51:41+00:00 | 12k |
itzshukla/STRANGER-USERBOT2.0 | Zaid/modules/basic/animation.py | [
{
"identifier": "edit_or_reply",
"path": "Zaid/helper/basic.py",
"snippet": "async def edit_or_reply(message: Message, *args, **kwargs) -> Message:\n apa = (\n message.edit_text\n if bool(message.from_user and message.from_user.is_self or message.outgoing)\n else (message.reply_t... | import asyncio
import random
import requests
from pyrogram import *
from pyrogram import Client, filters
from pyrogram.errors.exceptions.flood_420 import FloodWait
from pyrogram.types import *
from pyrogram.types import Message
from Zaid.helper.basic import edit_or_reply, get_text
from Zaid.helper.constants import MEMES
from Zaid.modules.help import * | 9,519 |
# Fallback display name when no user name is available.
DEFAULTUSER = "Man"
# Unicode box-art "I love you" pictures; one is picked at random per command.
NOBLE = [
    "╲╲╲┏━━┓╭━━━╮╱╱╱\n╲╲╲┗┓┏┛┃╭━╮┃╱╱╱\n╲╲╲╲┃┃┏┫┃╭┻┻┓╱╱\n╱╱╱┏╯╰╯┃╰┫┏━╯╱╱\n╱╱┏┻━┳┳┻━┫┗┓╱╱╱\n╱╱╰━┓┃┃╲┏┫┏┛╲╲╲\n╱╱╱╱┃╰╯╲┃┃┗━╮╲╲\n╱╱╱╱╰━━━╯╰━━┛╲╲",
    "┏━╮\n┃▔┃▂▂┏━━┓┏━┳━━━┓\n┃▂┣━━┻━╮┃┃▂┃▂┏━╯\n┃▔┃▔╭╮▔┃┃┃▔┃▔┗━┓\n┃▂┃▂╰╯▂┃┗╯▂┃▂▂▂┃\n┃▔┗━━━╮┃▔▔▔┃▔┏━╯\n┃▂▂▂▂▂┣╯▂▂▂┃▂┗━╮\n┗━━━━━┻━━━━┻━━━┛",
    "┏┓┏━┳━┳━┳━┓\n┃┗┫╋┣┓┃┏┫┻┫\n┗━┻━┛┗━┛┗━┛\n────YOU────",
    "╦──╔╗─╗╔─╔ ─\n║──║║─║║─╠ ─\n╚═─╚╝─╚╝─╚ ─\n╦─╦─╔╗─╦╦ \n╚╦╝─║║─║║ \n─╩──╚╝─╚╝",
    "╔══╗....<3 \n╚╗╔╝..('\../') \n╔╝╚╗..( •.• ) \n╚══╝..(,,)(,,) \n╔╗╔═╦╦╦═╗ ╔╗╔╗ \n║╚╣║║║║╩╣ ║╚╝║ \n╚═╩═╩═╩═╝ ╚══╝",
    "░I░L░O░V░E░Y░O░U░",
    "┈┈╭━╱▔▔▔▔╲━╮┈┈┈\n┈┈╰╱╭▅╮╭▅╮╲╯┈┈┈\n╳┈┈▏╰┈▅▅┈╯▕┈┈┈┈\n┈┈┈╲┈╰━━╯┈╱┈┈╳┈\n┈┈┈╱╱▔╲╱▔╲╲┈┈┈┈\n┈╭━╮▔▏┊┊▕▔╭━╮┈╳\n┈┃┊┣▔╲┊┊╱▔┫┊┃┈┈\n┈╰━━━━╲╱━━━━╯┈╳",
    "╔ღ═╗╔╗\n╚╗╔╝║║ღ═╦╦╦═ღ\n╔╝╚╗ღ╚╣║║║║╠╣\n╚═ღ╝╚═╩═╩ღ╩═╝",
    "╔══╗ \n╚╗╔╝ \n╔╝(¯'v'¯) \n╚══'.¸./\n╔╗╔═╦╦╦═╗ ╔╗╔╗ \n║╚╣║║║║╩╣ ║╚╝║ \n╚═╩═╩═╩═╝ ╚══╝",
    "╔╗ \n║║╔═╦═╦═╦═╗ ╔╦╗ \n║╚╣╬╠╗║╔╣╩╣ ║║║ \n╚═╩═╝╚═╝╚═╝ ╚═╝ \n╔═╗ \n║═╬═╦╦╦═╦═╦═╦═╦═╗ \n║╔╣╬║╔╣╩╬╗║╔╣╩╣╔╝ \n╚╝╚═╩╝╚═╝╚═╝╚═╩╝",
    "╔══╗ \n╚╗╔╝ \n╔╝╚╗ \n╚══╝ \n╔╗ \n║║╔═╦╦╦═╗ \n║╚╣║║║║╚╣ \n╚═╩═╩═╩═╝ \n╔╗╔╗ ♥️ \n║╚╝╠═╦╦╗ \n╚╗╔╣║║║║ \n═╚╝╚═╩═╝",
    "╔══╗╔╗ ♡ \n╚╗╔╝║║╔═╦╦╦╔╗ \n╔╝╚╗║╚╣║║║║╔╣ \n╚══╝╚═╩═╩═╩═╝\n─────YOU─────",
    "╭╮╭╮╮╭╮╮╭╮╮╭╮╮ \n┃┃╰╮╯╰╮╯╰╮╯╰╮╯ \n┃┃╭┳━━┳━╮╭━┳━━╮ \n┃┃┃┃╭╮┣╮┃┃╭┫╭╮┃ \n┃╰╯┃╰╯┃┃╰╯┃┃╰┻┻╮ \n╰━━┻━━╯╰━━╯╰━━━╯",
    "┊┊╭━╮┊┊┊┊┊┊┊┊┊┊┊ \n━━╋━╯┊┊┊┊┊┊┊┊┊┊┊ \n┊┊┃┊╭━┳╮╭┓┊╭╮╭━╮ \n╭━╋━╋━╯┣╯┃┊┃╰╋━╯ \n╰━╯┊╰━━╯┊╰━┛┊╰━━",
]
# Red and white heart emoji used as "pixels" of the 9x9 heart picture.
R = "❤️"
W = "🤍"
# Rows of the heart picture: a red heart shape on a white background.
heart_list = [
    W * 9,
    W * 2 + R * 2 + W + R * 2 + W * 2,
    W + R * 7 + W,
    W + R * 7 + W,
    W + R * 7 + W,
    W * 2 + R * 5 + W * 2,
    W * 3 + R * 3 + W * 3,
    W * 4 + R + W * 4,
    W * 9,
]
# Full picture as a single message body, and the count of red "pixels"
# (used by phase2 to randomise each heartlet individually).
joined_heart = "\n".join(heart_list)
heartlet_len = joined_heart.count(R)
# Delay between animation frames, in seconds.
SLEEP = 0.1
async def _wrap_edit(message, text: str):
    """Floodwait-safe utility wrapper for edit"""
    # On FloodWait, wait out the server-imposed delay so later frames go
    # through.  NOTE(review): the current frame is dropped, not retried —
    # presumably acceptable for an animation; add a retry here if every
    # frame must render.
    try:
        await message.edit(text)
    except FloodWait as fl:
        await asyncio.sleep(fl.x)
async def phase1(message):
    """Show the heart picture, then cycle it through each scroll colour."""
    await _wrap_edit(message, joined_heart)
    for colour in "🧡💛💚💙💜🖤🤎":
        await _wrap_edit(message, joined_heart.replace(R, colour))
        await asyncio.sleep(SLEEP)
async def phase2(message):
    """Randomise every heartlet independently for a few frames."""
    palette = ["❤️", *"🧡💛💚💙💜🤎🖤"]  # white heart deliberately excluded
    template = joined_heart.replace(R, "{}")
    for _ in range(5):
        frame = template.format(*random.choices(palette, k=heartlet_len))
        await _wrap_edit(message, frame)
        await asyncio.sleep(SLEEP)
async def phase3(message):
    """Flood the picture: turn the white background red one cell at a time."""
    await _wrap_edit(message, joined_heart)
    await asyncio.sleep(SLEEP * 2)
    frame = joined_heart
    while W in frame:
        frame = frame.replace(W, R, 1)
        await _wrap_edit(message, frame)
        await asyncio.sleep(SLEEP)
async def phase4(message):
    """Shrink a solid red heart matrix from 7x7 down to 1x1."""
    for size in range(7, 0, -1):
        await _wrap_edit(message, "\n".join([R * size] * size))
        await asyncio.sleep(SLEEP)
@Client.on_message(filters.command(["heart", "love"], ".") & filters.me)
async def hearts(client: Client, message: Message):
    """Play the colour-scroll animation, then type out the final message."""
    await phase1(message)
    await asyncio.sleep(SLEEP * 3)
    for text, delay in (
        ("❤️ I", 0.5),
        ("❤️ I Love", 0.5),
        ("❤️ I Love You", 3),
    ):
        await message.edit(text)
        await asyncio.sleep(delay)
    await message.edit("❤️ 𝐈 𝐋𝐎𝐕𝐄 𝐘𝐎𝐔 𝐉𝐀𝐀𝐍 <3")
@Client.on_message(
filters.me & (filters.command(["loveyou"], ".") | filters.regex("^loveyou "))
)
async def _(client: Client, message: Message):
noble = random.randint(1, len(NOBLE) - 2)
reply_text = NOBLE[noble]
|
DEFAULTUSER = "Man"
NOBLE = [
"╲╲╲┏━━┓╭━━━╮╱╱╱\n╲╲╲┗┓┏┛┃╭━╮┃╱╱╱\n╲╲╲╲┃┃┏┫┃╭┻┻┓╱╱\n╱╱╱┏╯╰╯┃╰┫┏━╯╱╱\n╱╱┏┻━┳┳┻━┫┗┓╱╱╱\n╱╱╰━┓┃┃╲┏┫┏┛╲╲╲\n╱╱╱╱┃╰╯╲┃┃┗━╮╲╲\n╱╱╱╱╰━━━╯╰━━┛╲╲",
"┏━╮\n┃▔┃▂▂┏━━┓┏━┳━━━┓\n┃▂┣━━┻━╮┃┃▂┃▂┏━╯\n┃▔┃▔╭╮▔┃┃┃▔┃▔┗━┓\n┃▂┃▂╰╯▂┃┗╯▂┃▂▂▂┃\n┃▔┗━━━╮┃▔▔▔┃▔┏━╯\n┃▂▂▂▂▂┣╯▂▂▂┃▂┗━╮\n┗━━━━━┻━━━━┻━━━┛",
"┏┓┏━┳━┳━┳━┓\n┃┗┫╋┣┓┃┏┫┻┫\n┗━┻━┛┗━┛┗━┛\n────YOU────",
"╦──╔╗─╗╔─╔ ─\n║──║║─║║─╠ ─\n╚═─╚╝─╚╝─╚ ─\n╦─╦─╔╗─╦╦ \n╚╦╝─║║─║║ \n─╩──╚╝─╚╝",
"╔══╗....<3 \n╚╗╔╝..('\../') \n╔╝╚╗..( •.• ) \n╚══╝..(,,)(,,) \n╔╗╔═╦╦╦═╗ ╔╗╔╗ \n║╚╣║║║║╩╣ ║╚╝║ \n╚═╩═╩═╩═╝ ╚══╝",
"░I░L░O░V░E░Y░O░U░",
"┈┈╭━╱▔▔▔▔╲━╮┈┈┈\n┈┈╰╱╭▅╮╭▅╮╲╯┈┈┈\n╳┈┈▏╰┈▅▅┈╯▕┈┈┈┈\n┈┈┈╲┈╰━━╯┈╱┈┈╳┈\n┈┈┈╱╱▔╲╱▔╲╲┈┈┈┈\n┈╭━╮▔▏┊┊▕▔╭━╮┈╳\n┈┃┊┣▔╲┊┊╱▔┫┊┃┈┈\n┈╰━━━━╲╱━━━━╯┈╳",
"╔ღ═╗╔╗\n╚╗╔╝║║ღ═╦╦╦═ღ\n╔╝╚╗ღ╚╣║║║║╠╣\n╚═ღ╝╚═╩═╩ღ╩═╝",
"╔══╗ \n╚╗╔╝ \n╔╝(¯'v'¯) \n╚══'.¸./\n╔╗╔═╦╦╦═╗ ╔╗╔╗ \n║╚╣║║║║╩╣ ║╚╝║ \n╚═╩═╩═╩═╝ ╚══╝",
"╔╗ \n║║╔═╦═╦═╦═╗ ╔╦╗ \n║╚╣╬╠╗║╔╣╩╣ ║║║ \n╚═╩═╝╚═╝╚═╝ ╚═╝ \n╔═╗ \n║═╬═╦╦╦═╦═╦═╦═╦═╗ \n║╔╣╬║╔╣╩╬╗║╔╣╩╣╔╝ \n╚╝╚═╩╝╚═╝╚═╝╚═╩╝",
"╔══╗ \n╚╗╔╝ \n╔╝╚╗ \n╚══╝ \n╔╗ \n║║╔═╦╦╦═╗ \n║╚╣║║║║╚╣ \n╚═╩═╩═╩═╝ \n╔╗╔╗ ♥️ \n║╚╝╠═╦╦╗ \n╚╗╔╣║║║║ \n═╚╝╚═╩═╝",
"╔══╗╔╗ ♡ \n╚╗╔╝║║╔═╦╦╦╔╗ \n╔╝╚╗║╚╣║║║║╔╣ \n╚══╝╚═╩═╩═╩═╝\n─────YOU─────",
"╭╮╭╮╮╭╮╮╭╮╮╭╮╮ \n┃┃╰╮╯╰╮╯╰╮╯╰╮╯ \n┃┃╭┳━━┳━╮╭━┳━━╮ \n┃┃┃┃╭╮┣╮┃┃╭┫╭╮┃ \n┃╰╯┃╰╯┃┃╰╯┃┃╰┻┻╮ \n╰━━┻━━╯╰━━╯╰━━━╯",
"┊┊╭━╮┊┊┊┊┊┊┊┊┊┊┊ \n━━╋━╯┊┊┊┊┊┊┊┊┊┊┊ \n┊┊┃┊╭━┳╮╭┓┊╭╮╭━╮ \n╭━╋━╋━╯┣╯┃┊┃╰╋━╯ \n╰━╯┊╰━━╯┊╰━┛┊╰━━",
]
R = "❤️"
W = "🤍"
heart_list = [
W * 9,
W * 2 + R * 2 + W + R * 2 + W * 2,
W + R * 7 + W,
W + R * 7 + W,
W + R * 7 + W,
W * 2 + R * 5 + W * 2,
W * 3 + R * 3 + W * 3,
W * 4 + R + W * 4,
W * 9,
]
joined_heart = "\n".join(heart_list)
heartlet_len = joined_heart.count(R)
SLEEP = 0.1
async def _wrap_edit(message, text: str):
"""Floodwait-safe utility wrapper for edit"""
try:
await message.edit(text)
except FloodWait as fl:
await asyncio.sleep(fl.x)
async def phase1(message):
"""Big scroll"""
BIG_SCROLL = "🧡💛💚💙💜🖤🤎"
await _wrap_edit(message, joined_heart)
for heart in BIG_SCROLL:
await _wrap_edit(message, joined_heart.replace(R, heart))
await asyncio.sleep(SLEEP)
async def phase2(message):
"""Per-heart randomiser"""
ALL = ["❤️"] + list("🧡💛💚💙💜🤎🖤") # don't include white heart
format_heart = joined_heart.replace(R, "{}")
for _ in range(5):
heart = format_heart.format(*random.choices(ALL, k=heartlet_len))
await _wrap_edit(message, heart)
await asyncio.sleep(SLEEP)
async def phase3(message):
"""Fill up heartlet matrix"""
await _wrap_edit(message, joined_heart)
await asyncio.sleep(SLEEP * 2)
repl = joined_heart
for _ in range(joined_heart.count(W)):
repl = repl.replace(W, R, 1)
await _wrap_edit(message, repl)
await asyncio.sleep(SLEEP)
async def phase4(message):
"""Matrix shrinking"""
for i in range(7, 0, -1):
heart_matrix = "\n".join([R * i] * i)
await _wrap_edit(message, heart_matrix)
await asyncio.sleep(SLEEP)
@Client.on_message(filters.command(["heart", "love"], ".") & filters.me)
async def hearts(client: Client, message: Message):
await phase1(message)
await asyncio.sleep(SLEEP * 3)
await message.edit("❤️ I")
await asyncio.sleep(0.5)
await message.edit("❤️ I Love")
await asyncio.sleep(0.5)
await message.edit("❤️ I Love You")
await asyncio.sleep(3)
await message.edit("❤️ 𝐈 𝐋𝐎𝐕𝐄 𝐘𝐎𝐔 𝐉𝐀𝐀𝐍 <3")
@Client.on_message(
filters.me & (filters.command(["loveyou"], ".") | filters.regex("^loveyou "))
)
async def _(client: Client, message: Message):
noble = random.randint(1, len(NOBLE) - 2)
reply_text = NOBLE[noble] | await edit_or_reply(message, reply_text) | 0 | 2023-11-13 18:19:50+00:00 | 12k |
atlantic-quantum/Shipyard | shipyard/passes/insert_ct_waveforms.py | [
{
"identifier": "LOGGER",
"path": "shipyard/logger.py",
"snippet": "LOGGER = logging.getLogger(\"Compiler\")"
},
{
"identifier": "LazyRepr",
"path": "shipyard/utilities.py",
"snippet": "class LazyRepr:\n \"\"\"\n wrap representation for lazy evaluation in logging.\n based of htt... | from openpulse import ast
from openpulse.printer import dumps as qasm_dumps
from zhinst.toolkit import CommandTable
from ..logger import LOGGER
from ..utilities import LazyRepr
from ..visitors import GenericTransformer as QASMTransformer | 8,474 |
class InsertCTWaveforms(QASMTransformer):
    """
    QASMTransformer to add in assignWaveIndex(placeholder(length), index) statements
    for each waveform in the command table
    Args:
        CommandTable:
            ZI CommandTable object
    Returns:
        list[ast.Statement]:
            A list of QASM statements
    """
    def __init__(self, commandtable: CommandTable | None) -> None:
        # Fall back to an empty dict so `if self.ct:` in visit_Program is
        # falsy when no command table was supplied.
        self.ct = commandtable or {}
    @staticmethod
    def add_assignWaveIndex(
        # NOTE(review): the original annotation `set(tuple[int, int])` was a
        # *call*, not the subscripted type `set[tuple[int, int]]`.
        waveform_set: set[tuple[int, int]]
    ) -> ast.CalibrationStatement:
        """
        Create list of openQASM statements of
        assignWaveIndex(placeholder(length), index) for each waveform in the
        waveform_set
        Args:
            waveform_set (set[tuple[int, int]]):
                A set of tuples of waveform index and length
        Returns:
            list[ast.Statement]:
                A list of QASM statements
        """
        # One assignWaveIndex(placeholder(length), index) call per entry.
        awi_statments = [
            ast.FunctionCall(
                name=ast.Identifier("assignWaveIndex"),
                arguments=[
                    ast.FunctionCall(
                        name=ast.Identifier("placeholder"),
                        arguments=[ast.IntegerLiteral(length)],
                    ),
                    ast.IntegerLiteral(index),
                ],
            )
            for (index, length) in waveform_set
        ]
        return ast.CalibrationStatement(
            body=[ast.ExpressionStatement(awi) for awi in awi_statments]
        )
    # pylint: disable=C0103
    # snake_case naming
    def visit_Program(self, node: ast.Program):
        """
        Program node transformer:
            inserts assignWaveformIndex and placeholder statements at the beginning
            of the program
        Args:
            node (ast.Program): openQASM program to process
        Returns:
            ast.Program: same node with waveform declarations inserted
        """
        if self.ct:
            i = 0
            waveform_set = set()
            # Collect (index, length) pairs until the first table entry whose
            # waveform index/length is unset.
            while (
                self.ct.table[i].waveform.index is not None
                and self.ct.table[i].waveform.length is not None
            ):
                # iterating over the command table items ran indices that were out of
                # the bounds of the json schema, could not use for loop/ list
                # comprehension
                waveform_set.add(
                    (self.ct.table[i].waveform.index, self.ct.table[i].waveform.length)
                )
                i += 1
            # Insert after the first statement (the version/defcal header).
            node.statements.insert(1, self.add_assignWaveIndex(waveform_set))
class InsertCTWaveforms(QASMTransformer):
"""
QASMTransformer to add in assignWaveIndex(placeholder(length), index) statements
for each waveform in the command table
Args:
CommandTable:
ZI CommandTable object
Returns:
list[ast.Statement]:
A list of QASM statements
"""
def __init__(self, commandtable: CommandTable | None) -> None:
self.ct = commandtable or {}
@staticmethod
def add_assignWaveIndex(
waveform_set: set(tuple[int, int])
) -> ast.CalibrationStatement:
"""
Create list of openQASM statements to of
assignWaveIndex(placeholder(length), index) for each waveform in the
waveform_set
Args:
waveform_set (set(tuple[int, int])):
A set of tuples of waveform index and length
Returns:
list[ast.Statement]:
A list of QASM statements
"""
awi_statments = [
ast.FunctionCall(
name=ast.Identifier("assignWaveIndex"),
arguments=[
ast.FunctionCall(
name=ast.Identifier("placeholder"),
arguments=[ast.IntegerLiteral(length)],
),
ast.IntegerLiteral(index),
],
)
for (index, length) in waveform_set
]
return ast.CalibrationStatement(
body=[ast.ExpressionStatement(awi) for awi in awi_statments]
)
# pylint: disable=C0103
# snake_case naming
def visit_Program(self, node: ast.Program):
"""
Program node transformer:
inserts assignWaveformIndex and placeholder statememnets at the beginning
of the program
Args:
node (ast.Program): openQASM program to process
Returns:
ast.Program: same node with waveform declarations inserted
"""
if self.ct:
i = 0
waveform_set = set()
while (
self.ct.table[i].waveform.index is not None
and self.ct.table[i].waveform.length is not None
):
# iterating over the command table items ran indices that were out of
# the bounds of the json schema, could not use for loop/ list
# comprehension
waveform_set.add(
(self.ct.table[i].waveform.index, self.ct.table[i].waveform.length)
)
i += 1
node.statements.insert(1, self.add_assignWaveIndex(waveform_set)) | LOGGER.debug("\n%s", LazyRepr(qasm_dumps, [node])) | 0 | 2023-11-16 17:37:29+00:00 | 12k |
KevinXu02/ControlledDreamGaussian | frankmocap/bodymocap/body_mocap_api.py | [
{
"identifier": "hmr",
"path": "frankmocap/bodymocap/models/hmr.py",
"snippet": "def hmr(smpl_mean_params, pretrained=True, **kwargs):\n \"\"\" Constructs an HMR model with ResNet50 backbone.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n mode... | import cv2
import sys
import torch
import numpy as np
import pickle
import frankmocap.mocap_utils.geometry_utils as gu
from torchvision.transforms import Normalize
from frankmocap.bodymocap.models import hmr, SMPL, SMPLX
from frankmocap.bodymocap import constants
from frankmocap.bodymocap.utils.imutils import crop, crop_bboxInfo, process_image_bbox, process_image_keypoints, \
bbox_from_keypoints
from frankmocap.mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm | 7,461 | # Copyright (c) Facebook, Inc. and its affiliates.
class BodyMocap(object):
def __init__(self, regressor_checkpoint, smpl_dir, device=torch.device('cuda'), use_smplx=False):
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Load parametric model (SMPLX or SMPL)
if use_smplx:
smplModelPath = smpl_dir + '/SMPLX_NEUTRAL.pkl'
self.smpl = SMPLX(smpl_dir,
batch_size=1,
num_betas=10,
use_pca=False,
create_transl=False).to(self.device)
self.use_smplx = True
else:
smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
self.smpl = SMPL(smplModelPath, batch_size=1, create_transl=False).to(self.device)
self.use_smplx = False
# Load pre-trained neural network
SMPL_MEAN_PARAMS = './frankmocap/extra_data/body_module/data_from_spin/smpl_mean_params.npz'
self.model_regressor = hmr(SMPL_MEAN_PARAMS).to(self.device)
checkpoint = torch.load(regressor_checkpoint)
self.model_regressor.load_state_dict(checkpoint['model'], strict=False)
self.model_regressor.eval()
def regress(self, img_original, body_bbox_list):
"""
args:
img_original: original raw image (BGR order by using cv2.imread)
body_bbox: bounding box around the target: (minX, minY, width, height)
outputs:
pred_vertices_img:
pred_joints_vis_img:
pred_rotmat
pred_betas
pred_camera
bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
bboxTopLeft: bbox top left (redundant)
boxScale_o2n: bbox scaling factor (redundant)
"""
pred_output_list = list()
for body_bbox in body_bbox_list:
img, norm_img, boxScale_o2n, bboxTopLeft, bbox = process_image_bbox(
img_original, body_bbox, input_res=constants.IMG_RES)
bboxTopLeft = np.array(bboxTopLeft)
# bboxTopLeft = bbox['bboxXYWH'][:2]
if img is None:
pred_output_list.append(None)
continue
with torch.no_grad():
# model forward
pred_rotmat, pred_betas, pred_camera = self.model_regressor(norm_img.to(self.device))
# Convert rot_mat to aa since hands are always in aa
# pred_aa = rotmat3x3_to_angle_axis(pred_rotmat)
pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).cuda()
pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
# remove global rotation
pred_aa[:, :3] = 0
smpl_output = self.smpl(
betas=pred_betas,
body_pose=pred_aa[:, 3:],
global_orient=pred_aa[:, :3],
pose2rot=True)
pred_vertices = smpl_output.vertices
pred_joints_3d = smpl_output.joints
pred_vertices = pred_vertices[0].cpu().numpy()
pred_camera = pred_camera.cpu().numpy().ravel()
camScale = pred_camera[0] # *1.15
camTrans = pred_camera[1:]
pred_output = dict()
# Convert mesh to original image space (X,Y are aligned to image)
# 1. SMPL -> 2D bbox
# 2. 2D bbox -> original 2D image
pred_vertices_bbox = convert_smpl_to_bbox(pred_vertices, camScale, camTrans)
| # Copyright (c) Facebook, Inc. and its affiliates.
class BodyMocap(object):
def __init__(self, regressor_checkpoint, smpl_dir, device=torch.device('cuda'), use_smplx=False):
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Load parametric model (SMPLX or SMPL)
if use_smplx:
smplModelPath = smpl_dir + '/SMPLX_NEUTRAL.pkl'
self.smpl = SMPLX(smpl_dir,
batch_size=1,
num_betas=10,
use_pca=False,
create_transl=False).to(self.device)
self.use_smplx = True
else:
smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
self.smpl = SMPL(smplModelPath, batch_size=1, create_transl=False).to(self.device)
self.use_smplx = False
# Load pre-trained neural network
SMPL_MEAN_PARAMS = './frankmocap/extra_data/body_module/data_from_spin/smpl_mean_params.npz'
self.model_regressor = hmr(SMPL_MEAN_PARAMS).to(self.device)
checkpoint = torch.load(regressor_checkpoint)
self.model_regressor.load_state_dict(checkpoint['model'], strict=False)
self.model_regressor.eval()
def regress(self, img_original, body_bbox_list):
"""
args:
img_original: original raw image (BGR order by using cv2.imread)
body_bbox: bounding box around the target: (minX, minY, width, height)
outputs:
pred_vertices_img:
pred_joints_vis_img:
pred_rotmat
pred_betas
pred_camera
bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
bboxTopLeft: bbox top left (redundant)
boxScale_o2n: bbox scaling factor (redundant)
"""
pred_output_list = list()
for body_bbox in body_bbox_list:
img, norm_img, boxScale_o2n, bboxTopLeft, bbox = process_image_bbox(
img_original, body_bbox, input_res=constants.IMG_RES)
bboxTopLeft = np.array(bboxTopLeft)
# bboxTopLeft = bbox['bboxXYWH'][:2]
if img is None:
pred_output_list.append(None)
continue
with torch.no_grad():
# model forward
pred_rotmat, pred_betas, pred_camera = self.model_regressor(norm_img.to(self.device))
# Convert rot_mat to aa since hands are always in aa
# pred_aa = rotmat3x3_to_angle_axis(pred_rotmat)
pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).cuda()
pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
# remove global rotation
pred_aa[:, :3] = 0
smpl_output = self.smpl(
betas=pred_betas,
body_pose=pred_aa[:, 3:],
global_orient=pred_aa[:, :3],
pose2rot=True)
pred_vertices = smpl_output.vertices
pred_joints_3d = smpl_output.joints
pred_vertices = pred_vertices[0].cpu().numpy()
pred_camera = pred_camera.cpu().numpy().ravel()
camScale = pred_camera[0] # *1.15
camTrans = pred_camera[1:]
pred_output = dict()
# Convert mesh to original image space (X,Y are aligned to image)
# 1. SMPL -> 2D bbox
# 2. 2D bbox -> original 2D image
pred_vertices_bbox = convert_smpl_to_bbox(pred_vertices, camScale, camTrans) | pred_vertices_img = convert_bbox_to_oriIm( | 10 | 2023-11-17 05:21:26+00:00 | 12k |
dazhangyu123/OCL | train_source.py | [
{
"identifier": "Eval",
"path": "utils/eval.py",
"snippet": "class Eval():\n def __init__(self, num_class):\n self.num_class = num_class\n self.confusion_matrix = np.zeros((self.num_class,)*2)\n self.ignore_index = None\n self.synthia = True if num_class == 16 else False\n... | import os
import random
import logging
import argparse
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.functional as F
import numpy as np
import sys
import shutil
from tqdm import tqdm
from math import ceil
from distutils.version import LooseVersion
from tensorboardX import SummaryWriter
from torchvision.utils import make_grid
from utils.eval import Eval
from utils.train_helper import get_model
from datasets.cityscapes_Dataset import City_Dataset, City_DataLoader, inv_preprocess, decode_labels
from datasets.gta5_Dataset import GTA5_DataLoader
from datasets.synthia_Dataset import SYNTHIA_DataLoader | 8,576 |
# validate
PA, MPA, MIoU, FWIoU = self.validate()
self.writer.add_scalar('PA', PA, self.current_epoch)
self.writer.add_scalar('MPA', MPA, self.current_epoch)
self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
self.current_MIoU = MIoU
is_best = MIoU > self.best_MIou
if is_best:
self.best_MIou = MIoU
self.best_iter = self.current_iter
self.logger.info("=>saving a new best checkpoint...")
self.save_checkpoint(self.train_id+'best.pth')
else:
self.logger.info("=> The MIoU of val does't improve.")
self.logger.info("=> The best MIoU of val is {} at {}".format(self.best_MIou, self.best_iter))
self.current_epoch += 1
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou': self.current_MIoU
}
self.logger.info("=>best_MIou {} at {}".format(self.best_MIou, self.best_iter))
self.logger.info("=>saving the final checkpoint to " + os.path.join(self.args.checkpoint_dir, self.train_id+'final.pth'))
self.save_checkpoint(self.train_id+'final.pth')
def train_one_epoch(self):
tqdm_epoch = tqdm(self.dataloader.data_loader, total=self.dataloader.num_iterations,
desc="Train Epoch-{}-total-{}".format(self.current_epoch+1, self.epoch_num))
self.logger.info("Training one epoch...")
self.Eval.reset()
train_loss = []
loss_seg_value_2 = 0
iter_num = self.dataloader.num_iterations
if self.args.freeze_bn:
self.model.eval()
self.logger.info("freeze bacth normalization successfully!")
else:
self.model.train()
# Initialize your average meters
batch_idx = 0
for x, y, _ in tqdm_epoch:
self.poly_lr_scheduler(
optimizer=self.optimizer,
init_lr=self.args.lr,
iter=self.current_iter,
max_iter=self.args.iter_max,
power=self.args.poly_power,
)
if self.args.iter_stop is not None and self.current_iter >= self.args.iter_stop:
self.logger.info("iteration arrive {}(early stop)/{}(total step)!".format(self.args.iter_stop, self.args.iter_max))
break
if self.current_iter >= self.args.iter_max:
self.logger.info("iteration arrive {}!".format(self.args.iter_max))
break
self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]["lr"], self.current_iter)
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
y = torch.squeeze(y, 1)
self.optimizer.zero_grad()
# model
pred = self.model(x)
if isinstance(pred, tuple):
pred_2 = pred[1]
pred = pred[0]
pred = F.interpolate(pred, size=x.size()[2:], mode='bilinear', align_corners=True)
# loss
cur_loss = self.loss(pred, y)
if self.args.multi:
loss_2 = self.args.lambda_seg * self.loss(pred_2, y)
cur_loss += loss_2
loss_seg_value_2 += loss_2.cpu().item() / iter_num
# optimizer
cur_loss.backward()
self.optimizer.step()
train_loss.append(cur_loss.item())
if batch_idx % 1000 == 0:
if self.args.multi:
self.logger.info("The train loss of epoch{}-batch-{}:{};{}".format(self.current_epoch,
batch_idx, cur_loss.item(), loss_2.item()))
else:
self.logger.info("The train loss of epoch{}-batch-{}:{}".format(self.current_epoch,
batch_idx, cur_loss.item()))
batch_idx += 1
self.current_iter += 1
if np.isnan(float(cur_loss.item())):
raise ValueError('Loss is nan during training...')
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
if batch_idx==self.dataloader.num_iterations:
break
self.log_one_train_epoch(x, label, argpred, train_loss)
tqdm_epoch.close()
def log_one_train_epoch(self, x, label, argpred, train_loss):
#show train image on tensorboard
|
sys.path.append(os.path.abspath('tools'))
datasets_path={
'cityscapes': {'data_root_path': '/mnt/Xsky/zyl/dataset/dataset/Cityscapes', 'list_path': './datasets/city_list',
'image_path':'/mnt/Xsky/zyl/dataset/Cityscapes/leftImg8bit',
'gt_path': './datasets/Cityscapes/gtFine'},
'gta5': {'data_root_path': '/mnt/Xsky/zyl/dataset/GTA5', 'list_path': './datasets/gta5_list',
'image_path':'/mnt/Xsky/zyl/dataset/GTA5/images',
'gt_path': './datasets/GTA5/labels'},
'synthia': {'data_root_path': '/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES', 'list_path': './datasets/synthia_list',
'image_path':'/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES/RGB',
'gt_path': './datasets/SYNTHIA/GT/LABELS'},
'NTHU': {'data_root_path': './datasets/NTHU_Datasets', 'list_path': './datasets/NTHU_list'}
}
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
ITER_MAX = 5000
class Trainer():
def __init__(self, args, cuda=None, train_id="None", logger=None):
self.args = args
os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
self.cuda = cuda and torch.cuda.is_available()
self.device = torch.device('cuda' if self.cuda else 'cpu')
self.train_id = train_id
self.logger = logger
self.current_MIoU = 0
self.best_MIou = 0
self.best_source_MIou = 0
self.current_epoch = 0
self.current_iter = 0
self.second_best_MIou = 0
# set TensorboardX
self.writer = SummaryWriter(self.args.checkpoint_dir)
# Metric definition
self.Eval = Eval(self.args.num_classes)
# loss definition
self.loss = nn.CrossEntropyLoss(weight=None, ignore_index= -1)
self.loss.to(self.device)
# model
self.model, params = get_model(self.args)
self.model = nn.DataParallel(self.model, device_ids=[0])
self.model.to(self.device)
if self.args.optim == "SGD":
self.optimizer = torch.optim.SGD(
params=params,
momentum=self.args.momentum,
weight_decay=self.args.weight_decay
)
elif self.args.optim == "Adam":
self.optimizer = torch.optim.Adam(params, betas=(0.9, 0.99), weight_decay=self.args.weight_decay)
# dataloader
if self.args.dataset=="cityscapes":
self.dataloader = City_DataLoader(self.args)
elif self.args.dataset=="gta5":
self.dataloader = GTA5_DataLoader(self.args)
else:
self.dataloader = SYNTHIA_DataLoader(self.args)
self.dataloader.num_iterations = min(self.dataloader.num_iterations, ITER_MAX)
print(self.args.iter_max, self.dataloader.num_iterations)
self.epoch_num = ceil(self.args.iter_max / self.dataloader.num_iterations) if self.args.iter_stop is None else \
ceil(self.args.iter_stop / self.dataloader.num_iterations)
def main(self):
# display args details
self.logger.info("Global configuration as follows:")
for key, val in vars(self.args).items():
self.logger.info("{:16} {}".format(key, val))
# choose cuda
if self.cuda:
current_device = torch.cuda.current_device()
self.logger.info("This model will run on {}".format(torch.cuda.get_device_name(current_device)))
else:
self.logger.info("This model will run on CPU")
# load pretrained checkpoint
if self.args.pretrained_ckpt_file is not None:
if os.path.isdir(self.args.pretrained_ckpt_file):
self.args.pretrained_ckpt_file = os.path.join(self.args.checkpoint_dir, self.train_id + 'best.pth')
self.load_checkpoint(self.args.pretrained_ckpt_file)
if self.args.continue_training:
self.load_checkpoint(os.path.join(self.args.checkpoint_dir, self.train_id + 'best.pth'))
self.best_iter = self.current_iter
self.best_source_iter = self.current_iter
else:
self.current_epoch = 0
# train
self.train()
self.writer.close()
def train(self):
# self.validate() # check image summary
for epoch in tqdm(range(self.current_epoch, self.epoch_num),
desc="Total {} epochs".format(self.epoch_num)):
self.train_one_epoch()
# validate
PA, MPA, MIoU, FWIoU = self.validate()
self.writer.add_scalar('PA', PA, self.current_epoch)
self.writer.add_scalar('MPA', MPA, self.current_epoch)
self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
self.current_MIoU = MIoU
is_best = MIoU > self.best_MIou
if is_best:
self.best_MIou = MIoU
self.best_iter = self.current_iter
self.logger.info("=>saving a new best checkpoint...")
self.save_checkpoint(self.train_id+'best.pth')
else:
self.logger.info("=> The MIoU of val does't improve.")
self.logger.info("=> The best MIoU of val is {} at {}".format(self.best_MIou, self.best_iter))
self.current_epoch += 1
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou': self.current_MIoU
}
self.logger.info("=>best_MIou {} at {}".format(self.best_MIou, self.best_iter))
self.logger.info("=>saving the final checkpoint to " + os.path.join(self.args.checkpoint_dir, self.train_id+'final.pth'))
self.save_checkpoint(self.train_id+'final.pth')
def train_one_epoch(self):
tqdm_epoch = tqdm(self.dataloader.data_loader, total=self.dataloader.num_iterations,
desc="Train Epoch-{}-total-{}".format(self.current_epoch+1, self.epoch_num))
self.logger.info("Training one epoch...")
self.Eval.reset()
train_loss = []
loss_seg_value_2 = 0
iter_num = self.dataloader.num_iterations
if self.args.freeze_bn:
self.model.eval()
self.logger.info("freeze bacth normalization successfully!")
else:
self.model.train()
# Initialize your average meters
batch_idx = 0
for x, y, _ in tqdm_epoch:
self.poly_lr_scheduler(
optimizer=self.optimizer,
init_lr=self.args.lr,
iter=self.current_iter,
max_iter=self.args.iter_max,
power=self.args.poly_power,
)
if self.args.iter_stop is not None and self.current_iter >= self.args.iter_stop:
self.logger.info("iteration arrive {}(early stop)/{}(total step)!".format(self.args.iter_stop, self.args.iter_max))
break
if self.current_iter >= self.args.iter_max:
self.logger.info("iteration arrive {}!".format(self.args.iter_max))
break
self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]["lr"], self.current_iter)
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
y = torch.squeeze(y, 1)
self.optimizer.zero_grad()
# model
pred = self.model(x)
if isinstance(pred, tuple):
pred_2 = pred[1]
pred = pred[0]
pred = F.interpolate(pred, size=x.size()[2:], mode='bilinear', align_corners=True)
# loss
cur_loss = self.loss(pred, y)
if self.args.multi:
loss_2 = self.args.lambda_seg * self.loss(pred_2, y)
cur_loss += loss_2
loss_seg_value_2 += loss_2.cpu().item() / iter_num
# optimizer
cur_loss.backward()
self.optimizer.step()
train_loss.append(cur_loss.item())
if batch_idx % 1000 == 0:
if self.args.multi:
self.logger.info("The train loss of epoch{}-batch-{}:{};{}".format(self.current_epoch,
batch_idx, cur_loss.item(), loss_2.item()))
else:
self.logger.info("The train loss of epoch{}-batch-{}:{}".format(self.current_epoch,
batch_idx, cur_loss.item()))
batch_idx += 1
self.current_iter += 1
if np.isnan(float(cur_loss.item())):
raise ValueError('Loss is nan during training...')
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
if batch_idx==self.dataloader.num_iterations:
break
self.log_one_train_epoch(x, label, argpred, train_loss)
tqdm_epoch.close()
def log_one_train_epoch(self, x, label, argpred, train_loss):
#show train image on tensorboard | images_inv = inv_preprocess(x.clone().cpu(), self.args.show_num_images, numpy_transform=self.args.numpy_transform) | 4 | 2023-11-14 02:01:11+00:00 | 12k |
raphaelreme/koft | src/experiments/track.py | [
{
"identifier": "FakeDetector",
"path": "src/detector.py",
"snippet": "class FakeDetector(byotrack.Detector): # TODO: include weight\n def __init__(self, mu: torch.Tensor, noise=1.0, fpr=0.1, fnr=0.2, generate_outside_particles=True):\n self.noise = noise\n self.fpr = fpr\n self... | import dataclasses
import enum
import pathlib
import dacite
import torch
import tqdm # type: ignore
import yaml # type: ignore
import byotrack
from typing import Collection, List
from byotrack.implementation.detector.wavelet import WaveletDetector
from byotrack.implementation.linker.icy_emht import EMHTParameters, IcyEMHTLinker, Motion
from byotrack.implementation.linker.trackmate.trackmate import TrackMateLinker, TrackMateParameters
from byotrack.implementation.refiner.interpolater import ForwardBackwardInterpolater
from ..detector import FakeDetector
from ..metrics.detections import DetectionMetric
from ..metrics.tracking import compute_tracking_metrics
from ..skt import constant_kalman_filter, Dist, Method, MatchingConfig, SimpleKalmanTracker, PartialTrack
from ..koft import constant_koft_filter, OptFlowExtraction, SingleUpdateKOFTracker, TwoUpdateKOFTracker
from ..optical_flow import farneback
from ..utils import enforce_all_seeds | 9,608 |
class DetectionMethod(enum.Enum):
WAVELET = "wavelet"
FAKE = "fake"
@dataclasses.dataclass
class WaveletConfig:
k: float = 3.0
scale: int = 1
min_area: float = 10.0
@dataclasses.dataclass
class FakeConfig:
fpr: float = 0.1 # Bad detection rate
fnr: float = 0.2 # Miss detection rate
measurement_noise: float = 1.0
@dataclasses.dataclass
class DetectionConfig:
detector: DetectionMethod
wavelet: WaveletConfig
fake: FakeConfig
# interactive = False # Could tweak the detector parameters interactively ?
def create_detector(self, mu: torch.Tensor) -> byotrack.Detector:
if self.detector == DetectionMethod.WAVELET:
return WaveletDetector(self.wavelet.scale, self.wavelet.k, self.wavelet.min_area)
return FakeDetector(mu, self.fake.measurement_noise, self.fake.fpr, self.fake.fnr)
@dataclasses.dataclass
class KalmanConfig:
detection_noise: float
of_noise: float
process_noise: float # Miss evaluation of the process
dist: Dist
matching_method: Method
always_update_velocities: bool = True
dim: int = 2
order: int = 1
class TrackingMethod(enum.Enum):
SKT = "skt"
KOFT = "koft"
KOFTmm = "koft--"
KOFTpp = "koft++"
TRACKMATE = "trackmate"
TRACKMATE_KF = "trackmate-kf"
EMHT = "emht"
@dataclasses.dataclass
class ExperimentConfig:
seed: int
simulation_path: pathlib.Path
tracking_method: TrackingMethod
detection: DetectionConfig
kalman: KalmanConfig
icy_path: pathlib.Path
fiji_path: pathlib.Path
def create_linker(self, thresh: float) -> byotrack.Linker:
"""Create a linker"""
if self.tracking_method is TrackingMethod.EMHT:
return IcyEMHTLinker(
self.icy_path,
EMHTParameters(
gate_factor=thresh,
motion=Motion.MULTI,
tree_depth=2,
),
)
if self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF):
# As kalman tracking we let a gap of 2 consecutive miss detections
# In that case, we allow 1.5 thresh
return TrackMateLinker(
self.fiji_path,
TrackMateParameters(
max_frame_gap=PartialTrack.MAX_NON_MEASURE,
linking_max_distance=thresh,
gap_closing_max_distance=thresh * 1.5,
kalman_search_radius=thresh if self.tracking_method is TrackingMethod.TRACKMATE_KF else None,
),
)
if self.tracking_method is TrackingMethod.SKT:
|
class DetectionMethod(enum.Enum):
WAVELET = "wavelet"
FAKE = "fake"
@dataclasses.dataclass
class WaveletConfig:
k: float = 3.0
scale: int = 1
min_area: float = 10.0
@dataclasses.dataclass
class FakeConfig:
fpr: float = 0.1 # Bad detection rate
fnr: float = 0.2 # Miss detection rate
measurement_noise: float = 1.0
@dataclasses.dataclass
class DetectionConfig:
detector: DetectionMethod
wavelet: WaveletConfig
fake: FakeConfig
# interactive = False # Could tweak the detector parameters interactively ?
def create_detector(self, mu: torch.Tensor) -> byotrack.Detector:
if self.detector == DetectionMethod.WAVELET:
return WaveletDetector(self.wavelet.scale, self.wavelet.k, self.wavelet.min_area)
return FakeDetector(mu, self.fake.measurement_noise, self.fake.fpr, self.fake.fnr)
@dataclasses.dataclass
class KalmanConfig:
detection_noise: float
of_noise: float
process_noise: float # Miss evaluation of the process
dist: Dist
matching_method: Method
always_update_velocities: bool = True
dim: int = 2
order: int = 1
class TrackingMethod(enum.Enum):
SKT = "skt"
KOFT = "koft"
KOFTmm = "koft--"
KOFTpp = "koft++"
TRACKMATE = "trackmate"
TRACKMATE_KF = "trackmate-kf"
EMHT = "emht"
@dataclasses.dataclass
class ExperimentConfig:
seed: int
simulation_path: pathlib.Path
tracking_method: TrackingMethod
detection: DetectionConfig
kalman: KalmanConfig
icy_path: pathlib.Path
fiji_path: pathlib.Path
def create_linker(self, thresh: float) -> byotrack.Linker:
"""Create a linker"""
if self.tracking_method is TrackingMethod.EMHT:
return IcyEMHTLinker(
self.icy_path,
EMHTParameters(
gate_factor=thresh,
motion=Motion.MULTI,
tree_depth=2,
),
)
if self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF):
# As kalman tracking we let a gap of 2 consecutive miss detections
# In that case, we allow 1.5 thresh
return TrackMateLinker(
self.fiji_path,
TrackMateParameters(
max_frame_gap=PartialTrack.MAX_NON_MEASURE,
linking_max_distance=thresh,
gap_closing_max_distance=thresh * 1.5,
kalman_search_radius=thresh if self.tracking_method is TrackingMethod.TRACKMATE_KF else None,
),
)
if self.tracking_method is TrackingMethod.SKT: | kalman_filter = constant_kalman_filter( | 3 | 2023-11-10 10:18:39+00:00 | 12k |
david9dragon9/LOMOLite | lomo/lomo_base.py | [
{
"identifier": "LOMO",
"path": "lomo/lomo_orig.py",
"snippet": "class LOMO(Optimizer):\n \"\"\"\n 一个自定义的优化器类LOMO,用于在分布式训练中的梯度更新。\n\n 该类实现两个梯度更新函数 :meth:`fuse_update` 和 :meth:`fuse_update_zero3`,分别用于非ZeRO和ZeRO模式下的梯度更新。\n\n :param model: 待优化的模型\n :param lr: 学习率,默认值为1e-3\n :param clip_gr... | import torch
import sys
import os
import tqdm
import deepspeed
import deepspeed
import os
from transformers.deepspeed import HfDeepSpeedConfig
from transformers import AutoConfig
from collections import OrderedDict
from lomo.lomo_orig import LOMO
from lomo.adalomo_orig import AdaLomo
from lomo.lomo_utils import LearningRateScheduler, DynamicLossScaler
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator | 7,298 | # Source: https://github.com/OpenLMLab/LOMO
# Source: https://github.com/OpenLMLab/collie/tree/dev/collie
try:
except:
pass
def setup_lomo(model_name_or_path):
torch.set_default_dtype(torch.float16)
ds_config = __file__.replace("lomo_base.py", "ds_config.json")
dschf = HfDeepSpeedConfig(ds_config)
config = AutoConfig.from_pretrained(model_name_or_path)
config.gradient_checkpointing = True
return config
def create_lomo_lr_scheduler(
learning_rate=0.03,
n_steps=1000,
num_train_epochs=10,
warmup=0.1,
lr_scheduler_type="linear",
):
return LearningRateScheduler(
learning_rate=learning_rate,
warmup=warmup,
schedule=lr_scheduler_type,
n_steps=n_steps,
)
name_to_lomo = {
| # Source: https://github.com/OpenLMLab/LOMO
# Source: https://github.com/OpenLMLab/collie/tree/dev/collie
try:
except:
pass
def setup_lomo(model_name_or_path):
torch.set_default_dtype(torch.float16)
ds_config = __file__.replace("lomo_base.py", "ds_config.json")
dschf = HfDeepSpeedConfig(ds_config)
config = AutoConfig.from_pretrained(model_name_or_path)
config.gradient_checkpointing = True
return config
def create_lomo_lr_scheduler(
learning_rate=0.03,
n_steps=1000,
num_train_epochs=10,
warmup=0.1,
lr_scheduler_type="linear",
):
return LearningRateScheduler(
learning_rate=learning_rate,
warmup=warmup,
schedule=lr_scheduler_type,
n_steps=n_steps,
)
name_to_lomo = { | "lomo": LOMO, | 0 | 2023-11-11 03:29:00+00:00 | 12k |
quantuminterface/qiclib | src/qiclib/code/qi_sequencer.py | [
{
"identifier": "QiCellProperty",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiCellProperty(QiExpression):\n \"\"\"When describing experiments, properties of cells might not yet be defined. Instead a QiCellProperty object will be generated.\n This object can be used as leng... | from enum import Enum
from typing import List, Union, Any, Dict, Optional, Tuple
from qiclib.code.qi_jobs import (
ForRange,
If,
Parallel,
cQiRecording,
cQiSync,
)
from .qi_var_definitions import (
QiCellProperty,
QiVariableSet,
_QiCalcBase,
_QiVariableBase,
QiExpression,
_QiConstValue,
QiCondition,
QiOpCond,
QiOp,
)
from .qi_seq_instructions import (
SeqLoad,
SeqStore,
SeqAwaitQubitState,
SequencerInstruction,
SeqRegImmediateInst,
SeqRegRegInst,
SeqLoadUpperImm,
SeqJump,
SeqBranch,
SeqWaitImm,
SeqWaitRegister,
SeqTrigger,
SeqEnd,
SeqTriggerWaitRegister,
)
from .qi_util import _get_for_range_iterations
from .qi_var_definitions import _QiVariableBase
from .qi_var_definitions import _QiCalcBase
from .qi_var_definitions import _QiVariableBase
from .qi_jobs import _cQiPlay_base
import warnings
import qiclib.packages.utility as util | 9,554 | """Class of Sequencer representing registers.
Keeps track of values in register. Values are used for program length. Program length is invalidated by use of If/Else.
TODO load commands invalidate value"""
def __init__(self, address) -> None:
self.adr = address
self.value = None
self.valid = True
def addition(self, val1, val2):
self.value = val1 + val2
def subtraction(self, val1, val2):
self.value = val1 - val2
def multiplication(self, val1, val2):
self.value = val1 * val2
def and_values(self, val1, val2):
self.value = val1 & val2
def or_values(self, val1, val2):
self.value = val1 | val2
def xor_values(self, val1, val2):
self.value = val1 ^ val2
def lshift(self, val1, val2):
self.value = val1 << val2
def rshift(self, val1, val2):
self.value = val1 >> val2
def inversion(self, val1, val2):
self.value = ~val1
# Dictionary used to receive function from input QiOp
eval_operation = {
QiOp.PLUS: addition,
QiOp.MINUS: subtraction,
QiOp.MULT: multiplication,
QiOp.AND: and_values,
QiOp.OR: or_values,
QiOp.XOR: xor_values,
QiOp.LSH: lshift,
QiOp.RSH: rshift,
QiOp.NOT: inversion,
}
def get_value(self):
if self.valid:
return self.value
return None
def update_register_value(self, val1, op, val2):
"""Register Values are updated to allow implicit synchronisations through wait when variable Wait/Pulse is used.
When a calculation is done using a invalid variable value, the ensuing value is also invalidated.
"""
if self.adr == 0:
self.value = 0 # reg0 always contains 0
return
if isinstance(val1, _Register):
if val1.value is None:
raise RuntimeError(
f"Variable at Register {val1.adr} has not been properly initialised"
)
if not val1.valid:
self.valid = False
val1 = val1.value
if isinstance(val2, _Register):
if val2.value is None:
raise RuntimeError(
f"Variable at Register {val2.adr} has not been properly initialised"
)
if not val2.valid:
self.valid = False
val2 = val2.value
self.eval_operation[op](self, val1, val2)
class ForRangeEntry:
def __init__(self, reg_addr, start_val, end_val, step_val) -> None:
self.reg_addr = reg_addr
self.start = start_val
self.end = end_val
self.step = step_val
self.end_addr = 0
self.iterations = 0
self.aggregate_iterations = 0
self.contained_entries: List[ForRangeEntry] = []
def _calc_aggregate(self):
"""Calculates the number of loops contained inside, considering nested entries, for later use at progress bar."""
self.iterations = _get_for_range_iterations(self.start, self.end, self.step)
if len(self.contained_entries) == 0 or self.iterations is None:
if self.iterations is None:
self.aggregate_iterations = 0
warnings.warn(
"A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
)
else:
self.aggregate_iterations = self.iterations
else:
nested = 0
for entry in self.contained_entries:
if entry.aggregate_iterations == 0:
warnings.warn(
"A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
)
continue
nested += entry.aggregate_iterations
self.aggregate_iterations = self.iterations * (nested if nested != 0 else 1)
def get_iteration(self, value: int) -> int:
"""Returns the current iteration depending on the parameter value"""
| # Copyright © 2017-2023 Quantum Interface (quantuminterface@ipe.kit.edu)
# Richard Gebauer, IPE, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
The lower level logic of the code generation.
This module tracks the sequencer state at the current point (e.g. register values, variable to register mapping, etc.),
provides helper functions to generate code for expressions and more.
"""
class _Register:
"""Class of Sequencer representing registers.
Keeps track of values in register. Values are used for program length. Program length is invalidated by use of If/Else.
TODO load commands invalidate value"""
def __init__(self, address) -> None:
self.adr = address
self.value = None
self.valid = True
def addition(self, val1, val2):
self.value = val1 + val2
def subtraction(self, val1, val2):
self.value = val1 - val2
def multiplication(self, val1, val2):
self.value = val1 * val2
def and_values(self, val1, val2):
self.value = val1 & val2
def or_values(self, val1, val2):
self.value = val1 | val2
def xor_values(self, val1, val2):
self.value = val1 ^ val2
def lshift(self, val1, val2):
self.value = val1 << val2
def rshift(self, val1, val2):
self.value = val1 >> val2
def inversion(self, val1, val2):
self.value = ~val1
# Dictionary used to receive function from input QiOp
eval_operation = {
QiOp.PLUS: addition,
QiOp.MINUS: subtraction,
QiOp.MULT: multiplication,
QiOp.AND: and_values,
QiOp.OR: or_values,
QiOp.XOR: xor_values,
QiOp.LSH: lshift,
QiOp.RSH: rshift,
QiOp.NOT: inversion,
}
def get_value(self):
if self.valid:
return self.value
return None
def update_register_value(self, val1, op, val2):
"""Register Values are updated to allow implicit synchronisations through wait when variable Wait/Pulse is used.
When a calculation is done using a invalid variable value, the ensuing value is also invalidated.
"""
if self.adr == 0:
self.value = 0 # reg0 always contains 0
return
if isinstance(val1, _Register):
if val1.value is None:
raise RuntimeError(
f"Variable at Register {val1.adr} has not been properly initialised"
)
if not val1.valid:
self.valid = False
val1 = val1.value
if isinstance(val2, _Register):
if val2.value is None:
raise RuntimeError(
f"Variable at Register {val2.adr} has not been properly initialised"
)
if not val2.valid:
self.valid = False
val2 = val2.value
self.eval_operation[op](self, val1, val2)
class ForRangeEntry:
def __init__(self, reg_addr, start_val, end_val, step_val) -> None:
self.reg_addr = reg_addr
self.start = start_val
self.end = end_val
self.step = step_val
self.end_addr = 0
self.iterations = 0
self.aggregate_iterations = 0
self.contained_entries: List[ForRangeEntry] = []
def _calc_aggregate(self):
"""Calculates the number of loops contained inside, considering nested entries, for later use at progress bar."""
self.iterations = _get_for_range_iterations(self.start, self.end, self.step)
if len(self.contained_entries) == 0 or self.iterations is None:
if self.iterations is None:
self.aggregate_iterations = 0
warnings.warn(
"A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
)
else:
self.aggregate_iterations = self.iterations
else:
nested = 0
for entry in self.contained_entries:
if entry.aggregate_iterations == 0:
warnings.warn(
"A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
)
continue
nested += entry.aggregate_iterations
self.aggregate_iterations = self.iterations * (nested if nested != 0 else 1)
def get_iteration(self, value: int) -> int:
"""Returns the current iteration depending on the parameter value""" | if isinstance(self.start, _QiVariableBase): | 3 | 2023-11-10 10:26:10+00:00 | 12k |
jpcadena/fastapi-boilerplate | app/api/api_v1/router/user.py | [
{
"identifier": "get_redis_dep",
"path": "app/api/deps.py",
"snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependen... | import logging
from typing import Annotated, Any, Optional
from uuid import uuid4
from fastapi import (
APIRouter,
BackgroundTasks,
Body,
Depends,
HTTPException,
Response,
status,
)
from fastapi.params import Path, Query
from pydantic import UUID4, NonNegativeInt, PositiveInt
from redis.asyncio import Redis
from sqlalchemy.exc import SQLAlchemyError
from app.api.deps import get_redis_dep
from app.api.oauth2_validation import get_current_user
from app.config.config import (
get_auth_settings,
get_init_settings,
get_settings,
init_setting,
)
from app.config.db.auth_settings import AuthSettings
from app.config.init_settings import InitSettings
from app.config.settings import Settings
from app.exceptions.exceptions import NotFoundException, ServiceException
from app.schemas.external.user import (
UserCreate,
UserCreateResponse,
UserResponse,
UsersResponse,
UserUpdate,
UserUpdateResponse,
)
from app.schemas.infrastructure.user import UserAuth
from app.services.infrastructure.cached_user import CachedUserService
from app.services.infrastructure.user import UserService, get_user_service
from app.tasks.email_tasks.email_tasks import (
send_new_account_email,
send_welcome_email,
) | 10,334 | """
User API Router
This module provides CRUD (Create, Retrieve, Update, Delete) operations
for users.
"""
logger: logging.Logger = logging.getLogger(__name__)
router: APIRouter = APIRouter(prefix="/user", tags=["user"])
| """
User API Router
This module provides CRUD (Create, Retrieve, Update, Delete) operations
for users.
"""
logger: logging.Logger = logging.getLogger(__name__)
router: APIRouter = APIRouter(prefix="/user", tags=["user"])
| @router.get("", response_model=UsersResponse) | 11 | 2023-11-17 00:32:32+00:00 | 12k |
dataaug/open-interpreter-free | interpreter/core/core.py | [
{
"identifier": "cli",
"path": "interpreter/cli/cli.py",
"snippet": "def cli(interpreter):\n parser = argparse.ArgumentParser(description=\"Open Interpreter\")\n\n # Add arguments\n for arg in arguments:\n if arg[\"type\"] == bool:\n parser.add_argument(\n f'-{a... | import json
import os
from datetime import datetime
from ..cli.cli import cli
from ..llm.setup_llm import setup_llm
from ..terminal_interface.terminal_interface import terminal_interface
from ..terminal_interface.validate_llm_settings import validate_llm_settings
from ..utils.check_for_update import check_for_update
from ..utils.display_markdown_message import display_markdown_message
from ..utils.get_config import get_config, user_config_path
from ..utils.local_storage_path import get_storage_path
from .generate_system_message import generate_system_message
from .respond import respond | 7,741 | self.conversation_filename = None
self.conversation_history_path = get_storage_path("conversations")
# LLM settings
self.model = ""
self.temperature = None
self.system_message = ""
self.context_window = None
self.max_tokens = None
self.api_base = None
self.api_key = None
self.max_budget = None
self._llm = None
self.function_calling_llm = None
self.vision = False # LLM supports vision
# Load config defaults
self.extend_config(self.config_file)
# Check for update
try:
if not self.local:
# This should actually be pushed into the utility
if check_for_update():
display_markdown_message(
"> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
)
except:
# Doesn't matter
pass
def extend_config(self, config_path):
if self.debug_mode:
print(f"Extending configuration from `{config_path}`")
config = get_config(config_path)
self.__dict__.update(config)
def chat(self, message=None, display=True, stream=False):
if stream:
return self._streaming_chat(message=message, display=display)
# If stream=False, *pull* from the stream.
for _ in self._streaming_chat(message=message, display=display):
pass
return self.messages
def _streaming_chat(self, message=None, display=True):
# If we have a display,
# we can validate our LLM settings w/ the user first
gpt4free = True
if display and not gpt4free:
validate_llm_settings(self)
# Setup the LLM
if not self._llm:
self._llm = setup_llm(self)
# Sometimes a little more code -> a much better experience!
# Display mode actually runs interpreter.chat(display=False, stream=True) from within the terminal_interface.
# wraps the vanilla .chat(display=False) generator in a display.
# Quite different from the plain generator stuff. So redirect to that
if display:
yield from terminal_interface(self, message)
return
# One-off message
if message or message == "":
if message == "":
message = "No entry from user - please suggest something to enter."
## We support multiple formats for the incoming message:
# Dict (these are passed directly in)
if isinstance(message, dict):
if "role" not in message:
message["role"] = "user"
self.messages.append(message)
# String (we construct a user message dict)
elif isinstance(message, str):
self.messages.append({"role": "user", "message": message})
# List (this is like the OpenAI API)
elif isinstance(message, list):
self.messages = message
yield from self._respond()
# Save conversation if we've turned conversation_history on
if self.conversation_history:
# If it's the first message, set the conversation name
if not self.conversation_filename:
first_few_words = "_".join(
self.messages[0]["message"][:25].split(" ")[:-1]
)
for char in '<>:"/\\|?*!': # Invalid characters for filenames
first_few_words = first_few_words.replace(char, "")
date = datetime.now().strftime("%B_%d_%Y_%H-%M-%S")
self.conversation_filename = (
"__".join([first_few_words, date]) + ".json"
)
# Check if the directory exists, if not, create it
if not os.path.exists(self.conversation_history_path):
os.makedirs(self.conversation_history_path)
# Write or overwrite the file
with open(
os.path.join(
self.conversation_history_path, self.conversation_filename
),
"w",
) as f:
json.dump(self.messages, f)
return
raise Exception(
"`interpreter.chat()` requires a display. Set `display=True` or pass a message into `interpreter.chat(message)`."
)
def _respond(self):
| """
This file defines the Interpreter class.
It's the main file. `import interpreter` will import an instance of this class.
"""
class Interpreter:
def cli(self):
cli(self)
def __init__(self):
# State
self.messages = []
self._code_interpreters = {}
self.config_file = user_config_path
# Settings
self.local = False
self.auto_run = False
self.debug_mode = False
self.max_output = 2000
self.safe_mode = "off"
self.disable_procedures = False
# Conversation history
self.conversation_history = True
self.conversation_filename = None
self.conversation_history_path = get_storage_path("conversations")
# LLM settings
self.model = ""
self.temperature = None
self.system_message = ""
self.context_window = None
self.max_tokens = None
self.api_base = None
self.api_key = None
self.max_budget = None
self._llm = None
self.function_calling_llm = None
self.vision = False # LLM supports vision
# Load config defaults
self.extend_config(self.config_file)
# Check for update
try:
if not self.local:
# This should actually be pushed into the utility
if check_for_update():
display_markdown_message(
"> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
)
except:
# Doesn't matter
pass
def extend_config(self, config_path):
if self.debug_mode:
print(f"Extending configuration from `{config_path}`")
config = get_config(config_path)
self.__dict__.update(config)
def chat(self, message=None, display=True, stream=False):
if stream:
return self._streaming_chat(message=message, display=display)
# If stream=False, *pull* from the stream.
for _ in self._streaming_chat(message=message, display=display):
pass
return self.messages
def _streaming_chat(self, message=None, display=True):
# If we have a display,
# we can validate our LLM settings w/ the user first
gpt4free = True
if display and not gpt4free:
validate_llm_settings(self)
# Setup the LLM
if not self._llm:
self._llm = setup_llm(self)
# Sometimes a little more code -> a much better experience!
# Display mode actually runs interpreter.chat(display=False, stream=True) from within the terminal_interface.
# wraps the vanilla .chat(display=False) generator in a display.
# Quite different from the plain generator stuff. So redirect to that
if display:
yield from terminal_interface(self, message)
return
# One-off message
if message or message == "":
if message == "":
message = "No entry from user - please suggest something to enter."
## We support multiple formats for the incoming message:
# Dict (these are passed directly in)
if isinstance(message, dict):
if "role" not in message:
message["role"] = "user"
self.messages.append(message)
# String (we construct a user message dict)
elif isinstance(message, str):
self.messages.append({"role": "user", "message": message})
# List (this is like the OpenAI API)
elif isinstance(message, list):
self.messages = message
yield from self._respond()
# Save conversation if we've turned conversation_history on
if self.conversation_history:
# If it's the first message, set the conversation name
if not self.conversation_filename:
first_few_words = "_".join(
self.messages[0]["message"][:25].split(" ")[:-1]
)
for char in '<>:"/\\|?*!': # Invalid characters for filenames
first_few_words = first_few_words.replace(char, "")
date = datetime.now().strftime("%B_%d_%Y_%H-%M-%S")
self.conversation_filename = (
"__".join([first_few_words, date]) + ".json"
)
# Check if the directory exists, if not, create it
if not os.path.exists(self.conversation_history_path):
os.makedirs(self.conversation_history_path)
# Write or overwrite the file
with open(
os.path.join(
self.conversation_history_path, self.conversation_filename
),
"w",
) as f:
json.dump(self.messages, f)
return
raise Exception(
"`interpreter.chat()` requires a display. Set `display=True` or pass a message into `interpreter.chat(message)`."
)
def _respond(self): | yield from respond(self) | 9 | 2023-11-16 03:10:42+00:00 | 12k |
3dp-accelerometer/octoprint-accelerometer | octoprint_accelerometer/plugin.py | [
{
"identifier": "DataPostProcessRunner",
"path": "octoprint_accelerometer/data_post_process.py",
"snippet": "class DataPostProcessRunner:\n \"\"\"\n Runner for traversing stream files and post-processing (FFT) if necessary.\n \"\"\"\n def __init__(self,\n logger: Logger,\n ... | import os
import flask
import octoprint.plugin
from typing import Any, Dict, List, Literal, Optional, Tuple
from octoprint.server.util.tornado import LargeResponseHandler, path_validation_factory
from octoprint.util import is_hidden_path
from py3dpaxxel.cli.args import convert_axis_from_str
from py3dpaxxel.controller.api import Py3dpAxxel
from py3dpaxxel.sampling_tasks.series_argument_generator import RunArgsGenerator
from py3dpaxxel.storage.file_filter import FileSelector, File
from py3dpaxxel.storage.filename import timestamp_from_args
from py3dpaxxel.storage.filename_meta import FilenameMetaStream, FilenameMetaFft
from octoprint_accelerometer.data_post_process import DataPostProcessRunner
from octoprint_accelerometer.event_types import DataProcessingEventType, RecordingEventType
from octoprint_accelerometer.record_step_series import RecordStepSeriesRunner
from octoprint_accelerometer.transfer_types import RunMeta, SequenceMeta, StreamMeta, DataSets, FftMeta, Timestamp | 9,090 | self.speed_y_mm_s = self._settings.get_int(["speed_y_mm_s"])
self.speed_z_mm_s = self._settings.get_int(["speed_z_mm_s"])
self.acceleration_x_mm_ss = self._settings.get_int(["acceleration_x_mm_ss"])
self.acceleration_y_mm_ss = self._settings.get_int(["acceleration_y_mm_ss"])
self.acceleration_z_mm_ss = self._settings.get_int(["acceleration_z_mm_ss"])
self.anchor_point_coord_x_mm = self._settings.get_int(["anchor_point_coord_x_mm"])
self.anchor_point_coord_y_mm = self._settings.get_int(["anchor_point_coord_y_mm"])
self.anchor_point_coord_z_mm = self._settings.get_int(["anchor_point_coord_z_mm"])
self.sequence_count = self._settings.get_int(["sequence_count"])
self.go_start = self._settings.get_boolean(["go_start"])
self.return_start = self._settings.get_boolean(["return_start"])
self.auto_home = self._settings.get_boolean(["auto_home"])
self.start_frequency_hz = self._settings.get_int(["start_frequency_hz"])
self.stop_frequency_hz = self._settings.get_int(["stop_frequency_hz"])
self.step_frequency_hz = self._settings.get_int(["step_frequency_hz"])
self.start_zeta_em2 = self._settings.get_int(["start_zeta_em2"])
self.stop_zeta_em2 = self._settings.get_int(["stop_zeta_em2"])
self.step_zeta_em2 = self._settings.get_int(["step_zeta_em2"])
self.sensor_output_data_rate_hz = self._settings.get_int(["sensor_output_data_rate_hz"])
self.data_remove_before_run = self._settings.get_boolean(["data_remove_before_run"])
self.do_sample_x = self._settings.get_boolean(["do_sample_x"])
self.do_sample_y = self._settings.get_boolean(["do_sample_y"])
self.do_sample_z = self._settings.get_boolean(["do_sample_z"])
self.recording_timespan_s = self._settings.get_float(["recording_timespan_s"])
self.sequence_separation_s = self._settings.get_float(["sequence_separation_s"])
self.step_separation_s = self._settings.get_float(["step_separation_s"])
self.do_dry_run = self._settings.get_boolean(["do_dry_run"])
self._compute_start_points()
def _compute_start_points(self) -> None:
self.axis_x_sampling_start = Point3D(self.anchor_point_coord_x_mm - int(self.distance_x_mm // 2),
self.anchor_point_coord_y_mm,
self.anchor_point_coord_z_mm)
self.axis_y_sampling_start = Point3D(self.anchor_point_coord_x_mm,
self.anchor_point_coord_y_mm - int(self.distance_y_mm // 2),
self.anchor_point_coord_z_mm)
self.axis_z_sampling_start = Point3D(self.anchor_point_coord_x_mm,
self.anchor_point_coord_y_mm,
self.anchor_point_coord_z_mm + int(self.distance_z_mm // 2))
def _estimate_duration(self) -> float:
axs: List[Literal["x", "y", "z"]] = [ax for ax, enabled in [("x", self.do_sample_x), ("y", self.do_sample_y), ("z", self.do_sample_z)] if enabled]
sequences_count = len(RunArgsGenerator(
sequence_repeat_count=self.sequence_count,
fx_start_hz=self.start_frequency_hz,
fx_stop_hz=self.stop_frequency_hz,
fx_step_hz=self.step_frequency_hz,
zeta_start_em2=self.start_zeta_em2,
zeta_stop_em2=self.stop_zeta_em2,
zeta_step_em2=self.step_zeta_em2,
axis=axs,
out_file_prefix_1="", out_file_prefix_2="").generate())
duration_s = (sequences_count * self.recording_timespan_s +
(sequences_count - 1) * self.sequence_separation_s +
(self.step_count - 1) * sequences_count * self.step_separation_s)
return duration_s
def _get_parameter_dict(self, args: Dict[str, str] = None) -> Dict[str, str]:
key_name: str = "v"
requested_values: List[str] = []
if args and key_name in args.keys() and args[key_name] is not None:
requested_values.extend(args[key_name].split(","))
# reply all parameters if no names were explicitly specified
requested_values = self._get_ui_exposed_parameters() if len(requested_values) == 0 else requested_values
params_dict: Dict[str, str] = dict()
exposed_parameters = self._get_ui_exposed_parameters()
for parameter_name in [pn for pn in requested_values if pn in exposed_parameters]:
params_dict[parameter_name] = getattr(self, parameter_name)
self._logger.debug(f"xxx supply with requested parameters: {params_dict}")
return params_dict
def _get_selected_axis_str(self) -> List[Literal["x", "y", "z"]]:
return convert_axis_from_str(
f"{'x' if self.do_sample_x else ''}{'y' if self.do_sample_y else ''}{'z' if self.do_sample_z else ''}"
)
def _construct_new_data_processing_runner(self) -> DataPostProcessRunner:
return DataPostProcessRunner(
logger=self._logger,
on_event_callback=self.on_data_processing_callback,
input_dir=self.get_plugin_data_folder(),
input_file_prefix=self.OUTPUT_STREAM_FILE_NAME_PREFIX,
algorithm_d1="discrete_blackman",
output_dir=self.get_plugin_data_folder(),
output_file_prefix=self.OUTPUT_FFT_FILE_NAME_PREFIX,
output_overwrite=False,
do_dry_run=False)
def _construct_new_step_series_runner(self) -> RecordStepSeriesRunner:
return RecordStepSeriesRunner(
logger=self._logger,
printer=self._printer,
controller_serial_device=self.device,
on_event_callback=self.on_recording_callback,
controller_record_timelapse_s=self.recording_timespan_s,
controller_decode_timeout_s=3.0,
sensor_odr_hz=self.sensor_output_data_rate_hz,
gcode_start_point_mm=(self.anchor_point_coord_x_mm, self.anchor_point_coord_y_mm, self.anchor_point_coord_z_mm),
gcode_axis=self._get_selected_axis_str(),
gcode_distance_mm=self.distance_x_mm,
gcode_step_count=self.step_count,
gcode_sequence_count=self.sequence_count,
start_frequency_hz=self.start_frequency_hz,
stop_frequency_hz=self.stop_frequency_hz,
step_frequency_hz=self.step_frequency_hz,
start_zeta_em2=self.start_zeta_em2,
stop_zeta_em2=self.stop_zeta_em2,
step_zeta_em2=self.step_zeta_em2,
output_file_prefix=self.OUTPUT_STREAM_FILE_NAME_PREFIX,
output_dir=self.get_plugin_data_folder(),
do_dry_run=self.do_dry_run)
def _push_data_to_ui(self, data: Dict[str, str]):
self._plugin_manager.send_plugin_message(self._identifier, data)
|
class Point3D:
def __init__(self, x: int, y: int, z: int):
self.x: int = x
self.y: int = y
self.z: int = z
def __str__(self):
return f"x={self.x} y={self.y} z={self.z}"
class OctoprintAccelerometerPlugin(octoprint.plugin.StartupPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.BlueprintPlugin):
OUTPUT_STREAM_FILE_NAME_PREFIX: str = "axxel"
OUTPUT_FFT_FILE_NAME_PREFIX: str = "fft"
# noinspection PyMissingConstructor
def __init__(self):
# following parameters are shared among settings and UI
self.distance_x_mm: int = 0
self.distance_y_mm: int = 0
self.distance_z_mm: int = 0
self.step_count: int = 0
self.speed_x_mm_s: int = 0
self.speed_y_mm_s: int = 0
self.speed_z_mm_s: int = 0
self.acceleration_x_mm_ss: int = 0
self.acceleration_y_mm_ss: int = 0
self.acceleration_z_mm_ss: int = 0
self.anchor_point_coord_x_mm: int = 0
self.anchor_point_coord_y_mm: int = 0
self.anchor_point_coord_z_mm: int = 0
self.sequence_count: int = 0
self.go_start: bool = False
self.return_start: bool = False
self.auto_home: bool = False
self.start_frequency_hz: int = 0
self.stop_frequency_hz: int = 0
self.step_frequency_hz: int = 0
self.start_zeta_em2: int = 0
self.stop_zeta_em2: int = 0
self.step_zeta_em2: int = 0
self.sensor_output_data_rate_hz: int = 0
self.data_remove_before_run: bool = False
self.do_sample_x: bool = False
self.do_sample_y: bool = False
self.do_sample_z: bool = False
self.recording_timespan_s: float = 0
self.sequence_separation_s: float = 0
self.step_separation_s: float = 0
self.do_dry_run: bool = False
# other parameters shared with UI
self.devices_seen: List[str] = []
self.device: str = ""
self.controller_fifo_overrun_error: bool = False
self.controller_response_error: bool = False
# following parameters are computed from above parameters
self.axis_x_sampling_start: Point3D = Point3D(0, 0, 0)
self.axis_y_sampling_start: Point3D = Point3D(0, 0, 0)
self.axis_z_sampling_start: Point3D = Point3D(0, 0, 0)
# recording runner: once constructed before invocation all properties shall be updated
self.data_recording_runner: Optional[RecordStepSeriesRunner] = None
self.data_processing_runner: Optional[DataPostProcessRunner] = None
@staticmethod
def _get_devices() -> Tuple[str, List[str]]:
"""
:return: tuple of primary device (if any) and list of all devices
"""
seen_devices: List[str] = [k for k in Py3dpAxxel.get_devices_dict().keys()]
primary: str = seen_devices[0] if len(seen_devices) > 0 else None
return primary, seen_devices
def _update_seen_devices(self):
primary, seen_devices = self._get_devices()
self._logger.debug(f"seen devices: primary={primary}, seen={seen_devices}")
self.devices_seen = seen_devices
self.device = primary if primary is not None else ""
@octoprint.plugin.BlueprintPlugin.route("/set_values", methods=["POST"])
def on_api_set_values(self):
data = flask.request.json
self._update_members_from_api(data)
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_recording", methods=["POST"])
def on_api_start_recording(self):
self._start_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/abort_recording", methods=["POST"])
def on_api_abort_recording(self):
self._abort_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_data_processing", methods=["POST"])
def on_api_start_data_processing(self):
self._start_data_processing()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/get_estimate", methods=["GET"])
def on_api_get_estimate(self):
return flask.jsonify({f"estimate": self._estimate_duration()})
@octoprint.plugin.BlueprintPlugin.route("/get_parameters", methods=["GET"])
def on_api_get_parameters(self):
return flask.jsonify({f"parameters": self._get_parameter_dict(flask.request.args)})
@octoprint.plugin.BlueprintPlugin.route("/get_files_listing", methods=["GET"])
def on_api_get_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), ".*"))
files_details = fs.filter()
return flask.jsonify({f"files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_stream_files_listing", methods=["GET"])
def on_api_get_stream_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [StreamMeta(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"stream_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_fft_files_listing", methods=["GET"])
def on_api_get_fft_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [FftMeta(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"fft_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_data_listing", methods=["GET"])
def on_api_get_data_listing(self):
fs_stream = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
fs_fft = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files_meta_data_stream: List[Tuple[File, FilenameMetaStream]] = [(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in fs_stream.filter()]
files_meta_data_fft: List[Tuple[File, FilenameMetaFft]] = [(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in fs_fft.filter()]
data_sets: DataSets = DataSets()
# append all streams
for file_meta, filename_meta in files_meta_data_stream:
run_hash, sequence_nr, stream_hash = filename_meta.run_hash, filename_meta.sequence_nr, filename_meta.stream_hash
if run_hash not in data_sets.runs.keys():
data_sets.runs[run_hash] = RunMeta()
if sequence_nr not in data_sets.runs[run_hash].sequences.keys():
data_sets.runs[run_hash].sequences[sequence_nr] = SequenceMeta()
if stream_hash not in data_sets.runs[run_hash].sequences[sequence_nr].streams.keys():
data_sets.runs[run_hash].sequences[sequence_nr].streams[stream_hash] = StreamMeta(file_meta, filename_meta)
# append all FFTs to their respective stream
for file_meta, filename_meta in files_meta_data_fft:
run_hash, sequence_nr, stream_hash = filename_meta.run_hash, filename_meta.sequence_nr, filename_meta.stream_hash
if run_hash not in data_sets.runs.keys():
self._logger.warning(f"failed to assign orphaned FFT file={file_meta.filename_ext} to run, run_hash={run_hash} unknown")
continue
if sequence_nr not in data_sets.runs[run_hash].sequences.keys():
self._logger.warning(f"failed to assign orphaned FFT file={file_meta.filename_ext} to sequence, sequence_nr={sequence_nr} unknown")
continue
if stream_hash not in data_sets.runs[run_hash].sequences[sequence_nr].streams.keys():
self._logger.warning(f"failed to assign orphaned FFT file={file_meta.filename_ext} to stream, stream_hash={stream_hash} unknown")
continue
fft_key: str = filename_meta.fft_axis
if fft_key not in data_sets.runs[run_hash].sequences[sequence_nr].streams[stream_hash].ffts.keys():
data_sets.runs[run_hash].sequences[sequence_nr].streams[stream_hash].ffts[fft_key] = FftMeta(file_meta, filename_meta)
# store first and last timestamp of run
for run in data_sets.runs.values():
youngest_ts: str = "00000000-000000000"
oldest_ts: str = "99999999-235959999"
for sequence in run.sequences.values():
for stream in sequence.streams.values():
meta: FilenameMetaStream = stream.meta
ts = timestamp_from_args(meta.year, meta.month, meta.day, meta.hour, meta.minute, meta.second, meta.milli_second)
if ts < oldest_ts:
oldest_ts = ts
run.started = Timestamp(meta.year, meta.month, meta.day, meta.hour, meta.minute, meta.second, meta.milli_second)
if ts > youngest_ts:
youngest_ts = ts
run.stopped = Timestamp(meta.year, meta.month, meta.day, meta.hour, meta.minute, meta.second, meta.milli_second)
return flask.jsonify({f"data_sets": data_sets})
def route_hook(self, _server_routes, *_args, **_kwargs):
return [
(r"/download/(.*)",
LargeResponseHandler,
dict(path=self.get_plugin_data_folder(),
mime_type_guesser=lambda *args, **kwargs: "text/plain",
stream_body=True,
as_attachment=False,
path_validation=path_validation_factory(
lambda path: not is_hidden_path(path), status_code=404)
)
)
]
def get_template_vars(self):
return dict(estimated_duration_s=self._estimate_duration())
def get_template_configs(self):
return [dict(type="settings", custom_bindings=True),
dict(type="tab", custom_bindings=True)]
def get_settings_defaults(self):
profile: Dict[str, Any] = self._printer_profile_manager.get_current_or_default()
width = profile["volume"]["width"]
height = profile["volume"]["height"]
depth = profile["volume"]["depth"]
origin_center: bool = True if profile["volume"]["origin"] == "center" else False
anchor_point = Point3D(0, 0, 50) if origin_center else Point3D(int(width // 2), int(depth // 2), int(height // 2))
return dict(
distance_x_mm=10,
distance_y_mm=10,
distance_z_mm=10,
step_count=2,
speed_x_mm_s=100,
speed_y_mm_s=100,
speed_z_mm_s=100,
acceleration_x_mm_ss=1000,
acceleration_y_mm_ss=1000,
acceleration_z_mm_ss=1000,
anchor_point_coord_x_mm=anchor_point.x,
anchor_point_coord_y_mm=anchor_point.y,
anchor_point_coord_z_mm=anchor_point.z,
sequence_count=1,
go_start=True,
return_start=True,
auto_home=True,
start_frequency_hz=10,
stop_frequency_hz=60,
step_frequency_hz=10,
start_zeta_em2=15,
stop_zeta_em2=15,
step_zeta_em2=5,
sensor_output_data_rate_hz=800,
data_remove_before_run=True,
do_sample_x=True,
do_sample_y=False,
do_sample_z=False,
recording_timespan_s=1.5,
sequence_separation_s=0.1,
step_separation_s=0.1,
do_dry_run=False,
)
def on_settings_save(self, data):
octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
self._update_members_from_settings()
def on_after_startup(self):
self._update_members_from_settings()
self._update_seen_devices()
self.data_recording_runner = self._construct_new_step_series_runner()
self.data_processing_runner = self._construct_new_data_processing_runner()
self._start_data_processing()
def get_assets(self):
return {"js": ["js/octoprint_accelerometer.js",
"js/d3.js",
"js/datavis.js"]}
def get_update_information(self):
# see https://docs.octoprint.org/en/master/bundledplugins/softwareupdate.html
return {
"octoprint_accelerometer": {
"displayName": "Octoprint Accelerometer",
"displayVersion": self._plugin_version,
# version check: GitHub repository
"type": "github_release",
"user": "rubienr",
"repo": "https://github.com/3dp-accelerometer/octoprint-accelerometer",
"current": self._plugin_version,
# update method: pip
"pip": "https://github.com/3dp-accelerometer/octoprint-accelerometer/archive/{target_version}.zip",
}
}
@staticmethod
def _get_ui_exposed_parameters() -> List[str]:
    """Names of member attributes that may be read/written through the UI/API.

    The order is preserved because callers iterate this list to build
    default replies.
    """
    geometry = ["distance_x_mm", "distance_y_mm", "distance_z_mm",
                "step_count",
                "speed_x_mm_s", "speed_y_mm_s", "speed_z_mm_s",
                "acceleration_x_mm_ss", "acceleration_y_mm_ss", "acceleration_z_mm_ss",
                "anchor_point_coord_x_mm", "anchor_point_coord_y_mm", "anchor_point_coord_z_mm"]
    run_plan = ["sequence_count",
                "go_start", "return_start", "auto_home",
                "start_frequency_hz", "stop_frequency_hz", "step_frequency_hz",
                "start_zeta_em2", "stop_zeta_em2", "step_zeta_em2"]
    sampling = ["sensor_output_data_rate_hz",
                "data_remove_before_run",
                "do_sample_x", "do_sample_y", "do_sample_z",
                "recording_timespan_s", "sequence_separation_s", "step_separation_s"]
    misc = ["devices_seen", "device", "do_dry_run"]
    return geometry + run_plan + sampling + misc
def _update_member_from_str_value(self, parameter: str, value: str):
    """Set attribute *parameter* from its string representation *value*.

    Only UI-exposed parameters are accepted; the string is converted to the
    type of the attribute's current value.  Booleans need special handling
    because ``bool("false")`` is ``True`` for any non-empty string -- the
    original ``value_type(value)`` cast silently set every boolean parameter
    to ``True`` when updated through the API.
    """
    if parameter not in self._get_ui_exposed_parameters():
        return
    old_value = getattr(self, parameter)
    value_type = type(old_value)
    if value_type is bool:
        # bug fix: parse common textual truth values instead of bool(str)
        new_typed_value = str(value).strip().lower() in ("1", "true", "yes", "on")
    else:
        new_typed_value = value_type(value)
    setattr(self, parameter, new_typed_value)
    new_value = getattr(self, parameter)
    self._logger.debug(f"xxx update {parameter}: {old_value} -> {new_value} from api")
def _update_members_from_api(self, data: Dict[str, str]):
    """Apply a name -> string-value mapping received from the REST API."""
    for name, raw_value in data.items():
        # unknown attribute names are silently ignored
        if not hasattr(self, name):
            continue
        self._update_member_from_str_value(name, raw_value)
    # dependent geometry must be refreshed after any parameter change
    self._compute_start_points()
def _update_members_from_settings(self) -> None:
    """Refresh all cached parameter members from the persisted settings.

    The parameters are grouped by settings type; this replaces 34 nearly
    identical ``self.x = self._settings.get_*(["x"])`` lines with a
    table-driven loop so a parameter cannot end up with the wrong getter
    or a mistyped key.
    """
    self._logger.debug("xxx update from settings ...")

    int_parameters = [
        "distance_x_mm", "distance_y_mm", "distance_z_mm",
        "step_count",
        "speed_x_mm_s", "speed_y_mm_s", "speed_z_mm_s",
        "acceleration_x_mm_ss", "acceleration_y_mm_ss", "acceleration_z_mm_ss",
        "anchor_point_coord_x_mm", "anchor_point_coord_y_mm", "anchor_point_coord_z_mm",
        "sequence_count",
        "start_frequency_hz", "stop_frequency_hz", "step_frequency_hz",
        "start_zeta_em2", "stop_zeta_em2", "step_zeta_em2",
        "sensor_output_data_rate_hz",
    ]
    bool_parameters = [
        "go_start", "return_start", "auto_home",
        "data_remove_before_run",
        "do_sample_x", "do_sample_y", "do_sample_z",
        "do_dry_run",
    ]
    float_parameters = [
        "recording_timespan_s", "sequence_separation_s", "step_separation_s",
    ]

    for name in int_parameters:
        setattr(self, name, self._settings.get_int([name]))
    for name in bool_parameters:
        setattr(self, name, self._settings.get_boolean([name]))
    for name in float_parameters:
        setattr(self, name, self._settings.get_float([name]))

    # dependent geometry must be refreshed after the parameters changed
    self._compute_start_points()
def _compute_start_points(self) -> None:
    """Derive the per-axis sampling start points from anchor and distances.

    Each start point is the anchor shifted by half the travel distance of
    its axis.  NOTE(review): x and y are shifted in the negative direction
    while z is shifted positively -- presumably so movement is centred on
    the anchor; confirm the intended z direction.
    """
    anchor_x = self.anchor_point_coord_x_mm
    anchor_y = self.anchor_point_coord_y_mm
    anchor_z = self.anchor_point_coord_z_mm
    half_x = int(self.distance_x_mm // 2)
    half_y = int(self.distance_y_mm // 2)
    half_z = int(self.distance_z_mm // 2)
    self.axis_x_sampling_start = Point3D(anchor_x - half_x, anchor_y, anchor_z)
    self.axis_y_sampling_start = Point3D(anchor_x, anchor_y - half_y, anchor_z)
    self.axis_z_sampling_start = Point3D(anchor_x, anchor_y, anchor_z + half_z)
def _estimate_duration(self) -> float:
    """Estimate the total duration in seconds of the configured recording run.

    The estimate covers the recording time of every sequence plus the
    configured pauses between sequences and between steps.
    """
    # axes actually enabled for sampling, in x/y/z order
    axs: List[Literal["x", "y", "z"]] = [ax for ax, enabled in [("x", self.do_sample_x), ("y", self.do_sample_y), ("z", self.do_sample_z)] if enabled]
    # number of recording sequences the generator will produce for the
    # configured frequency/zeta sweep over the enabled axes
    sequences_count = len(RunArgsGenerator(
        sequence_repeat_count=self.sequence_count,
        fx_start_hz=self.start_frequency_hz,
        fx_stop_hz=self.stop_frequency_hz,
        fx_step_hz=self.step_frequency_hz,
        zeta_start_em2=self.start_zeta_em2,
        zeta_stop_em2=self.stop_zeta_em2,
        zeta_step_em2=self.step_zeta_em2,
        axis=axs,
        out_file_prefix_1="", out_file_prefix_2="").generate())
    # total = recording time of all sequences
    #       + pauses between consecutive sequences
    #       + pauses between steps within each sequence
    # NOTE(review): assumes (step_count - 1) step separations per sequence --
    # confirm against RunArgsGenerator semantics.
    duration_s = (sequences_count * self.recording_timespan_s +
                  (sequences_count - 1) * self.sequence_separation_s +
                  (self.step_count - 1) * sequences_count * self.step_separation_s)
    return duration_s
def _get_parameter_dict(self, args: Dict[str, str] = None) -> Dict[str, str]:
    """Return parameter name -> current value for the names requested in *args*.

    ``args["v"]`` may hold a comma separated list of parameter names; when it
    is absent, ``None`` or no valid name was requested, every UI-exposed
    parameter is returned.  Names that are not UI-exposed are dropped.
    """
    requested: List[str] = []
    if args and args.get("v") is not None:
        requested = args["v"].split(",")
    exposed = self._get_ui_exposed_parameters()
    # no explicit selection means "all exposed parameters"
    if not requested:
        requested = exposed
    params_dict = {name: getattr(self, name) for name in requested if name in exposed}
    self._logger.debug(f"xxx supply with requested parameters: {params_dict}")
    return params_dict
def _get_selected_axis_str(self) -> List[Literal["x", "y", "z"]]:
    """Return the axes enabled for sampling, parsed by convert_axis_from_str."""
    enabled_axes = "".join(
        axis for axis, selected in
        (("x", self.do_sample_x), ("y", self.do_sample_y), ("z", self.do_sample_z))
        if selected)
    return convert_axis_from_str(enabled_axes)
def _construct_new_data_processing_runner(self) -> DataPostProcessRunner:
    """Create a runner that post-processes recorded streams into FFT files.

    Input and output both live in the plugin data folder; existing output
    files are kept (``output_overwrite=False``).
    """
    return DataPostProcessRunner(
        logger=self._logger,
        on_event_callback=self.on_data_processing_callback,
        input_dir=self.get_plugin_data_folder(),
        input_file_prefix=self.OUTPUT_STREAM_FILE_NAME_PREFIX,
        # NOTE(review): window algorithm is hard coded -- consider exposing
        # it as a setting.
        algorithm_d1="discrete_blackman",
        output_dir=self.get_plugin_data_folder(),
        output_file_prefix=self.OUTPUT_FFT_FILE_NAME_PREFIX,
        output_overwrite=False,
        do_dry_run=False)
def _construct_new_step_series_runner(self) -> RecordStepSeriesRunner:
    """Create a runner that records one full step series with current settings."""
    return RecordStepSeriesRunner(
        logger=self._logger,
        printer=self._printer,
        controller_serial_device=self.device,
        on_event_callback=self.on_recording_callback,
        controller_record_timelapse_s=self.recording_timespan_s,
        controller_decode_timeout_s=3.0,
        sensor_odr_hz=self.sensor_output_data_rate_hz,
        gcode_start_point_mm=(self.anchor_point_coord_x_mm, self.anchor_point_coord_y_mm, self.anchor_point_coord_z_mm),
        gcode_axis=self._get_selected_axis_str(),
        # NOTE(review): distance_x_mm is passed for *every* selected axis --
        # confirm whether the per-axis distances (y/z) should be honoured.
        gcode_distance_mm=self.distance_x_mm,
        gcode_step_count=self.step_count,
        gcode_sequence_count=self.sequence_count,
        start_frequency_hz=self.start_frequency_hz,
        stop_frequency_hz=self.stop_frequency_hz,
        step_frequency_hz=self.step_frequency_hz,
        start_zeta_em2=self.start_zeta_em2,
        stop_zeta_em2=self.stop_zeta_em2,
        step_zeta_em2=self.step_zeta_em2,
        output_file_prefix=self.OUTPUT_STREAM_FILE_NAME_PREFIX,
        output_dir=self.get_plugin_data_folder(),
        do_dry_run=self.do_dry_run)
def _push_data_to_ui(self, data: Dict[str, str]):
    """Push *data* to connected web clients via the plugin message bus."""
    self._plugin_manager.send_plugin_message(self._identifier, data)
| def _push_recording_event_to_ui(self, event: RecordingEventType): | 2 | 2023-11-14 17:15:15+00:00 | 12k |
hmmbug/pythaidate | tests/test_pakdate.py | [
{
"identifier": "julianday",
"path": "pythaidate/julianday.py",
"snippet": "def to_julianday(year, month, day):\ndef from_julianday(jd):\ndef today(): # pragma: no cover\ndef date_to_julianday(d):\ndef julianday_to_date(obj):\n B = 0\n A = math.trunc(yearp / 100.)\n B = 2 - A + mat... | from datetime import date, timedelta
from pythaidate import PakDate, CsDate, julianday
from pythaidate.constants import PAK_JULIAN_DAY_OFFSET
import json
import unittest
import os
import pathlib
import random
import logging | 9,969 |
# Fraction of the test data file to sample; overridable through the
# RUN_PERCENT environment variable (given in percent, clamped to <= 100,
# then converted to a probability in [0, 1]).
RUN_PERCENT = 10
if os.environ.get("RUN_PERCENT"):
    RUN_PERCENT = int(os.environ.get("RUN_PERCENT"))
if RUN_PERCENT > 100:
    RUN_PERCENT = 100
RUN_PERCENT /= 100

# Locate the test data file next to this module: prefer the full data set,
# fall back to the minimised one; fail loudly if neither exists.
for datafile in ("pak.data", "pak.min.data"):
    datafile = os.path.join(pathlib.Path(__file__).parent.resolve(), "data", datafile)
    if os.path.exists(datafile):
        break
else:
    raise FileNotFoundError("Pak data file not found.")

# non-deterministic sampling on purpose: each run exercises a different subset
random.seed()
def read_test_date(sample=1, minjd=None):
    """Yield parsed test records from the Pak data file.

    Each line is included with probability *sample*; records whose Julian
    day is below a truthy *minjd* are skipped.
    """
    with open(datafile) as fh:
        for line in fh:
            if random.random() > sample:
                continue
            fields = line.rstrip().split(" ")
            year, month, day = fields[4].split("-")
            record = {
                "pakcode": fields[0],
                # numeric fields carry short textual prefixes in the data
                # file (3 resp. 6 characters) -- TODO confirm data format
                "jd": int(fields[1][3:]),
                "hk": int(fields[2][3:]),
                "masak": int(fields[3][6:]),
                "year": int(year),
                "month": int(month),
                "day": int(day),
                "iswanphra": fields[5] == 't',
            }
            if minjd and record["jd"] < minjd:
                continue
            yield record
class Test_PakDate(unittest.TestCase):
def test_jd_pre_epoch(self):
with self.assertRaises(ValueError):
# pre-epoch jd
|
# Fraction of the test data file to sample; overridable through the
# RUN_PERCENT environment variable (given in percent, clamped to <= 100,
# then converted to a probability in [0, 1]).
RUN_PERCENT = 10
if os.environ.get("RUN_PERCENT"):
    RUN_PERCENT = int(os.environ.get("RUN_PERCENT"))
if RUN_PERCENT > 100:
    RUN_PERCENT = 100
RUN_PERCENT /= 100

# Locate the test data file next to this module: prefer the full data set,
# fall back to the minimised one; fail loudly if neither exists.
for datafile in ("pak.data", "pak.min.data"):
    datafile = os.path.join(pathlib.Path(__file__).parent.resolve(), "data", datafile)
    if os.path.exists(datafile):
        break
else:
    raise FileNotFoundError("Pak data file not found.")

# non-deterministic sampling on purpose: each run exercises a different subset
random.seed()
def read_test_date(sample=1, minjd=None):
    """Yield parsed test records from the Pak data file.

    Each line is included with probability *sample*; records whose Julian
    day is below a truthy *minjd* are skipped.
    """
    with open(datafile) as fh:
        for ln in fh:
            if random.random() > sample:
                continue
            i = ln.rstrip().split(" ")
            y, m, d = i[4].split("-")
            e = {
                "pakcode": i[0],
                # numeric fields carry short textual prefixes in the data
                # file (3 resp. 6 characters) -- TODO confirm data format
                "jd": int(i[1][3:]),
                "hk": int(i[2][3:]),
                "masak": int(i[3][6:]),
                "year": int(y),
                "month": int(m),
                "day": int(d),
                "iswanphra": i[5] == 't',
            }
            if minjd and e["jd"] < minjd:
                continue
            yield e
class Test_PakDate(unittest.TestCase):
def test_jd_pre_epoch(self):
with self.assertRaises(ValueError):
# pre-epoch jd | p = PakDate(jd=PAK_JULIAN_DAY_OFFSET - 5) | 2 | 2023-11-18 21:14:01+00:00 | 12k |
CmosWolf1/Code_implementation_for_paper_SKZC | demo.py | [
{
"identifier": "VisualizationDemo",
"path": "diffusiondet/predictor.py",
"snippet": "class VisualizationDemo(object):\n def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):\n \"\"\"\n Args:\n cfg (CfgNode):\n instance_mode (ColorMode):\n ... | import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import tempfile
import time
import warnings
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from diffusiondet.predictor import VisualizationDemo
from diffusiondet import DiffusionDetDatasetMapper, add_diffusiondet_config, DiffusionDetWithTTA
from diffusiondet.util.model_ema import add_model_ema_configs, may_build_model_ema, may_get_ema_checkpointer, EMAHook, \
apply_model_ema_and_restore, EMADetectionCheckpointer | 7,250 | # Copyright (c) Facebook, Inc. and its affiliates.
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
    """Build a frozen detectron2 config from the config file and CLI options."""
    # load config from file and command-line arguments
    cfg = get_cfg()
    # To use demo for Panoptic-DeepLab, please uncomment the following two lines.
    # from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config  # noqa
    # add_panoptic_deeplab_config(cfg)
    add_diffusiondet_config(cfg)
    add_model_ema_configs(cfg)
    # file values first, then explicit "--opts KEY VALUE" overrides
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Set score_threshold for builtin models
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
    # freeze so downstream code cannot mutate the config accidentally
    cfg.freeze()
    return cfg
def get_parser():
    """Create the command line argument parser for this demo script."""
    ap = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
    ap.add_argument("--config-file",
                    default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
                    metavar="FILE",
                    help="path to config file")
    ap.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
    ap.add_argument("--video-input", help="Path to video file.")
    ap.add_argument("--input", nargs="+",
                    help="A list of space separated input images; "
                         "or a single glob pattern such as 'directory/*.jpg'")
    ap.add_argument("--output",
                    help="A file or directory to save output visualizations. "
                         "If not given, will show output in an OpenCV window.")
    ap.add_argument("--confidence-threshold", type=float, default=0.5,
                    help="Minimum score for instance predictions to be shown")
    ap.add_argument("--opts",
                    help="Modify config options using the command-line 'KEY VALUE' pairs",
                    default=[],
                    nargs=argparse.REMAINDER)
    return ap
def test_opencv_video_format(codec, file_ext):
    """Return True if OpenCV can write *codec*-encoded video into *file_ext* files.

    Writes a tiny throwaway clip into a temporary directory and checks that
    the output file was actually created; the directory is removed on exit.
    """
    with tempfile.TemporaryDirectory(prefix="video_format_test") as tmp_dir:
        # idiom fixes: do not shadow the builtin ``dir``; use a plain loop
        # (not a throwaway list comprehension) for the side-effecting writes;
        # return the existence check directly.
        filename = os.path.join(tmp_dir, "test_file" + file_ext)
        writer = cv2.VideoWriter(
            filename=filename,
            fourcc=cv2.VideoWriter_fourcc(*codec),
            fps=float(30),
            frameSize=(10, 10),
            isColor=True,
        )
        frame = np.zeros((10, 10, 3), np.uint8)
        for _ in range(30):
            writer.write(frame)
        writer.release()
        return os.path.isfile(filename)
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
| # Copyright (c) Facebook, Inc. and its affiliates.
# constants
WINDOW_NAME = "COCO detections"
def setup_cfg(args):
    """Build a frozen detectron2 config from the config file and CLI options."""
    # load config from file and command-line arguments
    cfg = get_cfg()
    # To use demo for Panoptic-DeepLab, please uncomment the following two lines.
    # from detectron2.projects.panoptic_deeplab import add_panoptic_deeplab_config  # noqa
    # add_panoptic_deeplab_config(cfg)
    add_diffusiondet_config(cfg)
    add_model_ema_configs(cfg)
    # file values first, then explicit "--opts KEY VALUE" overrides
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Set score_threshold for builtin models
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
    # freeze so downstream code cannot mutate the config accidentally
    cfg.freeze()
    return cfg
def get_parser():
    """Create the command line argument parser for this demo script."""
    parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
    parser.add_argument(
        "--config-file",
        default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
    parser.add_argument("--video-input", help="Path to video file.")
    parser.add_argument(
        "--input",
        nargs="+",
        help="A list of space separated input images; "
        "or a single glob pattern such as 'directory/*.jpg'",
    )
    parser.add_argument(
        "--output",
        help="A file or directory to save output visualizations. "
        "If not given, will show output in an OpenCV window.",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    # REMAINDER: everything after --opts is forwarded verbatim to the config
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser
def test_opencv_video_format(codec, file_ext):
    """Return True if OpenCV can write *codec*-encoded video into *file_ext* files.

    Writes a tiny throwaway clip into a temporary directory and checks that
    the output file was actually created; the directory is removed on exit.
    """
    with tempfile.TemporaryDirectory(prefix="video_format_test") as tmp_dir:
        # idiom fixes: do not shadow the builtin ``dir``; use a plain loop
        # (not a throwaway list comprehension) for the side-effecting writes;
        # return the existence check directly.
        filename = os.path.join(tmp_dir, "test_file" + file_ext)
        writer = cv2.VideoWriter(
            filename=filename,
            fourcc=cv2.VideoWriter_fourcc(*codec),
            fps=float(30),
            frameSize=(10, 10),
            isColor=True,
        )
        frame = np.zeros((10, 10, 3), np.uint8)
        for _ in range(30):
            writer.write(frame)
        writer.release()
        return os.path.isfile(filename)
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
setup_logger(name="fvcore")
logger = setup_logger()
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
| demo = VisualizationDemo(cfg) | 0 | 2023-11-17 02:37:37+00:00 | 12k |
fg320/DEASC | examples/08_3x3_farm_wso_TURBO.py | [
{
"identifier": "WfModel",
"path": "deasc/wf_model.py",
"snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by p... | import numpy as np
from deasc import WfModel
from deasc import WSOpt | 8,358 |
"""
This example shows wake steering optimisation on a 3x3 wind farm of NREL 5 MW turbines.
The initial conditions are 0 deg for all wind turbines. The optimisation variables are
all turbines, except the last, most downstream row. The optimiser is TURBO.
"""
# Input file definition
path = "./inputs/"
input_file = "gch.yaml"
# Initialise wind farm model
|
"""
This example shows wake steering optimisation on a 3x3 wind farm of NREL 5 MW turbines.
The initial conditions are 0 deg for all wind turbines. The optimisation variables are
all turbines, except the last, most downstream row. The optimiser is TURBO.
"""
# Input file definition
path = "./inputs/"
input_file = "gch.yaml"
# Initialise wind farm model | wf_model = WfModel(input_file, path) | 0 | 2023-11-10 18:13:27+00:00 | 12k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories with entries at all 7 context-length levels (2k through 32k), yielding complete per-repository datasets suitable for comprehensive cross-level analysis.