repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9 values |
|---|---|---|---|---|---|---|---|---|---|---|
DLYuanGod/TinyGPT-V | eval_ref.py | [
{
"identifier": "Config",
"path": "minigpt4/common/config.py",
"snippet": "class Config:\n def __init__(self, args):\n self.config = {}\n\n self.args = args\n\n # Register the config and configuration for setup\n registry.register(\"configuration\", self)\n\n user_c... | import os
import re
import json
import argparse
import random
import numpy as np
import torch
from collections import defaultdict
from PIL import Image
from tqdm import tqdm
from torch.utils.data import DataLoader
from minigpt4.common.config import Config
from minigpt4.common.eval_utils import prepare_texts, init_model, eval_parser, computeIoU
from minigpt4.conversation.conversation import CONV_VISION_minigptv2
from minigpt4.datasets.datasets.coco_caption import RefCOCOEvalData | 3,340 |
def list_of_str(arg):
return list(map(str, arg.split(',')))
parser = eval_parser()
parser.add_argument("--dataset", type=list_of_str, default='refcoco', help="dataset to evaluate")
parser.add_argument("--res", type=float, default=100.0, help="resolution used in refcoco")
parser.add_argument("--resample", action='store_true', help="resolution used in refcoco")
args = parser.parse_args()
cfg = Config(args)
eval_dict = {'refcoco': ['val','testA','testB'],
'refcoco+': ['val','testA','testB'],
'refcocog': ['val','testA','testB']}
model, vis_processor = init_model(args)
model.eval()
CONV_VISION = CONV_VISION_minigptv2
conv_temp = CONV_VISION.copy()
conv_temp.system = ""
model.eval()
save_path = cfg.run_cfg.save_path
for dataset in args.dataset:
for split in eval_dict[dataset]:
eval_file_path = cfg.evaluation_datasets_cfg[dataset]["eval_file_path"]
img_path = cfg.evaluation_datasets_cfg[dataset]["img_path"]
batch_size = cfg.evaluation_datasets_cfg[dataset]["batch_size"]
max_new_tokens = cfg.evaluation_datasets_cfg[dataset]["max_new_tokens"]
# with open(os.path.join(eval_file_path,f"{dataset}/{dataset}_{split}.json"), 'r') as f:
# refcoco = json.load(f)
print(eval_file_path)
with open(eval_file_path,'r') as f:
refcoco = json.load(f)
#print("1111 here")
#print(img_path)
#print(refcoco)
|
def list_of_str(arg):
return list(map(str, arg.split(',')))
parser = eval_parser()
parser.add_argument("--dataset", type=list_of_str, default='refcoco', help="dataset to evaluate")
parser.add_argument("--res", type=float, default=100.0, help="resolution used in refcoco")
parser.add_argument("--resample", action='store_true', help="resolution used in refcoco")
args = parser.parse_args()
cfg = Config(args)
eval_dict = {'refcoco': ['val','testA','testB'],
'refcoco+': ['val','testA','testB'],
'refcocog': ['val','testA','testB']}
model, vis_processor = init_model(args)
model.eval()
CONV_VISION = CONV_VISION_minigptv2
conv_temp = CONV_VISION.copy()
conv_temp.system = ""
model.eval()
save_path = cfg.run_cfg.save_path
for dataset in args.dataset:
for split in eval_dict[dataset]:
eval_file_path = cfg.evaluation_datasets_cfg[dataset]["eval_file_path"]
img_path = cfg.evaluation_datasets_cfg[dataset]["img_path"]
batch_size = cfg.evaluation_datasets_cfg[dataset]["batch_size"]
max_new_tokens = cfg.evaluation_datasets_cfg[dataset]["max_new_tokens"]
# with open(os.path.join(eval_file_path,f"{dataset}/{dataset}_{split}.json"), 'r') as f:
# refcoco = json.load(f)
print(eval_file_path)
with open(eval_file_path,'r') as f:
refcoco = json.load(f)
#print("1111 here")
#print(img_path)
#print(refcoco)
| data = RefCOCOEvalData(refcoco, vis_processor, img_path) | 6 | 2023-12-28 05:47:18+00:00 | 4k |
ali-vilab/dreamtalk | core/networks/disentangle_decoder.py | [
{
"identifier": "PositionalEncoding",
"path": "core/networks/transformer.py",
"snippet": "class PositionalEncoding(nn.Module):\r\n\r\n def __init__(self, d_hid, n_position=200):\r\n super(PositionalEncoding, self).__init__()\r\n\r\n # Not a parameter\r\n self.register_buffer('pos... | import torch
import sys
from torch import nn
from .transformer import (
PositionalEncoding,
TransformerDecoderLayer,
TransformerDecoder,
)
from core.networks.dynamic_fc_decoder import DynamicFCDecoderLayer, DynamicFCDecoder
from core.utils import _reset_parameters
from configs.default import get_cfg_defaults | 3,260 |
def get_decoder_network(
network_type,
d_model,
nhead,
dim_feedforward,
dropout,
activation,
normalize_before,
num_decoder_layers,
return_intermediate_dec,
dynamic_K,
dynamic_ratio,
):
decoder = None
if network_type == "TransformerDecoder":
decoder_layer = TransformerDecoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
norm = nn.LayerNorm(d_model)
decoder = TransformerDecoder(
decoder_layer,
num_decoder_layers,
norm,
return_intermediate_dec,
)
elif network_type == "DynamicFCDecoder":
d_style = d_model
|
def get_decoder_network(
network_type,
d_model,
nhead,
dim_feedforward,
dropout,
activation,
normalize_before,
num_decoder_layers,
return_intermediate_dec,
dynamic_K,
dynamic_ratio,
):
decoder = None
if network_type == "TransformerDecoder":
decoder_layer = TransformerDecoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
norm = nn.LayerNorm(d_model)
decoder = TransformerDecoder(
decoder_layer,
num_decoder_layers,
norm,
return_intermediate_dec,
)
elif network_type == "DynamicFCDecoder":
d_style = d_model | decoder_layer = DynamicFCDecoderLayer( | 3 | 2023-12-28 05:39:31+00:00 | 4k |
jiawei-ren/dreamgaussian4d | diffusers/src/diffusers/utils/testing_utils.py | [
{
"identifier": "BACKENDS_MAPPING",
"path": "diffusers/src/diffusers/utils/import_utils.py",
"snippet": "BACKENDS_MAPPING = OrderedDict(\n [\n (\"bs4\", (is_bs4_available, BS4_IMPORT_ERROR)),\n (\"flax\", (is_flax_available, FLAX_IMPORT_ERROR)),\n (\"inflect\", (is_inflect_availa... | import functools
import importlib
import inspect
import io
import logging
import multiprocessing
import os
import random
import re
import struct
import sys
import tempfile
import time
import unittest
import urllib.parse
import numpy as np
import PIL.Image
import PIL.ImageOps
import requests
import torch
import cv2
from contextlib import contextmanager
from distutils.util import strtobool
from io import BytesIO, StringIO
from pathlib import Path
from typing import List, Optional, Union
from numpy.linalg import norm
from packaging import version
from .import_utils import (
BACKENDS_MAPPING,
is_compel_available,
is_flax_available,
is_note_seq_available,
is_onnx_available,
is_opencv_available,
is_peft_available,
is_torch_available,
is_torch_version,
is_torchsde_available,
is_transformers_available,
)
from .logging import get_logger
from _pytest.config import create_terminal_writer | 2,574 |
tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
# format is usually:
# expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161])
output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array")
test_file, test_class, test_fn = test_name.split("::")
test_fn = test_fn.split()[0]
with open(filename, "a") as f:
print(";".join([test_file, test_class, test_fn, output_str]), file=f)
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def nightly(test_case):
"""
Decorator marking a test that runs nightly in the diffusers CI.
Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_2(test_case):
"""
Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed.
"""
return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")(
test_case
)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")(
test_case
)
def skip_mps(test_case):
"""Decorator marking a test to skip if torch_device is 'mps'"""
return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
"""
|
global_rng = random.Random()
logger = get_logger(__name__)
_required_peft_version = is_peft_available() and version.parse(
version.parse(importlib.metadata.version("peft")).base_version
) > version.parse("0.5")
_required_transformers_version = is_transformers_available() and version.parse(
version.parse(importlib.metadata.version("transformers")).base_version
) > version.parse("4.33")
USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
if is_torch_available():
if "DIFFUSERS_TEST_DEVICE" in os.environ:
torch_device = os.environ["DIFFUSERS_TEST_DEVICE"]
try:
# try creating device to see if provided device is valid
_ = torch.device(torch_device)
except RuntimeError as e:
raise RuntimeError(
f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}"
) from e
logger.info(f"torch_device overrode to {torch_device}")
else:
torch_device = "cuda" if torch.cuda.is_available() else "cpu"
is_torch_higher_equal_than_1_12 = version.parse(
version.parse(torch.__version__).base_version
) >= version.parse("1.12")
if is_torch_higher_equal_than_1_12:
# Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details
mps_backend_registered = hasattr(torch.backends, "mps")
torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device
def torch_all_close(a, b, *args, **kwargs):
if not is_torch_available():
raise ValueError("PyTorch needs to be installed to use this function.")
if not torch.allclose(a, b, *args, **kwargs):
assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}."
return True
def numpy_cosine_similarity_distance(a, b):
similarity = np.dot(a, b) / (norm(a) * norm(b))
distance = 1.0 - similarity.mean()
return distance
def print_tensor_test(tensor, filename="test_corrections.txt", expected_tensor_name="expected_slice"):
test_name = os.environ.get("PYTEST_CURRENT_TEST")
if not torch.is_tensor(tensor):
tensor = torch.from_numpy(tensor)
tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
# format is usually:
# expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161])
output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array")
test_file, test_class, test_fn = test_name.split("::")
test_fn = test_fn.split()[0]
with open(filename, "a") as f:
print(";".join([test_file, test_class, test_fn, output_str]), file=f)
def get_tests_dir(append_path=None):
"""
Args:
append_path: optional path to append to the tests dir path
Return:
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
joined after the `tests` dir the former is provided.
"""
# this function caller's __file__
caller__file__ = inspect.stack()[1][1]
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
while not tests_dir.endswith("tests"):
tests_dir = os.path.dirname(tests_dir)
if append_path:
return os.path.join(tests_dir, append_path)
else:
return tests_dir
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = global_rng
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def nightly(test_case):
"""
Decorator marking a test that runs nightly in the diffusers CI.
Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.
"""
return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
"""
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
def require_torch_2(test_case):
"""
Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed.
"""
return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")(
test_case
)
def require_torch_gpu(test_case):
"""Decorator marking a test that requires CUDA and PyTorch."""
return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")(
test_case
)
def skip_mps(test_case):
"""Decorator marking a test to skip if torch_device is 'mps'"""
return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case)
def require_flax(test_case):
"""
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
""" | return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case) | 2 | 2023-12-28 08:17:40+00:00 | 4k |
oppo-us-research/SpacetimeGaussians | thirdparty/gaussian_splatting/scene/ourslite.py | [
{
"identifier": "getcolormodel",
"path": "helper_model.py",
"snippet": "def getcolormodel(rgbfuntion):\n if rgbfuntion == \"sandwich\":\n rgbdecoder = Sandwich(9,3)\n \n elif rgbfuntion == \"sandwichnoact\":\n rgbdecoder = Sandwichnoact(9,3)\n else :\n return None \n ... | import torch
import numpy as np
import os
from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
from torch import nn
from utils.system_utils import mkdir_p
from plyfile import PlyData, PlyElement
from simple_knn._C import distCUDA2
from utils.graphics_utils import BasicPointCloud
from utils.general_utils import strip_symmetric, build_scaling_rotation, update_quaternion
from helper_model import getcolormodel, interpolate_point, interpolate_partuse,interpolate_pointv3 | 2,770 | self._rotation = torch.empty(0)
self._opacity = torch.empty(0)
self.max_radii2D = torch.empty(0)
self.xyz_gradient_accum = torch.empty(0)
self.denom = torch.empty(0)
self._motion = torch.empty(0)
self.optimizer = None
self.percent_dense = 0
self.spatial_lr_scale = 0
self._omega = torch.empty(0)
self.rgbdecoder = getcolormodel(rgbfuntion)
self.setup_functions()
self.delta_t = None
self.omegamask = None
self.maskforems = None
self.distancetocamera = None
self.trbfslinit = None
self.ts = None
self.trbfoutput = None
self.preprocesspoints = False
self.addsphpointsscale = 0.8
self.maxz, self.minz = 0.0 , 0.0
self.maxy, self.miny = 0.0 , 0.0
self.maxx, self.minx = 0.0 , 0.0
self.computedtrbfscale = None
self.computedopacity = None
self.raystart = 0.7
def capture(self):
return (
self.active_sh_degree,
self._xyz,
self._features_dc,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
self.xyz_gradient_accum,
self.denom,
self.optimizer.state_dict(),
self.spatial_lr_scale,
)
def restore(self, model_args, training_args):
(self.active_sh_degree,
self._xyz,
self._features_dc,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
xyz_gradient_accum,
denom,
opt_dict,
self.spatial_lr_scale) = model_args
self.training_setup(training_args)
self.xyz_gradient_accum = xyz_gradient_accum
self.denom = denom
self.optimizer.load_state_dict(opt_dict)
@property
def get_scaling(self):
return self.scaling_activation(self._scaling)
def get_rotation(self, delta_t):
rotation = self._rotation + delta_t*self._omega
self.delta_t = delta_t
return self.rotation_activation(rotation)
@property
def get_xyz(self):
return self._xyz
@property
def get_trbfcenter(self):
return self._trbf_center
@property
def get_trbfscale(self):
return self._trbf_scale
def get_features(self, deltat):
return self._features_dc
@property
def get_opacity(self):
return self.opacity_activation(self._opacity)
def get_covariance(self, scaling_modifier = 1):
return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
def oneupSHdegree(self):
if self.active_sh_degree < self.max_sh_degree:
self.active_sh_degree += 1
def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
if self.preprocesspoints == 3:
pcd = interpolate_point(pcd, 4)
elif self.preprocesspoints == 4:
pcd = interpolate_point(pcd, 2)
elif self.preprocesspoints == 5:
pcd = interpolate_point(pcd, 6)
elif self.preprocesspoints == 6:
pcd = interpolate_point(pcd, 8)
elif self.preprocesspoints == 7:
pcd = interpolate_point(pcd, 16)
elif self.preprocesspoints == 8:
pcd = interpolate_pointv3(pcd, 4)
elif self.preprocesspoints == 14:
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
actual_covariance = L @ L.transpose(1, 2)
symm = strip_symmetric(actual_covariance)
return symm
self.scaling_activation = torch.exp
self.scaling_inverse_activation = torch.log
self.covariance_activation = build_covariance_from_scaling_rotation
self.opacity_activation = torch.sigmoid
self.inverse_opacity_activation = inverse_sigmoid
self.rotation_activation = torch.nn.functional.normalize
#self.featureact = torch.sigmoid
def __init__(self, sh_degree : int, rgbfuntion="rgbv1"):
self.active_sh_degree = 0
self.max_sh_degree = sh_degree
self._xyz = torch.empty(0)
self._features_dc = torch.empty(0)
# self._features_rest = torch.empty(0)
self._scaling = torch.empty(0)
self._rotation = torch.empty(0)
self._opacity = torch.empty(0)
self.max_radii2D = torch.empty(0)
self.xyz_gradient_accum = torch.empty(0)
self.denom = torch.empty(0)
self._motion = torch.empty(0)
self.optimizer = None
self.percent_dense = 0
self.spatial_lr_scale = 0
self._omega = torch.empty(0)
self.rgbdecoder = getcolormodel(rgbfuntion)
self.setup_functions()
self.delta_t = None
self.omegamask = None
self.maskforems = None
self.distancetocamera = None
self.trbfslinit = None
self.ts = None
self.trbfoutput = None
self.preprocesspoints = False
self.addsphpointsscale = 0.8
self.maxz, self.minz = 0.0 , 0.0
self.maxy, self.miny = 0.0 , 0.0
self.maxx, self.minx = 0.0 , 0.0
self.computedtrbfscale = None
self.computedopacity = None
self.raystart = 0.7
def capture(self):
return (
self.active_sh_degree,
self._xyz,
self._features_dc,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
self.xyz_gradient_accum,
self.denom,
self.optimizer.state_dict(),
self.spatial_lr_scale,
)
def restore(self, model_args, training_args):
(self.active_sh_degree,
self._xyz,
self._features_dc,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
xyz_gradient_accum,
denom,
opt_dict,
self.spatial_lr_scale) = model_args
self.training_setup(training_args)
self.xyz_gradient_accum = xyz_gradient_accum
self.denom = denom
self.optimizer.load_state_dict(opt_dict)
@property
def get_scaling(self):
return self.scaling_activation(self._scaling)
def get_rotation(self, delta_t):
rotation = self._rotation + delta_t*self._omega
self.delta_t = delta_t
return self.rotation_activation(rotation)
@property
def get_xyz(self):
return self._xyz
@property
def get_trbfcenter(self):
return self._trbf_center
@property
def get_trbfscale(self):
return self._trbf_scale
def get_features(self, deltat):
return self._features_dc
@property
def get_opacity(self):
return self.opacity_activation(self._opacity)
def get_covariance(self, scaling_modifier = 1):
return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
def oneupSHdegree(self):
if self.active_sh_degree < self.max_sh_degree:
self.active_sh_degree += 1
def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
if self.preprocesspoints == 3:
pcd = interpolate_point(pcd, 4)
elif self.preprocesspoints == 4:
pcd = interpolate_point(pcd, 2)
elif self.preprocesspoints == 5:
pcd = interpolate_point(pcd, 6)
elif self.preprocesspoints == 6:
pcd = interpolate_point(pcd, 8)
elif self.preprocesspoints == 7:
pcd = interpolate_point(pcd, 16)
elif self.preprocesspoints == 8:
pcd = interpolate_pointv3(pcd, 4)
elif self.preprocesspoints == 14: | pcd = interpolate_partuse(pcd, 2) | 2 | 2023-12-28 04:16:32+00:00 | 4k |
Meituan-AutoML/MobileVLM | scripts/inference.py | [
{
"identifier": "load_pretrained_model",
"path": "mobilevlm/model/mobilevlm.py",
"snippet": "def load_pretrained_model(model_path, load_8bit=False, load_4bit=False, device_map=\"auto\", device=\"cuda\"):\n\n from mobilevlm.model.mobilellama import MobileLlamaForCausalLM\n\n kwargs = {\"device_map\... | import sys
import torch
import argparse
from PIL import Image
from pathlib import Path
from mobilevlm.model.mobilevlm import load_pretrained_model
from mobilevlm.conversation import conv_templates, SeparatorStyle
from mobilevlm.utils import disable_torch_init, process_images, tokenizer_image_token, KeywordsStoppingCriteria
from mobilevlm.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN | 1,683 |
sys.path.append(str(Path(__file__).parent.parent.resolve()))
def inference_once(args):
disable_torch_init()
model_name = args.model_path.split('/')[-1]
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path)
images = [Image.open(args.image_file).convert("RGB")]
|
sys.path.append(str(Path(__file__).parent.parent.resolve()))
def inference_once(args):
disable_torch_init()
model_name = args.model_path.split('/')[-1]
tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path)
images = [Image.open(args.image_file).convert("RGB")] | images_tensor = process_images(images, image_processor, model.config).to(model.device, dtype=torch.float16) | 3 | 2023-12-29 03:35:49+00:00 | 4k |
kinggongzilla/ai-clone-whatsapp | utils/config_utils.py | [
{
"identifier": "datasets",
"path": "configs/datasets.py",
"snippet": "class custom_dataset:"
},
{
"identifier": "lora_config",
"path": "configs/peft.py",
"snippet": "class lora_config:\n r: int=8\n lora_alpha: int=32\n target_modules: List[str] = field(default_factory=lambda... | import inspect
import torch.distributed as dist
from dataclasses import asdict
from torch.utils.data import DistributedSampler
from peft import (
LoraConfig,
AdaptionPromptConfig,
PrefixTuningConfig,
)
from transformers import default_data_collator
from transformers.data import DataCollatorForSeq2Seq
from configs import datasets, lora_config, llama_adapter_config, prefix_config, train_config
from data.sampler import LengthBasedBatchSampler, DistributedLengthBasedBatchSampler
from utils.dataset_utils import DATASET_PREPROC | 1,825 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
def update_config(config, **kwargs):
if isinstance(config, (tuple, list)):
for c in config:
update_config(c, **kwargs)
else:
for k, v in kwargs.items():
if hasattr(config, k):
setattr(config, k, v)
elif "." in k:
# allow --some_config.some_param=True
config_name, param_name = k.split(".")
if type(config).__name__ == config_name:
if hasattr(config, param_name):
setattr(config, param_name, v)
else:
# In case of specialized config we can warm user
print(f"Warning: {config_name} does not accept parameter: {k}")
elif isinstance(config, train_config):
print(f"Warning: unknown parameter {k}")
def generate_peft_config(train_config, kwargs):
configs = (lora_config, llama_adapter_config, prefix_config)
peft_configs = (LoraConfig, AdaptionPromptConfig, PrefixTuningConfig)
names = tuple(c.__name__.rstrip("_config") for c in configs)
assert train_config.peft_method in names, f"Peft config not found: {train_config.peft_method}"
config = configs[names.index(train_config.peft_method)]()
update_config(config, **kwargs)
params = asdict(config)
peft_config = peft_configs[names.index(train_config.peft_method)](**params)
return peft_config
def generate_dataset_config(train_config, kwargs):
names = tuple(DATASET_PREPROC.keys())
assert train_config.dataset in names, f"Unknown dataset: {train_config.dataset}"
dataset_config = {k:v for k, v in inspect.getmembers(datasets)}[train_config.dataset]()
update_config(dataset_config, **kwargs)
return dataset_config
def get_dataloader_kwargs(train_config, dataset, tokenizer, mode):
kwargs = {}
batch_size = train_config.batch_size_training if mode=="train" else train_config.val_batch_size
if train_config.batching_strategy == "padding":
if train_config.enable_fsdp:
kwargs["batch_sampler"] = DistributedLengthBasedBatchSampler(
dataset,
batch_size=batch_size,
rank=dist.get_rank(),
num_replicas=dist.get_world_size(),
shuffle=mode=="train",
)
else:
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
def update_config(config, **kwargs):
if isinstance(config, (tuple, list)):
for c in config:
update_config(c, **kwargs)
else:
for k, v in kwargs.items():
if hasattr(config, k):
setattr(config, k, v)
elif "." in k:
# allow --some_config.some_param=True
config_name, param_name = k.split(".")
if type(config).__name__ == config_name:
if hasattr(config, param_name):
setattr(config, param_name, v)
else:
# In case of specialized config we can warm user
print(f"Warning: {config_name} does not accept parameter: {k}")
elif isinstance(config, train_config):
print(f"Warning: unknown parameter {k}")
def generate_peft_config(train_config, kwargs):
configs = (lora_config, llama_adapter_config, prefix_config)
peft_configs = (LoraConfig, AdaptionPromptConfig, PrefixTuningConfig)
names = tuple(c.__name__.rstrip("_config") for c in configs)
assert train_config.peft_method in names, f"Peft config not found: {train_config.peft_method}"
config = configs[names.index(train_config.peft_method)]()
update_config(config, **kwargs)
params = asdict(config)
peft_config = peft_configs[names.index(train_config.peft_method)](**params)
return peft_config
def generate_dataset_config(train_config, kwargs):
names = tuple(DATASET_PREPROC.keys())
assert train_config.dataset in names, f"Unknown dataset: {train_config.dataset}"
dataset_config = {k:v for k, v in inspect.getmembers(datasets)}[train_config.dataset]()
update_config(dataset_config, **kwargs)
return dataset_config
def get_dataloader_kwargs(train_config, dataset, tokenizer, mode):
kwargs = {}
batch_size = train_config.batch_size_training if mode=="train" else train_config.val_batch_size
if train_config.batching_strategy == "padding":
if train_config.enable_fsdp:
kwargs["batch_sampler"] = DistributedLengthBasedBatchSampler(
dataset,
batch_size=batch_size,
rank=dist.get_rank(),
num_replicas=dist.get_world_size(),
shuffle=mode=="train",
)
else: | kwargs["batch_sampler"] = LengthBasedBatchSampler(dataset, batch_size, drop_last=True, shuffle=mode=="train") | 5 | 2023-12-28 00:02:08+00:00 | 4k |
FoundationVision/UniRef | projects/UniRef/uniref/models/deformable_detr/deformable_detr.py | [
{
"identifier": "box_ops",
"path": "projects/UniRef/uniref/util/box_ops.py",
"snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef multi_box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef generalized_multi_box_iou(boxes1, boxes2):\ndef... | import torch
import torch.nn.functional as F
import math
import copy
from torch import nn
from ...util import box_ops
from ...util.misc import (NestedTensor, nested_tensor_from_tensor_list,
accuracy, get_world_size, interpolate,
is_dist_avail_and_initialized, inverse_sigmoid)
from .backbone import build_backbone
from .matcher import build_matcher | 3,201 | def __init__(self, cfg, backbone, transformer, num_classes, num_queries, num_feature_levels,
aux_loss=True, with_box_refine=False, two_stage=False, mixed_selection=False, use_iou_branch=False):
""" Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, ie detection slot. This is the maximal number of objects
DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
with_box_refine: iterative bounding box refinement
two_stage: two-stage Deformable DETR
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.class_embed = nn.Linear(hidden_dim, num_classes)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
if use_iou_branch:
self.iou_head = nn.Linear(hidden_dim, 1)
self.num_feature_levels = num_feature_levels
if not two_stage:
self.query_embed = nn.Embedding(num_queries, hidden_dim*2)
elif mixed_selection:
self.query_embed = nn.Embedding(num_queries, hidden_dim)
if num_feature_levels > 1:
num_backbone_outs = len(backbone.strides)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.num_channels[_]
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
))
for _ in range(num_feature_levels - num_backbone_outs):
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(32, hidden_dim),
))
in_channels = hidden_dim
self.input_proj = nn.ModuleList(input_proj_list)
else:
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)])
self.backbone = backbone
self.aux_loss = aux_loss
self.with_box_refine = with_box_refine
self.two_stage = two_stage
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
if use_iou_branch:
self.iou_head.bias.data = torch.ones(1) * bias_value
self.class_embed.bias.data = torch.ones(num_classes) * bias_value
nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
for proj in self.input_proj:
nn.init.xavier_uniform_(proj[0].weight, gain=1)
nn.init.constant_(proj[0].bias, 0)
# if two-stage, the last class_embed and bbox_embed is for region proposal generation
num_pred = (transformer.decoder.num_layers + 1) if two_stage else transformer.decoder.num_layers
if with_box_refine:
self.class_embed = _get_clones(self.class_embed, num_pred)
self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
if use_iou_branch:
self.iou_head = _get_clones(self.iou_head, num_pred-1) if two_stage else _get_clones(self.iou_head, num_pred)
nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
# hack implementation for iterative bounding box refinement
self.transformer.decoder.bbox_embed = self.bbox_embed
else:
nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
if use_iou_branch:
self.iou_head = _get_clones(self.iou_head, num_pred-1) if two_stage else _get_clones(self.iou_head, num_pred)
self.transformer.decoder.bbox_embed = None
if two_stage:
# hack implementation for two-stage
self.transformer.decoder.class_embed = self.class_embed
for box_embed in self.bbox_embed:
nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
self.mixed_selection = mixed_selection
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [{'pred_logits': a, 'pred_boxes': b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
class PostProcess(nn.Module):
""" This module converts the model's output into the format expected by the coco api"""
@torch.no_grad()
def forward(self, outputs, target_sizes):
""" Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augment, but before padding
"""
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
scores = topk_values
topk_boxes = topk_indexes // out_logits.shape[2]
labels = topk_indexes % out_logits.shape[2]
| # ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
"""
Deformable DETR model and criterion classes.
"""
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class DeformableDETR(nn.Module):
""" This is the Deformable DETR module that performs object detection """
def __init__(self, cfg, backbone, transformer, num_classes, num_queries, num_feature_levels,
aux_loss=True, with_box_refine=False, two_stage=False, mixed_selection=False, use_iou_branch=False):
""" Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, ie detection slot. This is the maximal number of objects
DETR can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
with_box_refine: iterative bounding box refinement
two_stage: two-stage Deformable DETR
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.class_embed = nn.Linear(hidden_dim, num_classes)
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
if use_iou_branch:
self.iou_head = nn.Linear(hidden_dim, 1)
self.num_feature_levels = num_feature_levels
if not two_stage:
self.query_embed = nn.Embedding(num_queries, hidden_dim*2)
elif mixed_selection:
self.query_embed = nn.Embedding(num_queries, hidden_dim)
if num_feature_levels > 1:
num_backbone_outs = len(backbone.strides)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.num_channels[_]
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
))
for _ in range(num_feature_levels - num_backbone_outs):
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(32, hidden_dim),
))
in_channels = hidden_dim
self.input_proj = nn.ModuleList(input_proj_list)
else:
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)])
self.backbone = backbone
self.aux_loss = aux_loss
self.with_box_refine = with_box_refine
self.two_stage = two_stage
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
if use_iou_branch:
self.iou_head.bias.data = torch.ones(1) * bias_value
self.class_embed.bias.data = torch.ones(num_classes) * bias_value
nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
for proj in self.input_proj:
nn.init.xavier_uniform_(proj[0].weight, gain=1)
nn.init.constant_(proj[0].bias, 0)
# if two-stage, the last class_embed and bbox_embed is for region proposal generation
num_pred = (transformer.decoder.num_layers + 1) if two_stage else transformer.decoder.num_layers
if with_box_refine:
self.class_embed = _get_clones(self.class_embed, num_pred)
self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
if use_iou_branch:
self.iou_head = _get_clones(self.iou_head, num_pred-1) if two_stage else _get_clones(self.iou_head, num_pred)
nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
# hack implementation for iterative bounding box refinement
self.transformer.decoder.bbox_embed = self.bbox_embed
else:
nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
if use_iou_branch:
self.iou_head = _get_clones(self.iou_head, num_pred-1) if two_stage else _get_clones(self.iou_head, num_pred)
self.transformer.decoder.bbox_embed = None
if two_stage:
# hack implementation for two-stage
self.transformer.decoder.class_embed = self.class_embed
for box_embed in self.bbox_embed:
nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
self.mixed_selection = mixed_selection
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [{'pred_logits': a, 'pred_boxes': b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
class PostProcess(nn.Module):
""" This module converts the model's output into the format expected by the coco api"""
@torch.no_grad()
def forward(self, outputs, target_sizes):
""" Perform the computation
Parameters:
outputs: raw outputs of the model
target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augment, but before padding
"""
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)
scores = topk_values
topk_boxes = topk_indexes // out_logits.shape[2]
labels = topk_indexes % out_logits.shape[2] | boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) | 0 | 2023-12-22 13:31:33+00:00 | 4k |
xhuangcv/humannorm | threestudio/models/geometry/implicit_volume.py | [
{
"identifier": "BaseGeometry",
"path": "threestudio/models/geometry/base.py",
"snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Uni... | from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
BaseGeometry,
BaseImplicitGeometry,
contract_to_unisphere,
)
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import get_activation
from threestudio.utils.typing import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio | 3,023 |
@threestudio.register("implicit-volume")
class ImplicitVolume(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
n_input_dims: int = 3
n_feature_dims: int = 3
density_activation: Optional[str] = "softplus"
density_bias: Union[float, str] = "blob_magic3d"
density_blob_scale: float = 10.0
density_blob_std: float = 0.5
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
normal_type: Optional[
str
] = "finite_difference" # in ['pred', 'finite_difference', 'finite_difference_laplacian']
finite_difference_normal_eps: float = 0.01
# automatically determine the threshold
isosurface_threshold: Union[float, str] = 25.0
cfg: Config
def configure(self) -> None:
super().configure()
|
@threestudio.register("implicit-volume")
class ImplicitVolume(BaseImplicitGeometry):
@dataclass
class Config(BaseImplicitGeometry.Config):
n_input_dims: int = 3
n_feature_dims: int = 3
density_activation: Optional[str] = "softplus"
density_bias: Union[float, str] = "blob_magic3d"
density_blob_scale: float = 10.0
density_blob_std: float = 0.5
pos_encoding_config: dict = field(
default_factory=lambda: {
"otype": "HashGrid",
"n_levels": 16,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 1.447269237440378,
}
)
mlp_network_config: dict = field(
default_factory=lambda: {
"otype": "VanillaMLP",
"activation": "ReLU",
"output_activation": "none",
"n_neurons": 64,
"n_hidden_layers": 1,
}
)
normal_type: Optional[
str
] = "finite_difference" # in ['pred', 'finite_difference', 'finite_difference_laplacian']
finite_difference_normal_eps: float = 0.01
# automatically determine the threshold
isosurface_threshold: Union[float, str] = 25.0
cfg: Config
def configure(self) -> None:
super().configure() | self.encoding = get_encoding( | 3 | 2023-12-23 12:37:48+00:00 | 4k |
thooton/muse | main.py | [
{
"identifier": "API_KEYS",
"path": "config.py",
"snippet": "API_KEYS = json.loads(config.get(\"Gemini\", \"API_KEYS\", fallback=\"[]\"))"
},
{
"identifier": "OUT_DIR",
"path": "config.py",
"snippet": "OUT_DIR = config.get(\"Misc\", \"OUT_DIR\", fallback=\"\")"
},
{
"identifier":... | import os
import aiohttp
import asyncio
import secrets
import json
import huggingface_hub
import traceback
from tqdm import tqdm
from config import API_KEYS, OUT_DIR, COUNT_PER_FILE, BEGIN_INDEX, VERBOSE_EXCEPTIONS
from data_processing import TEXT_DATASET, CODE_DATASET, TEMPLATES, load_iter_from_spec
from llm_queries import llm_template_query | 1,960 |
def exc_fmt(exc):
if VERBOSE_EXCEPTIONS:
return "\n".join(traceback.format_exception(exc)).strip()
else:
return str(repr(exc))
async def main():
huggingface_hub.login(new_session=False)
hf_api = huggingface_hub.HfApi()
hf_user = hf_api.whoami()["name"]
repo_id = f"{hf_user}/muse_textbooks"
hf_api.create_repo(repo_id=repo_id, repo_type="dataset", exist_ok=True)
text_iter = iter([])
code_iter = iter([])
sess = aiohttp.ClientSession()
tasks = set()
lines = 0
try:
with open(os.path.join(OUT_DIR, "cur.jsonl"), "rb") as f:
lines = len(f.read().decode("utf-8").split("\n")) - 1
except Exception:
pass
pbar = tqdm(initial=lines, total=COUNT_PER_FILE)
outfile = open(os.path.join(OUT_DIR, "cur.jsonl"), "ab")
while True:
for api_key in API_KEYS:
|
def exc_fmt(exc):
if VERBOSE_EXCEPTIONS:
return "\n".join(traceback.format_exception(exc)).strip()
else:
return str(repr(exc))
async def main():
huggingface_hub.login(new_session=False)
hf_api = huggingface_hub.HfApi()
hf_user = hf_api.whoami()["name"]
repo_id = f"{hf_user}/muse_textbooks"
hf_api.create_repo(repo_id=repo_id, repo_type="dataset", exist_ok=True)
text_iter = iter([])
code_iter = iter([])
sess = aiohttp.ClientSession()
tasks = set()
lines = 0
try:
with open(os.path.join(OUT_DIR, "cur.jsonl"), "rb") as f:
lines = len(f.read().decode("utf-8").split("\n")) - 1
except Exception:
pass
pbar = tqdm(initial=lines, total=COUNT_PER_FILE)
outfile = open(os.path.join(OUT_DIR, "cur.jsonl"), "ab")
while True:
for api_key in API_KEYS: | template = TEMPLATES[secrets.randbits(64) % len(TEMPLATES)] | 7 | 2023-12-26 03:41:10+00:00 | 4k |
vithursant/nanoGPT_mlx | train.py | [
{
"identifier": "GPTConfig",
"path": "model.py",
"snippet": "class GPTConfig:\n block_size: int = 1024\n vocab_size: int = 50304 # GPT-2 vocab_size of 50257, padded up to nearest multiple of 64 for efficiency\n n_layer: int = 12\n n_head: int = 12\n n_embd: int = 768\n dropout: float =... | import os
import math
import time
import numpy as np
import mlx
import mlx.core as mx
import mlx.nn as nn
import mlx.optimizers as optim
from typing import List
from mlx.utils import tree_flatten, tree_map
from model import GPTConfig, GPT
from optimizer import AdamW
from tboard_utils import init_tensorboard, get_tensorboard | 2,850 |
# model
n_layer = 12
n_head = 12
n_embd = 768
dropout = 0.0 # for pretraining 0 is good, for finetuning try 0.1+
bias = False # do we use bias inside LayerNorm and Linear layers?
d_type = 'float32'
# adamw optimizer
learning_rate = 6.0e-4 # max learning rate
min_lr = 6.0e-5
num_iters = 600000 # total number of training iterations
warmup_pct = 0.1
warmup_iters = 2000
lr_decay_iters = 600000
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
meta_vocab_size = None
# dataset
dataset = 'openwebtext'
batch_size = 1
gradient_accumulation_steps = 512
context_size = 1024
# eval
eval_interval = 10
log_interval = 10
eval_only = False
out_dir = 'gpt2_openwebtext_pretrain'
# -----------------------------------------------------------------------------
config_keys = [k for k,v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))]
exec(open('configurator.py').read()) # overrides from command line or config file
config = {k: globals()[k] for k in config_keys} # will be useful for logging
# -----------------------------------------------------------------------------
# Load vocab and dataset:
# poor man's data loader
data_dir = os.path.join('data', dataset)
train_data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r')
val_data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint16, mode='r')
# initialize tboard logging:
os.makedirs(out_dir, exist_ok=True)
tboard_dir = os.path.join(out_dir, "tboard_log")
init_tensorboard(tboard_dir)
def get_batch(split):
data = train_data if split == 'train' else val_data
ix = np.random.randint(len(data) - context_size, size=(batch_size,))
x = mx.stack([(mx.array(data[i:i+context_size])) for i in ix]).astype(mx.int64)
y = mx.stack([(mx.array(data[i+1:i+1+context_size])) for i in ix]).astype(mx.int64)
return x, y
def print_loss(optimizer, iteration_count, average_loss, tic):
toc = time.perf_counter()
print(
f"iter {iteration_count}: train loss {average_loss:.3f}, "
f"it/sec {1.0 / (toc - tic):.3f}, "
f"lr {optimizer.learning_rate:.9f}"
)
return toc
def update_learning_rate(it):
if it < warmup_iters:
return learning_rate * it / warmup_iters
if it > lr_decay_iters:
return min_lr
decay_ratio = (it - warmup_iters) / (
lr_decay_iters - warmup_iters
)
assert 0 <= decay_ratio <= 1
coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
new_lr = min_lr + coeff * (learning_rate - min_lr)
return new_lr
def log_tboard_dict(log_dict, itr, pre, post=''):
writer = get_tensorboard()
for k, v in log_dict.items():
writer.add_scalar(f'{pre}/{k}{post}', v, itr)
def main():
# model init
model_args = dict(n_layer=n_layer, n_head=n_head, n_embd=n_embd, block_size=context_size,
bias=bias, vocab_size=None, dropout=dropout) # start with model_args from command line
# initialize model:
if meta_vocab_size is None:
print("defaulting to vocab_size of GPT-2 to 50304 (50257 rounded up for efficiency)")
model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50304
gptconf = GPTConfig(**model_args)
|
# model
n_layer = 12
n_head = 12
n_embd = 768
dropout = 0.0 # for pretraining 0 is good, for finetuning try 0.1+
bias = False # do we use bias inside LayerNorm and Linear layers?
d_type = 'float32'
# adamw optimizer
learning_rate = 6.0e-4 # max learning rate
min_lr = 6.0e-5
num_iters = 600000 # total number of training iterations
warmup_pct = 0.1
warmup_iters = 2000
lr_decay_iters = 600000
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
meta_vocab_size = None
# dataset
dataset = 'openwebtext'
batch_size = 1
gradient_accumulation_steps = 512
context_size = 1024
# eval
eval_interval = 10
log_interval = 10
eval_only = False
out_dir = 'gpt2_openwebtext_pretrain'
# -----------------------------------------------------------------------------
config_keys = [k for k,v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))]
exec(open('configurator.py').read()) # overrides from command line or config file
config = {k: globals()[k] for k in config_keys} # will be useful for logging
# -----------------------------------------------------------------------------
# Load vocab and dataset:
# poor man's data loader
data_dir = os.path.join('data', dataset)
train_data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r')
val_data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint16, mode='r')
# initialize tboard logging:
os.makedirs(out_dir, exist_ok=True)
tboard_dir = os.path.join(out_dir, "tboard_log")
init_tensorboard(tboard_dir)
def get_batch(split):
data = train_data if split == 'train' else val_data
ix = np.random.randint(len(data) - context_size, size=(batch_size,))
x = mx.stack([(mx.array(data[i:i+context_size])) for i in ix]).astype(mx.int64)
y = mx.stack([(mx.array(data[i+1:i+1+context_size])) for i in ix]).astype(mx.int64)
return x, y
def print_loss(optimizer, iteration_count, average_loss, tic):
toc = time.perf_counter()
print(
f"iter {iteration_count}: train loss {average_loss:.3f}, "
f"it/sec {1.0 / (toc - tic):.3f}, "
f"lr {optimizer.learning_rate:.9f}"
)
return toc
def update_learning_rate(it):
if it < warmup_iters:
return learning_rate * it / warmup_iters
if it > lr_decay_iters:
return min_lr
decay_ratio = (it - warmup_iters) / (
lr_decay_iters - warmup_iters
)
assert 0 <= decay_ratio <= 1
coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
new_lr = min_lr + coeff * (learning_rate - min_lr)
return new_lr
def log_tboard_dict(log_dict, itr, pre, post=''):
writer = get_tensorboard()
for k, v in log_dict.items():
writer.add_scalar(f'{pre}/{k}{post}', v, itr)
def main():
# model init
model_args = dict(n_layer=n_layer, n_head=n_head, n_embd=n_embd, block_size=context_size,
bias=bias, vocab_size=None, dropout=dropout) # start with model_args from command line
# initialize model:
if meta_vocab_size is None:
print("defaulting to vocab_size of GPT-2 to 50304 (50257 rounded up for efficiency)")
model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50304
gptconf = GPTConfig(**model_args) | model = GPT(gptconf) | 1 | 2023-12-27 04:14:24+00:00 | 4k |
jesenzhang/ComfyUI_StreamDiffusion | streamdiffusion/acceleration/tensorrt/builder.py | [
{
"identifier": "BaseModel",
"path": "streamdiffusion/acceleration/tensorrt/models.py",
"snippet": "class BaseModel:\n def __init__(\n self,\n fp16=False,\n device=\"cuda\",\n verbose=True,\n max_batch_size=16,\n min_batch_size=1,\n embedding_dim=768,\... | import gc
import os
import torch
from typing import *
from .models import BaseModel
from .utilities import (
build_engine,
export_onnx,
optimize_onnx,
) | 1,630 |
def create_onnx_path(name, onnx_dir, opt=True):
return os.path.join(onnx_dir, name + (".opt" if opt else "") + ".onnx")
class EngineBuilder:
def __init__(
self,
|
def create_onnx_path(name, onnx_dir, opt=True):
return os.path.join(onnx_dir, name + (".opt" if opt else "") + ".onnx")
class EngineBuilder:
def __init__(
self, | model: BaseModel, | 0 | 2023-12-29 09:00:03+00:00 | 4k |
neobundy/MLX-Stable-Diffusion-WebUI | stable_diffusion/vae.py | [
{
"identifier": "AutoencoderConfig",
"path": "stable_diffusion/config.py",
"snippet": "class AutoencoderConfig(BaseConfig):\n in_channels: int = 3\n out_channels: int = 3\n latent_channels_out: int = 8\n latent_channels_in: int = 4\n block_out_channels: Tuple[int] = (128, 256, 512, 512)\n... | import math
import mlx.core as mx
import mlx.nn as nn
from typing import List
from .config import AutoencoderConfig
from .unet import ResnetBlock2D, upsample_nearest | 2,596 | out_channels,
num_layers=layers_per_block,
resnet_groups=resnet_groups,
add_downsample=i < len(block_out_channels) - 1,
add_upsample=False,
)
for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:]))
]
self.mid_blocks = [
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
Attention(block_out_channels[-1], resnet_groups),
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
]
self.conv_norm_out = nn.GroupNorm(
resnet_groups, block_out_channels[-1], pytorch_compatible=True
)
self.conv_out = nn.Conv2d(block_out_channels[-1], out_channels, 3, padding=1)
def __call__(self, x):
# input block
x = self.conv_in(x)
# downsample + feature increase blocks
for l in self.down_blocks:
x = l(x)
# residual block + attention + residual block
x = self.mid_blocks[0](x)
x = self.mid_blocks[1](x)
x = self.mid_blocks[2](x)
# normalization + activation + output block
x = self.conv_norm_out(x)
x = nn.silu(x)
x = self.conv_out(x)
return x
class Decoder(nn.Module):
"""Implements the decoder side of the Autoencoder."""
def __init__(
self,
in_channels: int,
out_channels: int,
block_out_channels: List[int] = [64],
layers_per_block: int = 2,
resnet_groups: int = 32,
):
super().__init__()
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1
)
self.mid_blocks = [
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
Attention(block_out_channels[-1], resnet_groups),
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
]
channels = list(reversed(block_out_channels))
channels = [channels[0]] + channels
self.up_blocks = [
EncoderDecoderBlock2D(
in_channels,
out_channels,
num_layers=layers_per_block,
resnet_groups=resnet_groups,
add_downsample=False,
add_upsample=i < len(block_out_channels) - 1,
)
for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:]))
]
self.conv_norm_out = nn.GroupNorm(
resnet_groups, block_out_channels[0], pytorch_compatible=True
)
self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
def __call__(self, x):
x = self.conv_in(x)
x = self.mid_blocks[0](x)
x = self.mid_blocks[1](x)
x = self.mid_blocks[2](x)
for l in self.up_blocks:
x = l(x)
x = self.conv_norm_out(x)
x = nn.silu(x)
x = self.conv_out(x)
return x
class Autoencoder(nn.Module):
"""The autoencoder that allows us to perform diffusion in the latent space."""
| # Copyright © 2023 Apple Inc.
class Attention(nn.Module):
"""A single head unmasked attention for use with the VAE."""
def __init__(self, dims: int, norm_groups: int = 32):
super().__init__()
self.group_norm = nn.GroupNorm(norm_groups, dims, pytorch_compatible=True)
self.query_proj = nn.Linear(dims, dims)
self.key_proj = nn.Linear(dims, dims)
self.value_proj = nn.Linear(dims, dims)
self.out_proj = nn.Linear(dims, dims)
def __call__(self, x):
B, H, W, C = x.shape
y = self.group_norm(x)
queries = self.query_proj(y).reshape(B, H * W, C)
keys = self.key_proj(y).reshape(B, H * W, C)
values = self.value_proj(y).reshape(B, H * W, C)
scale = 1 / math.sqrt(queries.shape[-1])
scores = (queries * scale) @ keys.transpose(0, 2, 1)
attn = mx.softmax(scores, axis=-1)
y = (attn @ values).reshape(B, H, W, C)
y = self.out_proj(y)
x = x + y
return x
# Skip connections (Residual blocks) + downsampling + upsampling: common building blocks for Encoder and Decoder
class EncoderDecoderBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
num_layers: int = 1,
resnet_groups: int = 32,
add_downsample=True,
add_upsample=True,
):
super().__init__()
# Add the resnet blocks
self.resnets = [
ResnetBlock2D(
in_channels=in_channels if i == 0 else out_channels,
out_channels=out_channels,
groups=resnet_groups,
)
for i in range(num_layers)
]
# Add an optional downsampling layer
if add_downsample:
self.downsample = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=2, padding=1
)
# or upsampling layer
if add_upsample:
self.upsample = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1
)
def __call__(self, x):
for resnet in self.resnets:
x = resnet(x)
if "downsample" in self:
x = self.downsample(x)
if "upsample" in self:
x = self.upsample(upsample_nearest(x))
return x
class Encoder(nn.Module):
"""Implements the encoder side of the Autoencoder."""
def __init__(
self,
in_channels: int,
out_channels: int,
block_out_channels: List[int] = [64],
layers_per_block: int = 2,
resnet_groups: int = 32,
):
super().__init__()
# (B, H, W, C) -> (B, H, W, 64)
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1
)
channels = [block_out_channels[0]] + list(block_out_channels)
self.down_blocks = [
EncoderDecoderBlock2D(
in_channels,
out_channels,
num_layers=layers_per_block,
resnet_groups=resnet_groups,
add_downsample=i < len(block_out_channels) - 1,
add_upsample=False,
)
for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:]))
]
self.mid_blocks = [
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
Attention(block_out_channels[-1], resnet_groups),
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
]
self.conv_norm_out = nn.GroupNorm(
resnet_groups, block_out_channels[-1], pytorch_compatible=True
)
self.conv_out = nn.Conv2d(block_out_channels[-1], out_channels, 3, padding=1)
def __call__(self, x):
# input block
x = self.conv_in(x)
# downsample + feature increase blocks
for l in self.down_blocks:
x = l(x)
# residual block + attention + residual block
x = self.mid_blocks[0](x)
x = self.mid_blocks[1](x)
x = self.mid_blocks[2](x)
# normalization + activation + output block
x = self.conv_norm_out(x)
x = nn.silu(x)
x = self.conv_out(x)
return x
class Decoder(nn.Module):
"""Implements the decoder side of the Autoencoder."""
def __init__(
self,
in_channels: int,
out_channels: int,
block_out_channels: List[int] = [64],
layers_per_block: int = 2,
resnet_groups: int = 32,
):
super().__init__()
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1
)
self.mid_blocks = [
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
Attention(block_out_channels[-1], resnet_groups),
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
]
channels = list(reversed(block_out_channels))
channels = [channels[0]] + channels
self.up_blocks = [
EncoderDecoderBlock2D(
in_channels,
out_channels,
num_layers=layers_per_block,
resnet_groups=resnet_groups,
add_downsample=False,
add_upsample=i < len(block_out_channels) - 1,
)
for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:]))
]
self.conv_norm_out = nn.GroupNorm(
resnet_groups, block_out_channels[0], pytorch_compatible=True
)
self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
def __call__(self, x):
x = self.conv_in(x)
x = self.mid_blocks[0](x)
x = self.mid_blocks[1](x)
x = self.mid_blocks[2](x)
for l in self.up_blocks:
x = l(x)
x = self.conv_norm_out(x)
x = nn.silu(x)
x = self.conv_out(x)
return x
class Autoencoder(nn.Module):
"""The autoencoder that allows us to perform diffusion in the latent space."""
| def __init__(self, config: AutoencoderConfig): | 0 | 2023-12-25 05:49:34+00:00 | 4k |
ffmemes/ff-backend | src/flows/storage/memes.py | [
{
"identifier": "etl_memes_from_raw_telegram_posts",
"path": "src/storage/service.py",
"snippet": "async def etl_memes_from_raw_telegram_posts() -> None:\n insert_query = f\"\"\"\n INSERT INTO meme (\n meme_source_id, \n raw_meme_id, \n caption, \n s... | import asyncio
from typing import Any
from prefect import flow, get_run_logger
from src.storage.service import (
etl_memes_from_raw_telegram_posts,
etl_memes_from_raw_vk_posts,
get_unloaded_tg_memes,
get_unloaded_vk_memes,
get_pending_memes,
get_memes_to_ocr,
update_meme_status_of_ready_memes,
update_meme,
)
from src.storage.upload import (
download_meme_content_file,
upload_meme_content_to_tg,
download_meme_content_from_tg,
)
from src.storage import ads
from src.storage.ocr.mystic import ocr_content
from src.storage.constants import MemeStatus
from src.storage.watermark import add_watermark | 2,635 |
@flow
async def upload_memes_to_telegram(unloaded_memes: list[dict[str, Any]]) -> list[dict[str, Any]]:
logger = get_run_logger()
logger.info(f"Received {len(unloaded_memes)} memes to upload to Telegram.")
memes = []
for unloaded_meme in unloaded_memes:
try:
logger.info(f"Downloading meme {unloaded_meme['id']} content file.")
meme_original_content = await download_meme_content_file(unloaded_meme["content_url"])
except Exception as e:
logger.info(f"Meme {unloaded_meme['id']} content is not available to download, reason: {e}.")
|
@flow
async def upload_memes_to_telegram(unloaded_memes: list[dict[str, Any]]) -> list[dict[str, Any]]:
logger = get_run_logger()
logger.info(f"Received {len(unloaded_memes)} memes to upload to Telegram.")
memes = []
for unloaded_meme in unloaded_memes:
try:
logger.info(f"Downloading meme {unloaded_meme['id']} content file.")
meme_original_content = await download_meme_content_file(unloaded_meme["content_url"])
except Exception as e:
logger.info(f"Meme {unloaded_meme['id']} content is not available to download, reason: {e}.") | await update_meme(unloaded_meme["id"], status=MemeStatus.BROKEN_CONTENT_LINK) | 13 | 2023-12-23 12:55:43+00:00 | 4k |
Con6924/SPM | src/evaluation/eval_util.py | [
{
"identifier": "text2img",
"path": "src/engine/train_util.py",
"snippet": "def text2img(pipe: DiffusionPipeline,\n prompts: Union[str, list[str]], \n negative_prompt: Union[str, list[str]] = \"\", \n width: int = 512, \n height: int = 512,\n n... | import torch
import clip
import numpy as np
import random
from typing import List, Union
from PIL import Image
from src.engine.train_util import text2img
from src.configs.config import RootConfig
from src.misc.clip_templates import imagenet_templates
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from diffusers.pipelines import DiffusionPipeline | 2,700 |
# extract all images
images_feats = [image_preprocess(img) for img in images]
images_feats = torch.stack(images_feats, dim=0).cuda()
images_feats = model.encode_image(images_feats)
# compute the similarity
images_feats = images_feats / images_feats.norm(dim=1, p=2, keepdim=True)
texts_feats = texts_feats / texts_feats.norm(dim=1, p=2, keepdim=True)
if cross_matching:
score = w * images_feats @ texts_feats.T
# TODO: the *SUM* here remains to be verified
return score.sum(dim=1).clamp(min=0).cpu().numpy()
else:
score = w * images_feats * texts_feats
return score.sum(dim=1).clamp(min=0).cpu().numpy()
@torch.no_grad()
def clip_accuracy(
    images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],
    ablated_texts: Union[List[str], str],
    anchor_texts: Union[List[str], str],
    w: float = 2.5,
    clip_model: str = "ViT-B/32",
    n_px: int = 224,
):
    """
    Compute CLIPAccuracy according to CLIPScore.

    An image counts as "accurate" when its CLIPScore against the ablated
    prompt exceeds its CLIPScore against the anchor prompt; the result is
    the fraction of such images.

    Args:
        images (List[Union[torch.Tensor, np.ndarray, PIL.Image.Image, str]]): A list of generated images.
            Can be a list of torch.Tensor, numpy.ndarray, PIL.Image.Image, or a str of image path.
        ablated_texts (Union[List[str], str]): A list of prompts that are ablated from the anchor texts.
        anchor_texts (Union[List[str], str]): A list of prompts that the ablated concepts fall back to.
        w (float, optional): The weight of the similarity score. Defaults to 2.5.
        clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32".
        n_px (int, optional): The size of images. Defaults to 224.
    Returns:
        accuracy (float): The CLIPAccuracy of generated images, a single
            scalar in [0, 1] (mean over all images, not a per-image array).
    """
    # Accept a single prompt string for either side by promoting it to a list.
    if isinstance(ablated_texts, str):
        ablated_texts = [ablated_texts]
    if isinstance(anchor_texts, str):
        anchor_texts = [anchor_texts]
    assert len(ablated_texts) == len(
        anchor_texts
    ), "The length of ablated_texts and anchor_texts should be the same."
    ablated_clip_score = clip_score(images, ablated_texts, w, clip_model, n_px)
    anchor_clip_score = clip_score(images, anchor_texts, w, clip_model, n_px)
    # Elementwise comparison of the two per-image score arrays, then the mean
    # collapses to one Python float.
    accuracy = np.mean(anchor_clip_score < ablated_clip_score).item()
    return accuracy
def clip_eval_by_image(
    images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],
    ablated_texts: Union[List[str], str],
    anchor_texts: Union[List[str], str],
    w: float = 2.5,
    clip_model: str = "ViT-B/32",
    n_px: int = 224,
):
    """
    Compute CLIPScore and CLIPAccuracy with generated images.

    Unlike `clip_accuracy`, single-string prompts are not promoted to lists
    here; that normalization happens inside `clip_score` instead, and no
    length check between the two prompt lists is performed.

    Args:
        images (List[Union[torch.Tensor, np.ndarray, PIL.Image.Image, str]]): A list of generated images.
            Can be a list of torch.Tensor, numpy.ndarray, PIL.Image.Image, or a str of image path.
        ablated_texts (Union[List[str], str]): A list of prompts that are ablated from the anchor texts.
        anchor_texts (Union[List[str], str]): A list of prompts that the ablated concepts fall back to.
        w (float, optional): The weight of the similarity score. Defaults to 2.5.
        clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32".
        n_px (int, optional): The size of images. Defaults to 224.
    Returns:
        score (float): The CLIPScore of generated images (scalar mean over images).
        accuracy (float): The CLIPAccuracy of generated images (scalar in [0, 1]).
    """
    ablated_clip_score = clip_score(images, ablated_texts, w, clip_model, n_px)
    anchor_clip_score = clip_score(images, anchor_texts, w, clip_model, n_px)
    # Fraction of images whose ablated-prompt score beats the anchor score.
    accuracy = np.mean(anchor_clip_score < ablated_clip_score).item()
    score = np.mean(ablated_clip_score).item()
    return score, accuracy
def clip_eval(
pipe: DiffusionPipeline,
config: RootConfig,
w: float = 2.5,
clip_model: str = "ViT-B/32",
n_px: int = 224,
):
"""
Compute CLIPScore and CLIPAccuracy.
For each given prompt in config.logging.prompts, we:
1. sample config.logging.eval_num templates
2. generate images with the sampled templates
3. compute CLIPScore and CLIPAccuracy between each generated image and the *corresponding* template
to get the final CLIPScore and CLIPAccuracy for each prompt.
Args:
pipe (DiffusionPipeline): The diffusion pipeline.
config (RootConfig): The root config.
w (float, optional): The weight of the similarity score. Defaults to 2.5.
clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32".
n_px (int, optional): The size of images. Defaults to 224.
Returns:
score (list[float]): The CLIPScore of each concept to evaluate.
accuracy (list[float]): The CLIPAccuracy of each concept to evaluate.
"""
scores, accs = [], []
for prompt in config.logging.prompts:
templates = random.choices(imagenet_templates, k=config.logging.eval_num)
templated_prompts = [template.format(prompt) for template in templates]
| # ref:
# - https://github.com/jmhessel/clipscore/blob/main/clipscore.py
# - https://github.com/openai/CLIP/blob/main/notebooks/Prompt_Engineering_for_ImageNet.ipynb
def get_clip_preprocess(n_px=224):
    """Build the (image, text) preprocessing pair used for CLIP scoring.

    Mirrors the official CLIPScore preprocessing (resize, center crop,
    RGB conversion, tensor conversion, CLIP normalization) instead of the
    default transform returned by ``clip.load``. Text preprocessing is
    CLIP tokenization with truncation enabled.

    Args:
        n_px (int, optional): Target side length of the square crop. Defaults to 224.

    Returns:
        tuple: ``(image_transform, text_tokenizer)`` callables.
    """
    def _to_rgb(image):
        return image.convert("RGB")

    pipeline = [
        Resize(n_px, interpolation=Image.BICUBIC),
        CenterCrop(n_px),
        _to_rgb,
        ToTensor(),
        Normalize(
            (0.48145466, 0.4578275, 0.40821073),
            (0.26862954, 0.26130258, 0.27577711),
        ),
    ]

    def _tokenize(text):
        return clip.tokenize(text, truncate=True)

    return Compose(pipeline), _tokenize
@torch.no_grad()
def clip_score(
    images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],
    texts: str,
    w: float = 2.5,
    clip_model: str = "ViT-B/32",
    n_px: int = 224,
    cross_matching: bool = False,
):
    """
    Compute CLIPScore (https://arxiv.org/abs/2104.08718) for generated images according to their prompts.
    *Important*: same as the official implementation, we take *SUM* of the similarity scores across all the
    reference texts. If you are evaluating on the Concept Erasing task, it might should be modified to *MEAN*,
    or only one reference text should be given.

    NOTE(review): requires a CUDA device — the model and features are moved to
    "cuda" unconditionally. The CLIP model is also loaded on every call, which
    is wasteful when scoring in a loop; consider caching it at the call site.

    Args:
        images (List[Union[torch.Tensor, np.ndarray, PIL.Image.Image, str]]): A list of generated images.
            Can be a list of torch.Tensor, numpy.ndarray, PIL.Image.Image, or a str of image path.
        texts (str): A list of prompts.
        w (float, optional): The weight of the similarity score. Defaults to 2.5.
        clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32".
        n_px (int, optional): The size of images. Defaults to 224.
        cross_matching (bool, optional): Whether to compute the similarity between images and texts in cross-matching manner.
    Returns:
        score (np.ndarray): The CLIPScore of generated images.
            size: (len(images), )
    """
    if isinstance(texts, str):
        texts = [texts]
    if not cross_matching:
        # Pairwise mode: image i is scored only against text i.
        assert len(images) == len(
            texts
        ), "The length of images and texts should be the same if cross_matching is False."
    # Normalize all image inputs to PIL images. For torch.Tensor inputs this
    # assumes an HWC uint8 layout acceptable to Image.fromarray — TODO confirm.
    if isinstance(images[0], str):
        images = [Image.open(img) for img in images]
    elif isinstance(images[0], np.ndarray):
        images = [Image.fromarray(img) for img in images]
    elif isinstance(images[0], torch.Tensor):
        images = [Image.fromarray(img.cpu().numpy()) for img in images]
    else:
        assert isinstance(images[0], Image.Image), "Invalid image type."
    model, _ = clip.load(clip_model, device="cuda")  # loaded fresh on every call
    image_preprocess, text_preprocess = get_clip_preprocess(
        n_px
    )  # following the official implementation, rather than using the default CLIP preprocess
    # extract all texts
    texts_feats = text_preprocess(texts).cuda()
    texts_feats = model.encode_text(texts_feats)
    # extract all images
    images_feats = [image_preprocess(img) for img in images]
    images_feats = torch.stack(images_feats, dim=0).cuda()
    images_feats = model.encode_image(images_feats)
    # compute the similarity; L2-normalizing both sides makes every dot
    # product below a cosine similarity
    images_feats = images_feats / images_feats.norm(dim=1, p=2, keepdim=True)
    texts_feats = texts_feats / texts_feats.norm(dim=1, p=2, keepdim=True)
    if cross_matching:
        # (n_images, n_texts) similarity matrix, summed over all texts.
        score = w * images_feats @ texts_feats.T
        # TODO: the *SUM* here remains to be verified
        return score.sum(dim=1).clamp(min=0).cpu().numpy()
    else:
        # Elementwise product summed over the feature dim == per-pair dot
        # product, i.e. w * cosine(image_i, text_i).
        score = w * images_feats * texts_feats
        return score.sum(dim=1).clamp(min=0).cpu().numpy()
@torch.no_grad()
def clip_accuracy(
    images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],
    ablated_texts: Union[List[str], str],
    anchor_texts: Union[List[str], str],
    w: float = 2.5,
    clip_model: str = "ViT-B/32",
    n_px: int = 224,
):
    """Fraction of images whose ablated-prompt CLIPScore beats the anchor one.

    Args:
        images: Generated images as tensors, arrays, PIL images, or paths.
        ablated_texts: Prompt(s) ablated from the anchor texts.
        anchor_texts: Prompt(s) the ablated concepts fall back to.
        w: Similarity weight forwarded to `clip_score`. Defaults to 2.5.
        clip_model: CLIP model name. Defaults to "ViT-B/32".
        n_px: Image side length. Defaults to 224.

    Returns:
        float: Scalar CLIPAccuracy in [0, 1] (mean over all images).
    """
    # Promote single prompt strings to one-element lists.
    ablated_texts = [ablated_texts] if isinstance(ablated_texts, str) else ablated_texts
    anchor_texts = [anchor_texts] if isinstance(anchor_texts, str) else anchor_texts
    assert len(ablated_texts) == len(
        anchor_texts
    ), "The length of ablated_texts and anchor_texts should be the same."
    # Score the same images against both prompt sets.
    ablated_scores = clip_score(images, ablated_texts, w, clip_model, n_px)
    anchor_scores = clip_score(images, anchor_texts, w, clip_model, n_px)
    # Per-image win rate of the ablated prompt, collapsed to one float.
    return np.mean(anchor_scores < ablated_scores).item()
def clip_eval_by_image(
    images: List[Union[torch.Tensor, np.ndarray, Image.Image, str]],
    ablated_texts: Union[List[str], str],
    anchor_texts: Union[List[str], str],
    w: float = 2.5,
    clip_model: str = "ViT-B/32",
    n_px: int = 224,
):
    """
    Compute CLIPScore and CLIPAccuracy with generated images.

    Args:
        images (List[Union[torch.Tensor, np.ndarray, PIL.Image.Image, str]]): A list of generated images.
            Can be a list of torch.Tensor, numpy.ndarray, PIL.Image.Image, or a str of image path.
        ablated_texts (Union[List[str], str]): A list of prompts that are ablated from the anchor texts.
        anchor_texts (Union[List[str], str]): A list of prompts that the ablated concepts fall back to.
        w (float, optional): The weight of the similarity score. Defaults to 2.5.
        clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32".
        n_px (int, optional): The size of images. Defaults to 224.
    Returns:
        score (float): The CLIPScore of generated images (scalar mean over images).
        accuracy (float): The CLIPAccuracy of generated images (scalar in [0, 1]).
    """
    # Normalize single-string prompts and validate the pairing up front, for
    # consistency with `clip_accuracy`; previously mismatched lists only
    # failed deep inside `clip_score` with a less specific assertion.
    if isinstance(ablated_texts, str):
        ablated_texts = [ablated_texts]
    if isinstance(anchor_texts, str):
        anchor_texts = [anchor_texts]
    assert len(ablated_texts) == len(
        anchor_texts
    ), "The length of ablated_texts and anchor_texts should be the same."
    # Per-image scores against each prompt set.
    ablated_clip_score = clip_score(images, ablated_texts, w, clip_model, n_px)
    anchor_clip_score = clip_score(images, anchor_texts, w, clip_model, n_px)
    # Accuracy: fraction of images where the ablated prompt wins.
    accuracy = np.mean(anchor_clip_score < ablated_clip_score).item()
    # Score: mean ablated-prompt CLIPScore over all images.
    score = np.mean(ablated_clip_score).item()
    return score, accuracy
def clip_eval(
pipe: DiffusionPipeline,
config: RootConfig,
w: float = 2.5,
clip_model: str = "ViT-B/32",
n_px: int = 224,
):
"""
Compute CLIPScore and CLIPAccuracy.
For each given prompt in config.logging.prompts, we:
1. sample config.logging.eval_num templates
2. generate images with the sampled templates
3. compute CLIPScore and CLIPAccuracy between each generated image and the *corresponding* template
to get the final CLIPScore and CLIPAccuracy for each prompt.
Args:
pipe (DiffusionPipeline): The diffusion pipeline.
config (RootConfig): The root config.
w (float, optional): The weight of the similarity score. Defaults to 2.5.
clip_model (str, optional): The name of CLIP model. Defaults to "ViT-B/32".
n_px (int, optional): The size of images. Defaults to 224.
Returns:
score (list[float]): The CLIPScore of each concept to evaluate.
accuracy (list[float]): The CLIPAccuracy of each concept to evaluate.
"""
scores, accs = [], []
for prompt in config.logging.prompts:
templates = random.choices(imagenet_templates, k=config.logging.eval_num)
templated_prompts = [template.format(prompt) for template in templates] | samples = text2img( | 0 | 2023-12-26 03:19:16+00:00 | 4k |
theOneAndOnlyOne/BeReel | main.py | [
{
"identifier": "create_images",
"path": "combineImages.py",
"snippet": "def create_images():\n # Example usage\n primary_folder = os.path.join(os.getcwd(), \"primary\")\n secondary_folder = os.path.join(os.getcwd(), \"secondary\")\n output_folder = os.path.join(os.getcwd(), \"combined\")\n\... | import os
import requests
from flask import Flask, render_template, request, jsonify
from combineImages import create_images
from generateSlideshow import buildSlideshow
from recap import butidRecap
from datetime import datetime | 2,143 | # Convert the input strings to datetime objects
start_date_object = datetime.strptime(start_date_str, "%Y-%m-%d")
end_date_object = datetime.strptime(end_date_str, "%Y-%m-%d")
# Iterate through the 'data' array and download images
for item in data_array:
image_url = item["primary"].get("url", "")
secondary_image_url = item["secondary"].get("url", "")
date = item["memoryDay"]
date_object = datetime.strptime(date, "%Y-%m-%d")
if image_url and start_date_object <= date_object <= end_date_object:
# Extracting the image name from the URL
image_name = date + "_" + image_url.split("/")[-1]
# Downloading the image
image_path = os.path.join(folder_name, image_name)
with open(image_path, "wb") as img_file:
img_response = requests.get(image_url)
if img_response.status_code == 200:
img_file.write(img_response.content)
print(f"Downloaded {image_name} to {folder_name}")
else:
print(f"Failed to download {image_name}")
if secondary_image_url and start_date_object <= date_object <= end_date_object:
# Extracting the image name from the URL
image_name = date + "_" + secondary_image_url.split("/")[-1]
# Downloading the image
image_path = os.path.join(secondary_folder_name, image_name)
with open(image_path, "wb") as img_file:
img_response = requests.get(secondary_image_url)
if img_response.status_code == 200:
img_file.write(img_response.content)
print(f"Downloaded {image_name} to {secondary_folder_name}")
else:
print(f"Failed to download {image_name}")
return "complete"
# All images referenced in the 'primary' URLs should now be saved in the 'primary' folder
# 'secondary' URLS saved in 'secondary', etc.
# -------------------------------------------------------------------------------------------------------------------------
# Flask App Routing
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
phone_number = request.form["phone_number"]
otp_session = send_code(phone_number)
if otp_session != "n/a":
return render_template("verify.html", otp_session=otp_session)
return render_template(
"index.html",
message="Invalid phone number. Check formatting and Please try again.",
)
return render_template("index.html")
@app.route("/verify", methods=["POST"])
def verify_code():
if request.method == "POST":
user_code = request.form["verification_code"]
otp_session = request.form["otp_session"]
print("> verify_code otp_session: ", otp_session)
tokenObj = verify(otp_session, user_code)
if tokenObj != "n/a":
return render_template("process.html", tokenObj=tokenObj)
else:
return render_template("failure.html")
# return render_template('verify.html', tokenObj='n/a', message='Invalid verification code. Please try again.')
return render_template("verify.html")
@app.route("/process", methods=["POST"])
def process_data():
if request.method == "POST":
start_date_range = request.form["start_date_range"]
end_date_range = request.form["end_date_range"]
wav_file = request.files["wav_file"]
tokenObj = request.form["tokenObj"]
mode = request.form.get("mode")
print("> HTML Form Elements: ")
print("start_date_range ", str(start_date_range))
print("end_date_range ", str(end_date_range))
print("wav_file ", str(wav_file))
print("mode", str(mode))
# Call get_memories function
print("> donwloading music file locally: ")
try:
# Save the uploaded WAV file locally
upload_directory = os.getcwd()
print("saving file to ", upload_directory)
if not os.path.exists(upload_directory):
os.makedirs(upload_directory)
wav_file.save(os.path.join(upload_directory, "curr_song.wav"))
except Exception as e:
print(f"Error in processing data: {str(e)}")
result = " "
if not os.path.exists("primary") or not os.path.exists("secondary"):
print("> downloading images locally")
result = get_memories(tokenObj, start_date_range, end_date_range)
if result != "n/a":
# Execute the Python functions
create_images() # process images and apply effects
# do something with current page here
|
app = Flask(__name__, template_folder="templates")
# Acquire Phone Number from User
def send_code(phone):
    """Request a BeReal OTP session for *phone*.

    Args:
        phone: Phone number in "+##########" format.

    Returns:
        str: The OTP session token on success, or "n/a" when the request
        fails or the response carries no session.
    """
    print("> Entered phone number is ", phone)
    # First POST to kick off the OTP session.
    endpoint = "https://berealapi.fly.dev/login/send-code"
    print("-- Sending OTP Session Request --")
    response = requests.post(endpoint, json={"phone": phone})
    if response.status_code != 201:
        print("Request failed with status code:", response.status_code)
        print(response.json())
        return "n/a"
    print("> Request successful!")
    body = response.json()
    print("Response:", body)
    if "data" in body and "otpSession" in body["data"]:
        otp_session = body["data"]["otpSession"]
        print("OTP Session:", otp_session)
        return otp_session
    print("No 'otpSession' found in the response.")
    return "n/a"
# Verify Session using otp_session code and user entered otp_code recieved from phone notification
def verify(otp_session, otp_code):
    """Verify an OTP code against a BeReal OTP session.

    Args:
        otp_session: Session token returned by `send_code`.
        otp_code: The code the user received on their phone.

    Returns:
        str: The auth token on success, or "n/a" on any failure (HTTP error
        or missing token in the response body).

    Bug fix: the previous implementation called `exit()` on failure, which
    terminated the entire Flask process and made the caller's failure-page
    branch (`tokenObj != "n/a"` in `verify_code`) unreachable. Failures now
    report "n/a" so the web flow can recover.
    """
    print("> OTP: ", otp_code)
    # Second POST request to verify based on user input
    url_verify = "https://berealapi.fly.dev/login/verify"
    payload_verify = {"code": otp_code, "otpSession": otp_session}
    print("-- Sending Verify Request --")
    response_verify = requests.post(url_verify, json=payload_verify)
    tokenObj = "n/a"
    if response_verify.status_code == 201:
        print("> Verification request successful!")
        print("Response:", response_verify.json())
        response_json = response_verify.json()
        if "data" in response_json and "token" in response_json["data"]:
            tokenObj = response_json["data"]["token"]
            print("tokenObj:", tokenObj)
        else:
            # Leave tokenObj as "n/a"; the caller renders failure.html.
            print("No 'tokenObj' found in the response.")
    else:
        print(
            "> Verification request failed with status code:",
            response_verify.status_code,
        )
        print(response_verify.json())
    return tokenObj
# Fetch user memories. Skip to this stage if we already acquired reusable token
def get_memories(tokenObj, start_date_range, end_date_range):
    """Download primary/secondary memory images within a date range.

    Images are written into ./primary and ./secondary (created if missing),
    named "<memoryDay>_<original filename>".

    Args:
        tokenObj: Auth token returned by `verify`.
        start_date_range / end_date_range: Inclusive bounds, "YYYY-MM-DD".

    Returns:
        str: "complete" on success, or "n/a" when the memories feed request
        fails.

    Bug fix: the previous version returned "complete" even when the feed
    request failed, so the caller's `result != "n/a"` guard could never
    fire; it also opened each image file before checking the download
    status, leaving zero-byte files behind on failure.
    """
    url_mem_feed = "https://berealapi.fly.dev/friends/mem-feed"
    headers = {"token": tokenObj}
    # Create the output folders if they don't exist.
    folder_name = "primary"
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)
    secondary_folder_name = "secondary"
    if not os.path.exists(secondary_folder_name):
        os.makedirs(secondary_folder_name)

    print("-- Sending Get Memories Request --")
    response_mem_feed = requests.get(url_mem_feed, headers=headers)
    if response_mem_feed.status_code != 200:
        print("GET request failed with status code:", response_mem_feed.status_code)
        return "n/a"
    print("> GET request successful!")
    print("Response:", response_mem_feed.json())
    print("we did it yay")
    response_data = response_mem_feed.json().get("data", {})
    data_array = response_data.get("data", [])

    # Parse the user-supplied range once, outside the download loop.
    start_date_object = datetime.strptime(str(start_date_range), "%Y-%m-%d")
    end_date_object = datetime.strptime(str(end_date_range), "%Y-%m-%d")

    def _download(url, image_name, folder):
        """Fetch one image URL into folder/image_name; skip the file on failure."""
        img_response = requests.get(url)
        if img_response.status_code == 200:
            with open(os.path.join(folder, image_name), "wb") as img_file:
                img_file.write(img_response.content)
            print(f"Downloaded {image_name} to {folder}")
        else:
            print(f"Failed to download {image_name}")

    for item in data_array:
        image_url = item["primary"].get("url", "")
        secondary_image_url = item["secondary"].get("url", "")
        date = item["memoryDay"]
        date_object = datetime.strptime(date, "%Y-%m-%d")
        if date_object < start_date_object or date_object > end_date_object:
            continue  # outside the requested range
        if image_url:
            _download(image_url, date + "_" + image_url.split("/")[-1], folder_name)
        if secondary_image_url:
            _download(
                secondary_image_url,
                date + "_" + secondary_image_url.split("/")[-1],
                secondary_folder_name,
            )
    return "complete"
    # All images referenced in the 'primary' URLs are now saved in the
    # 'primary' folder; 'secondary' URLs in 'secondary'.
# All images referenced in the 'primary' URLs should now be saved in the 'primary' folder
# 'secondary' URLS saved in 'secondary', etc.
# -------------------------------------------------------------------------------------------------------------------------
# Flask App Routing
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
phone_number = request.form["phone_number"]
otp_session = send_code(phone_number)
if otp_session != "n/a":
return render_template("verify.html", otp_session=otp_session)
return render_template(
"index.html",
message="Invalid phone number. Check formatting and Please try again.",
)
return render_template("index.html")
@app.route("/verify", methods=["POST"])
def verify_code():
if request.method == "POST":
user_code = request.form["verification_code"]
otp_session = request.form["otp_session"]
print("> verify_code otp_session: ", otp_session)
tokenObj = verify(otp_session, user_code)
if tokenObj != "n/a":
return render_template("process.html", tokenObj=tokenObj)
else:
return render_template("failure.html")
# return render_template('verify.html', tokenObj='n/a', message='Invalid verification code. Please try again.')
return render_template("verify.html")
@app.route("/process", methods=["POST"])
def process_data():
if request.method == "POST":
start_date_range = request.form["start_date_range"]
end_date_range = request.form["end_date_range"]
wav_file = request.files["wav_file"]
tokenObj = request.form["tokenObj"]
mode = request.form.get("mode")
print("> HTML Form Elements: ")
print("start_date_range ", str(start_date_range))
print("end_date_range ", str(end_date_range))
print("wav_file ", str(wav_file))
print("mode", str(mode))
# Call get_memories function
print("> donwloading music file locally: ")
try:
# Save the uploaded WAV file locally
upload_directory = os.getcwd()
print("saving file to ", upload_directory)
if not os.path.exists(upload_directory):
os.makedirs(upload_directory)
wav_file.save(os.path.join(upload_directory, "curr_song.wav"))
except Exception as e:
print(f"Error in processing data: {str(e)}")
result = " "
if not os.path.exists("primary") or not os.path.exists("secondary"):
print("> downloading images locally")
result = get_memories(tokenObj, start_date_range, end_date_range)
if result != "n/a":
# Execute the Python functions
create_images() # process images and apply effects
# do something with current page here | buildSlideshow(mode) # assemble files and load audio | 1 | 2023-12-25 20:55:01+00:00 | 4k |
dakpinaroglu/Frame2seq | frame2seq/utils/design.py | [
{
"identifier": "residue_constants",
"path": "frame2seq/utils/residue_constants.py",
"snippet": "def load_stereo_chemical_props() -> Tuple[Mapping[str, List[Bond]],\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(\n sequence: str,\n mapping: Mapping[str, int],\n) -> np.ndarra... | import os
import torch
import numpy as np
from tqdm import tqdm
from frame2seq.utils import residue_constants
from frame2seq.utils.util import get_neg_pll
from frame2seq.utils.pdb2input import get_inference_inputs
from frame2seq.utils.pred2output import output_fasta, output_indiv_fasta, output_indiv_csv | 2,315 |
def design(self, pdb_file, chain_id, temperature, num_samples, omit_AA,
fixed_positions, save_indiv_seqs, save_indiv_neg_pll, verbose):
seq_mask, aatype, X = get_inference_inputs(pdb_file, chain_id)
seq_mask = seq_mask.to(self.device)
aatype = aatype.to(self.device)
X = X.to(self.device)
str_form = [residue_constants.ID_TO_AA[int(i)] for i in aatype[0]]
input_aatype_onehot = residue_constants.sequence_to_onehot(
sequence=str_form,
mapping=residue_constants.AA_TO_ID,
)
input_aatype_onehot = torch.from_numpy(input_aatype_onehot).float()
input_aatype_onehot = input_aatype_onehot.unsqueeze(0)
input_aatype_onehot = input_aatype_onehot.to(self.device)
input_aatype_onehot = torch.zeros_like(input_aatype_onehot)
input_aatype_onehot[:, :,
20] = 1 # all positions are masked (set to unknown)
if fixed_positions is not None:
for pos in fixed_positions:
pos = pos - 1 # convert to 0-indexing
input_aatype_onehot[:, pos, :] = 0
input_aatype_onehot[:, pos, aatype[0][
pos]] = 1 # fixed positions set to the input sequence
model_outs, scores, preds = {}, {}, []
with torch.no_grad():
pred_seq1 = self.models[0].forward(X, seq_mask, input_aatype_onehot)
pred_seq2 = self.models[1].forward(X, seq_mask, input_aatype_onehot)
pred_seq3 = self.models[2].forward(X, seq_mask, input_aatype_onehot)
pred_seq = (pred_seq1 + pred_seq2 + pred_seq3) / 3 # ensemble
if omit_AA is not None:
for aa in omit_AA:
pred_seq[:, :, residue_constants.AA_TO_ID[aa]] = -np.inf
pred_seq = pred_seq / temperature
pred_seq = torch.nn.functional.softmax(pred_seq, dim=-1)
pred_seq = pred_seq[seq_mask]
sampled_seq = torch.multinomial(pred_seq,
num_samples=num_samples,
replacement=True)
for sample in tqdm(range(num_samples)):
sampled_seq_i = sampled_seq[:, sample]
input_seq_i = aatype[seq_mask] # sequence from the input PDB file
neg_pll, avg_neg_pll = get_neg_pll(pred_seq, sampled_seq_i)
input_neg_pll, input_avg_neg_pll = get_neg_pll(
pred_seq, input_seq_i
) # negative pseudo-log-likelihood of the input sequence
recovery = torch.sum(
sampled_seq_i == aatype[seq_mask]) / torch.sum(seq_mask)
sampled_seq_i = [
residue_constants.ID_TO_AA[int(i)] for i in sampled_seq_i
]
sampled_seq_i = "".join(sampled_seq_i)
if verbose:
print(f"Recovery : {recovery*100:.2f}%")
print(
f"Average negative pseudo-log-likelihood : {avg_neg_pll:.2f}"
)
print(f"Sequence: {sampled_seq_i}")
model_outs['pdbid'] = pdb_file.split('/')[-1].split('.')[0]
model_outs['chain'] = chain_id
model_outs['sample'] = sample
model_outs['seq'] = sampled_seq_i
model_outs['recovery'] = recovery
model_outs['avg_neg_pll'] = avg_neg_pll
model_outs['temp'] = temperature
preds.append(model_outs)
fasta_dir = os.path.join(self.save_dir, 'seqs')
os.makedirs(fasta_dir, exist_ok=True)
if save_indiv_seqs: # save per-sequence fasta files
output_indiv_fasta(model_outs, fasta_dir)
if save_indiv_neg_pll: # save per-residue negative pseudo-log-likelihoods
scores['pdbid'] = pdb_file.split('/')[-1].split('.')[0]
scores['chain'] = chain_id
scores['sample'] = sample
scores['res_idx'] = [i for i in range(len(sampled_seq_i))]
scores['neg_pll'] = [
neg_pll[i].item() for i in range(len(sampled_seq_i))
]
csv_dir = os.path.join(self.save_dir, 'scores')
os.makedirs(csv_dir, exist_ok=True)
output_indiv_csv(scores, csv_dir)
|
def design(self, pdb_file, chain_id, temperature, num_samples, omit_AA,
fixed_positions, save_indiv_seqs, save_indiv_neg_pll, verbose):
seq_mask, aatype, X = get_inference_inputs(pdb_file, chain_id)
seq_mask = seq_mask.to(self.device)
aatype = aatype.to(self.device)
X = X.to(self.device)
str_form = [residue_constants.ID_TO_AA[int(i)] for i in aatype[0]]
input_aatype_onehot = residue_constants.sequence_to_onehot(
sequence=str_form,
mapping=residue_constants.AA_TO_ID,
)
input_aatype_onehot = torch.from_numpy(input_aatype_onehot).float()
input_aatype_onehot = input_aatype_onehot.unsqueeze(0)
input_aatype_onehot = input_aatype_onehot.to(self.device)
input_aatype_onehot = torch.zeros_like(input_aatype_onehot)
input_aatype_onehot[:, :,
20] = 1 # all positions are masked (set to unknown)
if fixed_positions is not None:
for pos in fixed_positions:
pos = pos - 1 # convert to 0-indexing
input_aatype_onehot[:, pos, :] = 0
input_aatype_onehot[:, pos, aatype[0][
pos]] = 1 # fixed positions set to the input sequence
model_outs, scores, preds = {}, {}, []
with torch.no_grad():
pred_seq1 = self.models[0].forward(X, seq_mask, input_aatype_onehot)
pred_seq2 = self.models[1].forward(X, seq_mask, input_aatype_onehot)
pred_seq3 = self.models[2].forward(X, seq_mask, input_aatype_onehot)
pred_seq = (pred_seq1 + pred_seq2 + pred_seq3) / 3 # ensemble
if omit_AA is not None:
for aa in omit_AA:
pred_seq[:, :, residue_constants.AA_TO_ID[aa]] = -np.inf
pred_seq = pred_seq / temperature
pred_seq = torch.nn.functional.softmax(pred_seq, dim=-1)
pred_seq = pred_seq[seq_mask]
sampled_seq = torch.multinomial(pred_seq,
num_samples=num_samples,
replacement=True)
for sample in tqdm(range(num_samples)):
sampled_seq_i = sampled_seq[:, sample]
input_seq_i = aatype[seq_mask] # sequence from the input PDB file
neg_pll, avg_neg_pll = get_neg_pll(pred_seq, sampled_seq_i)
input_neg_pll, input_avg_neg_pll = get_neg_pll(
pred_seq, input_seq_i
) # negative pseudo-log-likelihood of the input sequence
recovery = torch.sum(
sampled_seq_i == aatype[seq_mask]) / torch.sum(seq_mask)
sampled_seq_i = [
residue_constants.ID_TO_AA[int(i)] for i in sampled_seq_i
]
sampled_seq_i = "".join(sampled_seq_i)
if verbose:
print(f"Recovery : {recovery*100:.2f}%")
print(
f"Average negative pseudo-log-likelihood : {avg_neg_pll:.2f}"
)
print(f"Sequence: {sampled_seq_i}")
model_outs['pdbid'] = pdb_file.split('/')[-1].split('.')[0]
model_outs['chain'] = chain_id
model_outs['sample'] = sample
model_outs['seq'] = sampled_seq_i
model_outs['recovery'] = recovery
model_outs['avg_neg_pll'] = avg_neg_pll
model_outs['temp'] = temperature
preds.append(model_outs)
fasta_dir = os.path.join(self.save_dir, 'seqs')
os.makedirs(fasta_dir, exist_ok=True)
if save_indiv_seqs: # save per-sequence fasta files
output_indiv_fasta(model_outs, fasta_dir)
if save_indiv_neg_pll: # save per-residue negative pseudo-log-likelihoods
scores['pdbid'] = pdb_file.split('/')[-1].split('.')[0]
scores['chain'] = chain_id
scores['sample'] = sample
scores['res_idx'] = [i for i in range(len(sampled_seq_i))]
scores['neg_pll'] = [
neg_pll[i].item() for i in range(len(sampled_seq_i))
]
csv_dir = os.path.join(self.save_dir, 'scores')
os.makedirs(csv_dir, exist_ok=True)
output_indiv_csv(scores, csv_dir) | output_fasta( | 3 | 2023-12-25 09:29:36+00:00 | 4k |
davep/oshit | oshit/app/widgets/hacker_news.py | [
{
"identifier": "Article",
"path": "oshit/hn/item/article.py",
"snippet": "class Article(Item):\n \"\"\"Base class for all types of articles on HackerNews.\"\"\"\n\n descendants: int = 0\n \"\"\"The number of descendants of the article.\"\"\"\n\n score: int = 0\n \"\"\"The score of the ar... | from textual import on
from textual.reactive import var
from textual.widgets import TabbedContent, Tabs
from ...hn.item import Article
from ..data import load_configuration, save_configuration
from .items import Items | 1,696 | """Widget that displays the HackerNews content."""
##############################################################################
# Textual imports.
##############################################################################
# Local imports.
##############################################################################
class HackerNews(TabbedContent):
"""The HackerNews content."""
BINDINGS = [
("escape", "escape"),
("down, enter", "pane"),
("left", "previous"),
("right", "next"),
]
compact: var[bool] = var(True)
"""Should we use a compact or relaxed display?"""
def on_mount(self) -> None:
"""Configure the widget once the DOM is ready."""
self.compact = load_configuration().compact_mode
@property
| """Widget that displays the HackerNews content."""
##############################################################################
# Textual imports.
##############################################################################
# Local imports.
##############################################################################
class HackerNews(TabbedContent):
"""The HackerNews content."""
BINDINGS = [
("escape", "escape"),
("down, enter", "pane"),
("left", "previous"),
("right", "next"),
]
compact: var[bool] = var(True)
"""Should we use a compact or relaxed display?"""
def on_mount(self) -> None:
"""Configure the widget once the DOM is ready."""
self.compact = load_configuration().compact_mode
@property | def active_items(self) -> Items[Article]: | 0 | 2023-12-25 14:06:07+00:00 | 4k |
Maximilian-Winter/llama-cpp-agent | src/llama_cpp_agent/llm_agent.py | [
{
"identifier": "LlamaLLMSettings",
"path": "src/llama_cpp_agent/llm_settings.py",
"snippet": "class LlamaLLMSettings:\n model_path: str\n n_gpu_layers: int = 0\n f16_kv: bool = True\n offload_kqv: bool = True\n use_mlock: bool = False\n embedding: bool = False\n n_threads: int = No... | import json
from dataclasses import dataclass
from typing import List, Dict, Literal, Callable, Union
from llama_cpp import Llama, LlamaGrammar
from .llm_settings import LlamaLLMSettings
from .messages_formatter import MessagesFormatterType, get_predefined_messages_formatter, MessagesFormatter
from .function_calling import LlamaCppFunctionTool, LlamaCppFunctionToolRegistry | 2,719 |
@dataclass
class StreamingResponse:
text: str
is_last_response: bool
class LlamaCppAgent:
"""
A base agent that can be used for chat, structured output and function calling. Is used as part of all other agents.
"""
def __init__(self, model: Union[Llama, LlamaLLMSettings], name: str = "llamacpp_agent", system_prompt: str = "You are helpful assistant.",
predefined_messages_formatter_type: MessagesFormatterType = MessagesFormatterType.CHATML,
custom_messages_formatter: MessagesFormatter = None, debug_output: bool = False):
if isinstance(model, LlamaLLMSettings):
model = Llama(**model.as_dict())
self.model = model
self.name = name
self.system_prompt = system_prompt
self.debug_output = debug_output
self.messages = []
if custom_messages_formatter is not None:
self.messages_formatter = custom_messages_formatter
else:
self.messages_formatter = get_predefined_messages_formatter(predefined_messages_formatter_type)
@staticmethod
def get_function_tool_registry(function_tool_list: List[LlamaCppFunctionTool]):
|
@dataclass
class StreamingResponse:
text: str
is_last_response: bool
class LlamaCppAgent:
"""
A base agent that can be used for chat, structured output and function calling. Is used as part of all other agents.
"""
def __init__(self, model: Union[Llama, LlamaLLMSettings], name: str = "llamacpp_agent", system_prompt: str = "You are helpful assistant.",
predefined_messages_formatter_type: MessagesFormatterType = MessagesFormatterType.CHATML,
custom_messages_formatter: MessagesFormatter = None, debug_output: bool = False):
if isinstance(model, LlamaLLMSettings):
model = Llama(**model.as_dict())
self.model = model
self.name = name
self.system_prompt = system_prompt
self.debug_output = debug_output
self.messages = []
if custom_messages_formatter is not None:
self.messages_formatter = custom_messages_formatter
else:
self.messages_formatter = get_predefined_messages_formatter(predefined_messages_formatter_type)
@staticmethod
def get_function_tool_registry(function_tool_list: List[LlamaCppFunctionTool]): | function_tool_registry = LlamaCppFunctionToolRegistry() | 5 | 2023-12-29 16:54:39+00:00 | 4k |
usail-hkust/LLMTSCS | run_presslight.py | [
{
"identifier": "config",
"path": "utils/config.py",
"snippet": "DIC_AGENTS = {\n \"Random\": RandomAgent,\n \"Fixedtime\": FixedtimeAgent,\n \"MaxPressure\": MaxPressureAgent,\n \"EfficientMaxPressure\": EfficientMaxPressureAgent,\n \"AdvancedMaxPressure\": AdvancedMaxPressureAgent,\n\n ... | from utils import config, error
from utils.utils import pipeline_wrapper, merge
from multiprocessing import Process
import time
import argparse
import os | 2,049 |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--memo", type=str, default='PressLight')
parser.add_argument("--mod", type=str, default="EfficientPressLight") # EPressLight
parser.add_argument("--model", type=str, default="PressLight")
parser.add_argument("--proj_name", type=str, default="chatgpt-TSCS")
parser.add_argument("--eightphase", action="store_true", default=False)
parser.add_argument("--gen", type=int, default=1)
parser.add_argument("--multi_process", action="store_true", default=True)
parser.add_argument("--workers", type=int, default=1)
parser.add_argument("--dataset", type=str, default="jinan")
parser.add_argument("--traffic_file", type=str, default="anon_3_4_jinan_real.json")
parser.add_argument("--duration", type=int, default=30)
parser.add_argument("--num_rounds", type=int, default=100)
return parser.parse_args()
def main(in_args=None):
traffic_file_list = []
if in_args.dataset == 'jinan':
count = 3600
road_net = "3_4"
traffic_file_list = ["anon_3_4_jinan_real.json", "anon_3_4_jinan_real_2000.json", "anon_3_4_jinan_real_2500.json"]
num_rounds = in_args.num_rounds
template = "Jinan"
elif in_args.dataset == 'hangzhou':
count = 3600
road_net = "4_4"
traffic_file_list = ["anon_4_4_hangzhou_real.json", "anon_4_4_hangzhou_real_5816.json"]
num_rounds = in_args.num_rounds
template = "Hangzhou"
elif in_args.dataset == 'newyork_16x3':
count = 3600
road_net = "16_3"
traffic_file_list = ["anon_16_3_newyork_real.json"]
num_rounds = 80
template = "NewYork"
elif in_args.dataset == 'newyork_28x7':
count = 3600
road_net = "28_7"
traffic_file_list = ["anon_28_7_newyork_real_double.json", "anon_28_7_newyork_real_triple.json"]
num_rounds = 80
template = "NewYork"
# flow_file error
try:
if in_args.traffic_file not in traffic_file_list:
|
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--memo", type=str, default='PressLight')
parser.add_argument("--mod", type=str, default="EfficientPressLight") # EPressLight
parser.add_argument("--model", type=str, default="PressLight")
parser.add_argument("--proj_name", type=str, default="chatgpt-TSCS")
parser.add_argument("--eightphase", action="store_true", default=False)
parser.add_argument("--gen", type=int, default=1)
parser.add_argument("--multi_process", action="store_true", default=True)
parser.add_argument("--workers", type=int, default=1)
parser.add_argument("--dataset", type=str, default="jinan")
parser.add_argument("--traffic_file", type=str, default="anon_3_4_jinan_real.json")
parser.add_argument("--duration", type=int, default=30)
parser.add_argument("--num_rounds", type=int, default=100)
return parser.parse_args()
def main(in_args=None):
traffic_file_list = []
if in_args.dataset == 'jinan':
count = 3600
road_net = "3_4"
traffic_file_list = ["anon_3_4_jinan_real.json", "anon_3_4_jinan_real_2000.json", "anon_3_4_jinan_real_2500.json"]
num_rounds = in_args.num_rounds
template = "Jinan"
elif in_args.dataset == 'hangzhou':
count = 3600
road_net = "4_4"
traffic_file_list = ["anon_4_4_hangzhou_real.json", "anon_4_4_hangzhou_real_5816.json"]
num_rounds = in_args.num_rounds
template = "Hangzhou"
elif in_args.dataset == 'newyork_16x3':
count = 3600
road_net = "16_3"
traffic_file_list = ["anon_16_3_newyork_real.json"]
num_rounds = 80
template = "NewYork"
elif in_args.dataset == 'newyork_28x7':
count = 3600
road_net = "28_7"
traffic_file_list = ["anon_28_7_newyork_real_double.json", "anon_28_7_newyork_real_triple.json"]
num_rounds = 80
template = "NewYork"
# flow_file error
try:
if in_args.traffic_file not in traffic_file_list: | raise error.flowFileException('Flow file does not exist.') | 1 | 2023-12-26 08:31:47+00:00 | 4k |
alipay/private_llm | demo/edge_device.py | [
{
"identifier": "PLLlamaConfig",
"path": "demo/model.py",
"snippet": "class PLLlamaConfig(LlamaConfig):\n def __init__(\n self,\n vocab_size=32000,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n num_k... | from demo.model import PLLlamaConfig, LlamaForDevice
from pl_lib import CommProfiler
from transformers import AutoTokenizer
from pl_lib import init_tcp_b
import torch
import logging
import argparse | 3,586 |
parser = argparse.ArgumentParser()
parser.add_argument(
"weight_path",
default=None,
help="path to device model weight",
)
parser.add_argument(
"llama_path",
default=None,
help="root dir of huggingface llama model, should contain weight files and config",
)
parser.add_argument(
"--ip",
default="127.0.0.1",
help="socket ip of cloud",
)
parser.add_argument(
"--port",
default=12345,
help="socket port of cloud",
)
parser.add_argument(
"--device",
default="cpu",
help="device of model",
)
parser.add_argument(
"--debug",
default=False,
)
args = parser.parse_args()
log_format = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
logging.basicConfig(
level=logging.DEBUG if args.debug else logging.INFO, format=log_format
)
if __name__ == "__main__":
mock_small = True
load_weights = False
logging.info("start connecting...")
s = init_tcp_b(args.ip, args.port)
|
parser = argparse.ArgumentParser()
parser.add_argument(
"weight_path",
default=None,
help="path to device model weight",
)
parser.add_argument(
"llama_path",
default=None,
help="root dir of huggingface llama model, should contain weight files and config",
)
parser.add_argument(
"--ip",
default="127.0.0.1",
help="socket ip of cloud",
)
parser.add_argument(
"--port",
default=12345,
help="socket port of cloud",
)
parser.add_argument(
"--device",
default="cpu",
help="device of model",
)
parser.add_argument(
"--debug",
default=False,
)
args = parser.parse_args()
log_format = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
logging.basicConfig(
level=logging.DEBUG if args.debug else logging.INFO, format=log_format
)
if __name__ == "__main__":
mock_small = True
load_weights = False
logging.info("start connecting...")
s = init_tcp_b(args.ip, args.port) | config = PLLlamaConfig.from_pretrained(args.llama_path) | 0 | 2023-12-25 06:28:04+00:00 | 4k |
ohadmata/shmessy | src/shmessy/types_handler.py | [
{
"identifier": "Field",
"path": "src/shmessy/schema.py",
"snippet": "class Field(InferredField, BaseField):\n pass"
},
{
"identifier": "BaseType",
"path": "src/shmessy/types/base.py",
"snippet": "class BaseType(ABC):\n weight: int = 0\n validator_types: Tuple[ValidatorTypes]\n\... | import logging
import os
from importlib import import_module
from types import ModuleType
from typing import Any, Dict, List, Optional, Type
from numpy import ndarray
from numpy.dtypes import (
BoolDType,
DateTime64DType,
Float16DType,
Float32DType,
Float64DType,
Int8DType,
Int16DType,
Int32DType,
Int64DType,
IntDType,
ObjectDType,
StrDType,
)
from pandas import Series
from .schema import Field
from .types.base import BaseType
from .types.boolean import BooleanType
from .types.datetime_ import DatetimeType
from .types.float import FloatType
from .types.integer import IntegerType
from .types.string import StringType | 1,977 |
logger = logging.getLogger(__name__)
class TypesHandler:
PACKAGE_NAME: str = "shmessy"
TYPES_DIR: str = "types"
def __init__(self):
self.__types = self._discover_types()
|
logger = logging.getLogger(__name__)
class TypesHandler:
PACKAGE_NAME: str = "shmessy"
TYPES_DIR: str = "types"
def __init__(self):
self.__types = self._discover_types() | self.__types_as_dict: Dict[str, BaseType] = self._types_as_dict(self.__types) | 1 | 2023-12-27 20:15:01+00:00 | 4k |
kokiez/solana-sniper | main.py | [
{
"identifier": "getSymbol",
"path": "birdeye.py",
"snippet": "def getSymbol(token):\r\n # usdc and usdt\r\n exclude = ['EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v', 'Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB']\r\n \r\n if token not in exclude:\r\n url = f\"https://api.dexscreene... | import base58, logging,time, re, os,sys, json
import threading
from datetime import datetime
from solders.keypair import Keypair
from solana.rpc.api import Client
from solana.rpc.commitment import Commitment
from configparser import ConfigParser
from threading import Thread, Event
from birdeye import getSymbol
from telethon import TelegramClient, events, errors
from amm_selection import select_amm2trade
from webhook import sendWebhook
from loadkey import load_keypair_from_file
| 3,558 |
# Pakages for Telegram
# Other Methods created
# ------------------------ ------------------------ ------------------------
# INTIALIZING VARIABLES
# ------------------------ ------------------------ ------------------------
# to read content from config.ini
config = ConfigParser()
# using sys and os because sometimes this shitty config reader does not read from curr directory
config.read(os.path.join(sys.path[0], 'data', 'config.ini'))
# Configuring the logging
log_file = os.path.join('data', f"logs.txt")
logging.basicConfig(level=logging.WARNING, filename=log_file, format='%(asctime)s|%(name)s|%(levelname)s|%(message)s',datefmt='%d-%b-%y %I:%M:%S %p')
def custom_exception_handler(exc_type, exc_value, exc_traceback):
# Log the exception automatically
logging.exception("An unhandled exception occurred: %s", str(exc_value))
sys.excepthook = custom_exception_handler
# Telegram settings
senderUserNames_to_monitor = config.get("TELEGRAM", "senderUserNames")
senderUserNames = senderUserNames_to_monitor.split(',')
session_name = config.get("TELEGRAM", "session_name")
api_id = config.getint("TELEGRAM", "API_ID")
api_hash = config.get("TELEGRAM", "API_HASH")
discord_msg_pattern = r'https?://birdeye\.so/token/(\w+)\?chain=solana'
CA_pattern = r'[1-9A-HJ-NP-Za-km-z]{32,44}'
# Infura settings - register at infura and get your mainnet url.
RPC_HTTPS_URL = config.get("INFURA_URL", "infuraURL")
# Wallets private key
private_key = config.get("WALLET", "private_key")
# Check if private key is in the form of ./something.json
if re.match(r'\w+\.json', private_key):
# Private key is in the form of ./something.json
|
# Pakages for Telegram
# Other Methods created
# ------------------------ ------------------------ ------------------------
# INTIALIZING VARIABLES
# ------------------------ ------------------------ ------------------------
# to read content from config.ini
config = ConfigParser()
# using sys and os because sometimes this shitty config reader does not read from curr directory
config.read(os.path.join(sys.path[0], 'data', 'config.ini'))
# Configuring the logging
log_file = os.path.join('data', f"logs.txt")
logging.basicConfig(level=logging.WARNING, filename=log_file, format='%(asctime)s|%(name)s|%(levelname)s|%(message)s',datefmt='%d-%b-%y %I:%M:%S %p')
def custom_exception_handler(exc_type, exc_value, exc_traceback):
# Log the exception automatically
logging.exception("An unhandled exception occurred: %s", str(exc_value))
sys.excepthook = custom_exception_handler
# Telegram settings
senderUserNames_to_monitor = config.get("TELEGRAM", "senderUserNames")
senderUserNames = senderUserNames_to_monitor.split(',')
session_name = config.get("TELEGRAM", "session_name")
api_id = config.getint("TELEGRAM", "API_ID")
api_hash = config.get("TELEGRAM", "API_HASH")
discord_msg_pattern = r'https?://birdeye\.so/token/(\w+)\?chain=solana'
CA_pattern = r'[1-9A-HJ-NP-Za-km-z]{32,44}'
# Infura settings - register at infura and get your mainnet url.
RPC_HTTPS_URL = config.get("INFURA_URL", "infuraURL")
# Wallets private key
private_key = config.get("WALLET", "private_key")
# Check if private key is in the form of ./something.json
if re.match(r'\w+\.json', private_key):
# Private key is in the form of ./something.json
| payer = load_keypair_from_file(private_key)
| 3 | 2023-12-26 11:40:05+00:00 | 4k |
CrawlScript/Torch-MGDCF | main_light_gcn.py | [
{
"identifier": "compute_bpr_loss",
"path": "torch_mgdcf/losses.py",
"snippet": "def compute_bpr_loss(a_embeddings, b_embeddings, pos_edges, reduction='mean'):\n \"\"\"\n bpr is a special case of info_bpr, where num_negs=1\n \"\"\"\n return compute_info_bpr_loss(a_embeddings, b_embeddings, p... | import os
import argparse
import torch
import torch.nn.functional as F
import numpy as np
import time
from torch_mgdcf.losses import compute_bpr_loss, compute_l2_loss
from torch_mgdcf.utils import create_tensor_dataloader
from torch_mgdcf.datasets import load_dataset
from torch_mgdcf.layers.light_gcn import LightGCN
from tqdm import tqdm
from torch_mgdcf.evaluation.ranking import evaluate_mean_global_metrics | 2,425 |
# set gpu id
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
np.set_printoptions(precision=4)
parser = argparse.ArgumentParser(description='Argument parser for the program.')
parser.add_argument('--dataset', type=str, default='light_gcn_yelp', help='Dataset Name')
parser.add_argument('--embedding_size', type=int, default=64, help='Embedding size')
parser.add_argument('--lr', type=float, default=1e-2, help='Learning rate')
parser.add_argument('--l2_coef', type=float, default=1e-4, help='L2 regularization coefficient')
parser.add_argument('--lr_decay', type=float, default=0.995, help='Learning rate decay')
parser.add_argument('--k', type=int, default=4, help='Number of layers')
parser.add_argument('--edge_drop_rate', type=float, default=0.15, help='Edge drop rate')
parser.add_argument('--batch_size', type=int, default=8000, help='Batch size')
parser.add_argument('--num_epochs', type=int, default=3000, help='Number of epochs')
args = parser.parse_args()
print(args)
dataset_name = args.dataset
embedding_size = args.embedding_size
lr = args.lr
l2_coef = args.l2_coef
lr_decay = args.lr_decay
k = args.k
edge_drop_rate = args.edge_drop_rate
batch_size = args.batch_size
num_epochs = args.num_epochs
device = "cuda"
num_users, num_items, user_item_edges, train_index, test_index, train_user_items_dict, test_user_items_dict = load_dataset(dataset_name)
train_user_item_edges = user_item_edges[train_index]
g = LightGCN.build_homo_graph(train_user_item_edges, num_users=num_users, num_items=num_items).to(device)
num_nodes = g.num_nodes()
embeddings = np.random.randn(num_nodes, embedding_size) / np.sqrt(embedding_size)
embeddings = torch.tensor(embeddings, dtype=torch.float32, requires_grad=True, device=device)
model = LightGCN(k=k, edge_drop_rate=edge_drop_rate).to(device)
def forward():
virtual_h = model(g, embeddings)
user_h = virtual_h[:num_users]
item_h = virtual_h[num_users:]
return user_h, item_h
def evaluate():
model.eval()
user_h, item_h = forward()
user_h = user_h.detach().cpu().numpy()
item_h = item_h.detach().cpu().numpy()
|
# set gpu id
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
np.set_printoptions(precision=4)
parser = argparse.ArgumentParser(description='Argument parser for the program.')
parser.add_argument('--dataset', type=str, default='light_gcn_yelp', help='Dataset Name')
parser.add_argument('--embedding_size', type=int, default=64, help='Embedding size')
parser.add_argument('--lr', type=float, default=1e-2, help='Learning rate')
parser.add_argument('--l2_coef', type=float, default=1e-4, help='L2 regularization coefficient')
parser.add_argument('--lr_decay', type=float, default=0.995, help='Learning rate decay')
parser.add_argument('--k', type=int, default=4, help='Number of layers')
parser.add_argument('--edge_drop_rate', type=float, default=0.15, help='Edge drop rate')
parser.add_argument('--batch_size', type=int, default=8000, help='Batch size')
parser.add_argument('--num_epochs', type=int, default=3000, help='Number of epochs')
args = parser.parse_args()
print(args)
dataset_name = args.dataset
embedding_size = args.embedding_size
lr = args.lr
l2_coef = args.l2_coef
lr_decay = args.lr_decay
k = args.k
edge_drop_rate = args.edge_drop_rate
batch_size = args.batch_size
num_epochs = args.num_epochs
device = "cuda"
num_users, num_items, user_item_edges, train_index, test_index, train_user_items_dict, test_user_items_dict = load_dataset(dataset_name)
train_user_item_edges = user_item_edges[train_index]
g = LightGCN.build_homo_graph(train_user_item_edges, num_users=num_users, num_items=num_items).to(device)
num_nodes = g.num_nodes()
embeddings = np.random.randn(num_nodes, embedding_size) / np.sqrt(embedding_size)
embeddings = torch.tensor(embeddings, dtype=torch.float32, requires_grad=True, device=device)
model = LightGCN(k=k, edge_drop_rate=edge_drop_rate).to(device)
def forward():
virtual_h = model(g, embeddings)
user_h = virtual_h[:num_users]
item_h = virtual_h[num_users:]
return user_h, item_h
def evaluate():
model.eval()
user_h, item_h = forward()
user_h = user_h.detach().cpu().numpy()
item_h = item_h.detach().cpu().numpy()
| mean_results_dict = evaluate_mean_global_metrics(test_user_items_dict, train_user_items_dict, | 5 | 2023-12-26 10:26:50+00:00 | 4k |
kraina-ai/quackosm | quackosm/pbf_file_reader.py | [
{
"identifier": "FEATURES_INDEX",
"path": "quackosm/_constants.py",
"snippet": "FEATURES_INDEX = \"feature_id\""
},
{
"identifier": "GEOMETRY_COLUMN",
"path": "quackosm/_constants.py",
"snippet": "GEOMETRY_COLUMN = \"geometry\""
},
{
"identifier": "WGS84_CRS",
"path": "quacko... | import hashlib
import json
import shutil
import tempfile
import warnings
import duckdb
import geoarrow.pyarrow as ga
import geopandas as gpd
import psutil
import pyarrow as pa
import pyarrow.parquet as pq
import shapely.wkt as wktlib
import quackosm._geo_arrow_io as io
from collections.abc import Iterable
from math import floor
from pathlib import Path
from typing import Any, Literal, NamedTuple, Optional, Union, cast
from shapely.geometry.base import BaseGeometry
from quackosm._constants import FEATURES_INDEX, GEOMETRY_COLUMN, WGS84_CRS
from quackosm._osm_tags_filters import GroupedOsmTagsFilter, OsmTagsFilter, merge_osm_tags_filter
from quackosm._osm_way_polygon_features import OsmWayPolygonConfig, parse_dict_to_config_object
from quackosm._rich_progress import ( # type: ignore[attr-defined]
TaskProgressBar,
TaskProgressSpinner,
)
from quackosm._typing import is_expected_type | 2,576 | """
PBF File Reader.
This module contains a reader capable of parsing a PBF file into a GeoDataFrame.
"""
__all__ = [
"PbfFileReader",
]
class PbfFileReader:
"""
PbfFileReader.
PBF(Protocolbuffer Binary Format)[1] file reader is a dedicated `*.osm.pbf` files reader
class based on DuckDB[2] and its spatial extension[3].
Handler can filter out OSM features based on tags filter and geometry filter
to limit the result.
References:
1. https://wiki.openstreetmap.org/wiki/PBF_Format
2. https://duckdb.org/
3. https://github.com/duckdb/duckdb_spatial
"""
class ConvertedOSMParquetFiles(NamedTuple):
"""List of parquet files read from the `*.osm.pbf` file."""
nodes_valid_with_tags: "duckdb.DuckDBPyRelation"
nodes_filtered_ids: "duckdb.DuckDBPyRelation"
ways_all_with_tags: "duckdb.DuckDBPyRelation"
ways_with_unnested_nodes_refs: "duckdb.DuckDBPyRelation"
ways_required_ids: "duckdb.DuckDBPyRelation"
ways_filtered_ids: "duckdb.DuckDBPyRelation"
relations_all_with_tags: "duckdb.DuckDBPyRelation"
relations_with_unnested_way_refs: "duckdb.DuckDBPyRelation"
relations_filtered_ids: "duckdb.DuckDBPyRelation"
class ParsedOSMFeatures(NamedTuple):
"""Final list of parsed features from the `*.osm.pbf` file."""
nodes: "duckdb.DuckDBPyRelation"
ways: "duckdb.DuckDBPyRelation"
relations: "duckdb.DuckDBPyRelation"
def __init__(
self,
tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None,
geometry_filter: Optional[BaseGeometry] = None,
working_directory: Union[str, Path] = "files",
osm_way_polygon_features_config: Optional[
Union[OsmWayPolygonConfig, dict[str, Any]]
] = None,
) -> None:
"""
Initialize PbfFileReader.
Args:
tags_filter (Union[OsmTagsFilter, GroupedOsmTagsFilter], optional): A dictionary
specifying which tags to download.
The keys should be OSM tags (e.g. `building`, `amenity`).
The values should either be `True` for retrieving all objects with the tag,
string for retrieving a single tag-value pair
or list of strings for retrieving all values specified in the list.
`tags={'leisure': 'park}` would return parks from the area.
`tags={'leisure': 'park, 'amenity': True, 'shop': ['bakery', 'bicycle']}`
would return parks, all amenity types, bakeries and bicycle shops.
If `None`, handler will allow all of the tags to be parsed. Defaults to `None`.
geometry_filter (BaseGeometry, optional): Region which can be used to filter only
intersecting OSM objects. Defaults to `None`.
working_directory (Union[str, Path], optional): Directory where to save
the parsed `*.parquet` files. Defaults to "files".
osm_way_polygon_features_config (Union[OsmWayPolygonConfig, dict[str, Any]], optional):
Config used to determine which closed way features are polygons.
Modifications to this config left are left for experienced OSM users.
Defaults to predefined "osm_way_polygon_features.json".
"""
self.tags_filter = tags_filter
| """
PBF File Reader.
This module contains a reader capable of parsing a PBF file into a GeoDataFrame.
"""
__all__ = [
"PbfFileReader",
]
class PbfFileReader:
"""
PbfFileReader.
PBF(Protocolbuffer Binary Format)[1] file reader is a dedicated `*.osm.pbf` files reader
class based on DuckDB[2] and its spatial extension[3].
Handler can filter out OSM features based on tags filter and geometry filter
to limit the result.
References:
1. https://wiki.openstreetmap.org/wiki/PBF_Format
2. https://duckdb.org/
3. https://github.com/duckdb/duckdb_spatial
"""
class ConvertedOSMParquetFiles(NamedTuple):
"""List of parquet files read from the `*.osm.pbf` file."""
nodes_valid_with_tags: "duckdb.DuckDBPyRelation"
nodes_filtered_ids: "duckdb.DuckDBPyRelation"
ways_all_with_tags: "duckdb.DuckDBPyRelation"
ways_with_unnested_nodes_refs: "duckdb.DuckDBPyRelation"
ways_required_ids: "duckdb.DuckDBPyRelation"
ways_filtered_ids: "duckdb.DuckDBPyRelation"
relations_all_with_tags: "duckdb.DuckDBPyRelation"
relations_with_unnested_way_refs: "duckdb.DuckDBPyRelation"
relations_filtered_ids: "duckdb.DuckDBPyRelation"
class ParsedOSMFeatures(NamedTuple):
"""Final list of parsed features from the `*.osm.pbf` file."""
nodes: "duckdb.DuckDBPyRelation"
ways: "duckdb.DuckDBPyRelation"
relations: "duckdb.DuckDBPyRelation"
def __init__(
self,
tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None,
geometry_filter: Optional[BaseGeometry] = None,
working_directory: Union[str, Path] = "files",
osm_way_polygon_features_config: Optional[
Union[OsmWayPolygonConfig, dict[str, Any]]
] = None,
) -> None:
"""
Initialize PbfFileReader.
Args:
tags_filter (Union[OsmTagsFilter, GroupedOsmTagsFilter], optional): A dictionary
specifying which tags to download.
The keys should be OSM tags (e.g. `building`, `amenity`).
The values should either be `True` for retrieving all objects with the tag,
string for retrieving a single tag-value pair
or list of strings for retrieving all values specified in the list.
`tags={'leisure': 'park}` would return parks from the area.
`tags={'leisure': 'park, 'amenity': True, 'shop': ['bakery', 'bicycle']}`
would return parks, all amenity types, bakeries and bicycle shops.
If `None`, handler will allow all of the tags to be parsed. Defaults to `None`.
geometry_filter (BaseGeometry, optional): Region which can be used to filter only
intersecting OSM objects. Defaults to `None`.
working_directory (Union[str, Path], optional): Directory where to save
the parsed `*.parquet` files. Defaults to "files".
osm_way_polygon_features_config (Union[OsmWayPolygonConfig, dict[str, Any]], optional):
Config used to determine which closed way features are polygons.
Modifications to this config left are left for experienced OSM users.
Defaults to predefined "osm_way_polygon_features.json".
"""
self.tags_filter = tags_filter | self.merged_tags_filter = merge_osm_tags_filter(tags_filter) if tags_filter else None | 3 | 2023-12-28 11:26:41+00:00 | 4k |
KyanChen/TTP | tools/analysis_tools/visualization_cam.py | [
{
"identifier": "inference_model",
"path": "mmseg/apis/inference.py",
"snippet": "def inference_model(model: BaseSegmentor,\n img: ImageType) -> Union[SegDataSample, SampleList]:\n \"\"\"Inference image(s) with the segmentor.\n\n Args:\n model (nn.Module): The loaded segm... | from argparse import ArgumentParser
from mmengine import Config
from mmengine.model import revert_sync_batchnorm
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import preprocess_image, show_cam_on_image
from mmseg.apis import inference_model, init_model, show_result_pyplot
from mmseg.utils import register_all_modules
import numpy as np
import torch
import torch.nn.functional as F | 2,608 | # Copyright (c) OpenMMLab. All rights reserved.
"""Use the pytorch-grad-cam tool to visualize Class Activation Maps (CAM).
requirement: pip install grad-cam
"""
class SemanticSegmentationTarget:
"""wrap the model.
requirement: pip install grad-cam
Args:
category (int): Visualization class.
mask (ndarray): Mask of class.
size (tuple): Image size.
"""
def __init__(self, category, mask, size):
self.category = category
self.mask = torch.from_numpy(mask)
self.size = size
if torch.cuda.is_available():
self.mask = self.mask.cuda()
def __call__(self, model_output):
model_output = torch.unsqueeze(model_output, dim=0)
model_output = F.interpolate(
model_output, size=self.size, mode='bilinear')
model_output = torch.squeeze(model_output, dim=0)
return (model_output[self.category, :, :] * self.mask).sum()
def main():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--out-file',
default='prediction.png',
help='Path to output prediction file')
parser.add_argument(
'--cam-file', default='vis_cam.png', help='Path to output cam file')
parser.add_argument(
'--target-layers',
default='backbone.layer4[2]',
help='Target layers to visualize CAM')
parser.add_argument(
'--category-index', default='7', help='Category to visualize CAM')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
args = parser.parse_args()
# build the model from a config file and a checkpoint file
register_all_modules()
model = init_model(args.config, args.checkpoint, device=args.device)
if args.device == 'cpu':
model = revert_sync_batchnorm(model)
# test a single image
result = inference_model(model, args.img)
# show the results
| # Copyright (c) OpenMMLab. All rights reserved.
"""Use the pytorch-grad-cam tool to visualize Class Activation Maps (CAM).
requirement: pip install grad-cam
"""
class SemanticSegmentationTarget:
"""wrap the model.
requirement: pip install grad-cam
Args:
category (int): Visualization class.
mask (ndarray): Mask of class.
size (tuple): Image size.
"""
def __init__(self, category, mask, size):
self.category = category
self.mask = torch.from_numpy(mask)
self.size = size
if torch.cuda.is_available():
self.mask = self.mask.cuda()
def __call__(self, model_output):
model_output = torch.unsqueeze(model_output, dim=0)
model_output = F.interpolate(
model_output, size=self.size, mode='bilinear')
model_output = torch.squeeze(model_output, dim=0)
return (model_output[self.category, :, :] * self.mask).sum()
def main():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--out-file',
default='prediction.png',
help='Path to output prediction file')
parser.add_argument(
'--cam-file', default='vis_cam.png', help='Path to output cam file')
parser.add_argument(
'--target-layers',
default='backbone.layer4[2]',
help='Target layers to visualize CAM')
parser.add_argument(
'--category-index', default='7', help='Category to visualize CAM')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
args = parser.parse_args()
# build the model from a config file and a checkpoint file
register_all_modules()
model = init_model(args.config, args.checkpoint, device=args.device)
if args.device == 'cpu':
model = revert_sync_batchnorm(model)
# test a single image
result = inference_model(model, args.img)
# show the results | show_result_pyplot( | 2 | 2023-12-23 08:36:47+00:00 | 4k |
N0rz3/Phunter | lib/cli.py | [
{
"identifier": "lookup",
"path": "lib/lookup.py",
"snippet": "async def lookup(phone_number):\r\n print()\r\n parsed = phonenumbers.parse(phone_number)\r\n\r\n operator = carrier.name_for_number(parsed, \"fr\")\r\n line = phonenumbers.number_type(parsed)\r\n\r\n if line == phonenumbers.P... | import argparse
import time
from .lookup import lookup
from .account import Amazon
from .annuaire import Annuaire
from .text import *
from .verify import *
| 2,432 |
async def parser():
parse = argparse.ArgumentParser()
parse.add_argument(
'-t', '--target',
nargs='?',
type=str,
default=None,
help='get info by phone number'
)
parse.add_argument(
'-a', '--amazon',
nargs='?',
type=str,
default=None,
help='get confirmation whether Amazon linked by phone number'
)
parse.add_argument(
'-p', '--person',
nargs='?',
type=str,
default=None,
help='get owner of phone number with inversed annual (Page Blanche)'
)
parse.add_argument(
'-f', '--file',
nargs='?',
type=str,
default=None,
help='get info by a file containing phone numbers'
)
parse.add_argument(
'-v', '--verify',
action='store_true',
help='check your version, update(s), services...'
)
parse.add_argument(
'-o', '--output',
nargs='?',
type=str,
default=None,
help='give a file to save the output (only with args: --amazon/-a , --person/-p)'
)
args = parse.parse_args()
if args.file:
start = time.time()
with open(args.file, 'r') as file:
if args.file.endswith(".txt"):
nums = file.read().split('\n')
checked = 0
for num in nums:
|
async def parser():
parse = argparse.ArgumentParser()
parse.add_argument(
'-t', '--target',
nargs='?',
type=str,
default=None,
help='get info by phone number'
)
parse.add_argument(
'-a', '--amazon',
nargs='?',
type=str,
default=None,
help='get confirmation whether Amazon linked by phone number'
)
parse.add_argument(
'-p', '--person',
nargs='?',
type=str,
default=None,
help='get owner of phone number with inversed annual (Page Blanche)'
)
parse.add_argument(
'-f', '--file',
nargs='?',
type=str,
default=None,
help='get info by a file containing phone numbers'
)
parse.add_argument(
'-v', '--verify',
action='store_true',
help='check your version, update(s), services...'
)
parse.add_argument(
'-o', '--output',
nargs='?',
type=str,
default=None,
help='give a file to save the output (only with args: --amazon/-a , --person/-p)'
)
args = parse.parse_args()
if args.file:
start = time.time()
with open(args.file, 'r') as file:
if args.file.endswith(".txt"):
nums = file.read().split('\n')
checked = 0
for num in nums:
| await lookup(num)
| 0 | 2023-12-30 13:21:14+00:00 | 4k |
vpetersson/podcast-rss-generator | tests/test_rss_generator.py | [
{
"identifier": "convert_iso_to_rfc2822",
"path": "rss_generator.py",
"snippet": "def convert_iso_to_rfc2822(iso_date):\n date_obj = datetime.fromisoformat(iso_date)\n return format_datetime(date_obj)"
},
{
"identifier": "generate_rss",
"path": "rss_generator.py",
"snippet": "def g... | import os
import unittest
from xml.etree import ElementTree as ET
from rss_generator import (convert_iso_to_rfc2822, generate_rss, get_file_info,
read_podcast_config) | 1,682 |
CONFIG_FILE = "podcast_config.example.yaml"
class TestRSSGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Read the configuration and generate the RSS feed once for all tests
|
CONFIG_FILE = "podcast_config.example.yaml"
class TestRSSGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Read the configuration and generate the RSS feed once for all tests | cls.config = read_podcast_config(CONFIG_FILE) | 3 | 2023-12-23 09:47:39+00:00 | 4k |
SkierProjects/MultiLabelImageClassificationPytorch | src/utils/dataset/image_dataset.py | [
{
"identifier": "config",
"path": "src/config.py",
"snippet": "class config:\n \"\"\"\n Configuration class for holding model and training parameters.\n \"\"\"\n\n # Default static property values\n model_name = 'regnet_y_16gf'\n model_requires_grad = True\n num_classes = 31\n mo... | import torch
import hashlib
import cv2
import numpy as np
import torchvision.transforms as transforms
import pandas as pd
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
from src.config import config
from src.utils.logging.loggerfactory import LoggerFactory | 2,946 | logger = LoggerFactory.get_logger(f"logger.{__name__}")
class ImageDataset(Dataset):
"""
A dataset class for loading and transforming images for model training and evaluation.
"""
| logger = LoggerFactory.get_logger(f"logger.{__name__}")
class ImageDataset(Dataset):
"""
A dataset class for loading and transforming images for model training and evaluation.
"""
| def __init__(self, csv, mode, random_state=42, config=config): | 0 | 2023-12-25 18:45:52+00:00 | 4k |
the-seeds/imitater | src/imitater/service/app.py | [
{
"identifier": "ChatModel",
"path": "src/imitater/model/chat_model.py",
"snippet": "class ChatModel:\n def __init__(self) -> None:\n if int(os.environ.get(\"ENABLE_ATTN_BIAS\")):\n llama_attn_bias_monkey_patch()\n\n engine_args = AsyncEngineArgs(model=os.environ.get(\"CHAT_M... | import os
import uuid
import uvicorn
from contextlib import asynccontextmanager
from typing import Any, Dict
from fastapi import FastAPI, status
from fastapi.middleware.cors import CORSMiddleware
from sse_starlette import EventSourceResponse
from ..model.chat_model import ChatModel
from ..model.embed_model import EmbedModel
from ..utils.generic import dictify, jsonify, torch_gc
from .protocol import (
ChatCompletionRequest,
ChatCompletionResponse,
ChatCompletionResponseChoice,
ChatCompletionStreamResponse,
ChatCompletionStreamResponseChoice,
ChatMessage,
DeltaMessage,
Embeddings,
EmbeddingsRequest,
EmbeddingsResponse,
Finish,
ModelCard,
ModelList,
Role,
UsageInfo,
) | 2,869 |
@asynccontextmanager
async def lifespan(app: "FastAPI") -> None:
yield
torch_gc()
def launch_app() -> None:
app = FastAPI(lifespan=lifespan)
chat_model = ChatModel()
embed_model = EmbedModel()
app.add_middleware(
CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"]
)
@app.get("/v1/models", response_model=ModelList)
async def list_models():
model_card = ModelCard(id="gpt-3.5-turbo")
return ModelList(data=[model_card])
@app.post("/v1/embeddings", response_model=EmbeddingsResponse, status_code=status.HTTP_200_OK)
async def create_embeddings(request: EmbeddingsRequest):
texts = request.input
if isinstance(texts, str):
texts = [texts]
embed_output = await embed_model(texts)
embeddings = []
for i in range(len(embed_output)):
embeddings.append(Embeddings(embedding=embed_output[i], index=i))
return EmbeddingsResponse(
data=embeddings,
model=request.model,
usage=UsageInfo(prompt_tokens=0, completion_tokens=None, total_tokens=0),
)
@app.post("/v1/chat/completions", response_model=ChatCompletionResponse, status_code=status.HTTP_200_OK)
async def create_chat_completion(request: ChatCompletionRequest):
input_kwargs = {
"messages": [dictify(message) for message in request.messages],
"request_id": "chatcmpl-{}".format(uuid.uuid4().hex),
"temperature": request.temperature,
"top_p": request.top_p,
"max_tokens": request.max_tokens,
}
if request.stream:
generator = create_stream_chat_completion(request, input_kwargs)
return EventSourceResponse(generator, media_type="text/event-stream")
response = await chat_model.chat(**input_kwargs)
choice = ChatCompletionResponseChoice(
index=0, message=ChatMessage(role=Role.ASSISTANT, content=response), finish_reason=Finish.STOP
)
return ChatCompletionResponse(
id=input_kwargs["request_id"],
model=request.model,
choices=[choice],
usage=UsageInfo(prompt_tokens=0, completion_tokens=0, total_tokens=0),
)
async def create_stream_chat_completion(request: ChatCompletionRequest, input_kwargs: Dict[str, Any]):
choice = ChatCompletionStreamResponseChoice(
index=0, delta=DeltaMessage(role=Role.ASSISTANT, content=""), finish_reason=None
)
chunk = ChatCompletionStreamResponse(id=input_kwargs["request_id"], model=request.model, choices=[choice])
|
@asynccontextmanager
async def lifespan(app: "FastAPI") -> None:
yield
torch_gc()
def launch_app() -> None:
app = FastAPI(lifespan=lifespan)
chat_model = ChatModel()
embed_model = EmbedModel()
app.add_middleware(
CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"]
)
@app.get("/v1/models", response_model=ModelList)
async def list_models():
model_card = ModelCard(id="gpt-3.5-turbo")
return ModelList(data=[model_card])
@app.post("/v1/embeddings", response_model=EmbeddingsResponse, status_code=status.HTTP_200_OK)
async def create_embeddings(request: EmbeddingsRequest):
texts = request.input
if isinstance(texts, str):
texts = [texts]
embed_output = await embed_model(texts)
embeddings = []
for i in range(len(embed_output)):
embeddings.append(Embeddings(embedding=embed_output[i], index=i))
return EmbeddingsResponse(
data=embeddings,
model=request.model,
usage=UsageInfo(prompt_tokens=0, completion_tokens=None, total_tokens=0),
)
@app.post("/v1/chat/completions", response_model=ChatCompletionResponse, status_code=status.HTTP_200_OK)
async def create_chat_completion(request: ChatCompletionRequest):
input_kwargs = {
"messages": [dictify(message) for message in request.messages],
"request_id": "chatcmpl-{}".format(uuid.uuid4().hex),
"temperature": request.temperature,
"top_p": request.top_p,
"max_tokens": request.max_tokens,
}
if request.stream:
generator = create_stream_chat_completion(request, input_kwargs)
return EventSourceResponse(generator, media_type="text/event-stream")
response = await chat_model.chat(**input_kwargs)
choice = ChatCompletionResponseChoice(
index=0, message=ChatMessage(role=Role.ASSISTANT, content=response), finish_reason=Finish.STOP
)
return ChatCompletionResponse(
id=input_kwargs["request_id"],
model=request.model,
choices=[choice],
usage=UsageInfo(prompt_tokens=0, completion_tokens=0, total_tokens=0),
)
async def create_stream_chat_completion(request: ChatCompletionRequest, input_kwargs: Dict[str, Any]):
choice = ChatCompletionStreamResponseChoice(
index=0, delta=DeltaMessage(role=Role.ASSISTANT, content=""), finish_reason=None
)
chunk = ChatCompletionStreamResponse(id=input_kwargs["request_id"], model=request.model, choices=[choice]) | yield jsonify(chunk) | 3 | 2023-12-31 07:21:06+00:00 | 4k |
dan-r/HomeAssistant-Ohme | custom_components/ohme/api_client.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/ohme/const.py",
"snippet": "DOMAIN = \"ohme\""
},
{
"identifier": "USER_AGENT",
"path": "custom_components/ohme/const.py",
"snippet": "USER_AGENT = \"dan-r-homeassistant-ohme\""
},
{
"identifier": "INTEGRATION_VERSION",
"pa... | import aiohttp
import logging
import json
from time import time
from datetime import datetime, timedelta
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN, USER_AGENT, INTEGRATION_VERSION
from .utils import time_next_occurs | 2,428 |
def is_capable(self, capability):
"""Return whether or not this model has a given capability."""
return bool(self._capabilities[capability])
def get_device_info(self):
return self._device_info
def get_unique_id(self, name):
return f"ohme_{self._serial}_{name}"
# Push methods
async def async_pause_charge(self):
"""Pause an ongoing charge"""
result = await self._post_request(f"/v1/chargeSessions/{self._serial}/stop", skip_json=True)
return bool(result)
async def async_resume_charge(self):
"""Resume a paused charge"""
result = await self._post_request(f"/v1/chargeSessions/{self._serial}/resume", skip_json=True)
return bool(result)
async def async_approve_charge(self):
"""Approve a charge"""
result = await self._put_request(f"/v1/chargeSessions/{self._serial}/approve?approve=true")
return bool(result)
async def async_max_charge(self):
"""Enable max charge"""
result = await self._put_request(f"/v1/chargeSessions/{self._serial}/rule?maxCharge=true")
return bool(result)
async def async_apply_session_rule(self, max_price=None, target_time=None, target_percent=None, pre_condition=None, pre_condition_length=None):
"""Apply rule to ongoing charge/stop max charge."""
# Check every property. If we've provided it, use that. If not, use the existing.
if max_price is None:
max_price = self._last_rule['settings'][0]['enabled'] if 'settings' in self._last_rule and len(
self._last_rule['settings']) > 1 else False
if target_percent is None:
target_percent = self._last_rule['targetPercent'] if 'targetPercent' in self._last_rule else 80
if pre_condition is None:
pre_condition = self._last_rule['preconditioningEnabled'] if 'preconditioningEnabled' in self._last_rule else False
if pre_condition_length is None:
pre_condition_length = self._last_rule[
'preconditionLengthMins'] if 'preconditionLengthMins' in self._last_rule else 30
if target_time is None:
# Default to 9am
target_time = self._last_rule['targetTime'] if 'targetTime' in self._last_rule else 32400
target_time = (target_time // 3600,
(target_time % 3600) // 60)
target_ts = int(time_next_occurs(
target_time[0], target_time[1]).timestamp() * 1000)
# Convert these to string form
max_price = 'true' if max_price else 'false'
pre_condition = 'true' if pre_condition else 'false'
result = await self._put_request(f"/v1/chargeSessions/{self._serial}/rule?enableMaxPrice={max_price}&targetTs={target_ts}&enablePreconditioning={pre_condition}&toPercent={target_percent}&preconditionLengthMins={pre_condition_length}")
return bool(result)
async def async_get_schedule(self):
"""Get the first schedule."""
schedules = await self._get_request("/v1/chargeRules")
return schedules[0] if len(schedules) > 0 else None
async def async_update_schedule(self, target_percent=None, target_time=None):
"""Update the first listed schedule."""
rule = await self.async_get_schedule()
# Account for user having no rules
if not rule:
return None
# Update percent and time if provided
if target_percent is not None:
rule['targetPercent'] = target_percent
if target_time is not None:
rule['targetTime'] = (target_time[0] * 3600) + (target_time[1] * 60)
await self._put_request(f"/v1/chargeRules/{rule['id']}", data=rule)
return True
async def async_set_configuration_value(self, values):
"""Set a configuration value or values."""
result = await self._put_request(f"/v1/chargeDevices/{self._serial}/appSettings", data=values)
return bool(result)
# Pull methods
async def async_get_charge_sessions(self, is_retry=False):
"""Try to fetch charge sessions endpoint.
If we get a non 200 response, refresh auth token and try again"""
resp = await self._get_request('/v1/chargeSessions')
resp = resp[0]
# Cache the current rule if we are given it
if resp["mode"] == "SMART_CHARGE" and 'appliedRule' in resp:
self._last_rule = resp["appliedRule"]
return resp
async def async_get_account_info(self):
resp = await self._get_request('/v1/users/me/account')
return resp
async def async_update_device_info(self, is_retry=False):
"""Update _device_info with our charger model."""
resp = await self.async_get_account_info()
device = resp['chargeDevices'][0]
info = DeviceInfo(
|
_LOGGER = logging.getLogger(__name__)
GOOGLE_API_KEY = "AIzaSyC8ZeZngm33tpOXLpbXeKfwtyZ1WrkbdBY"
class OhmeApiClient:
"""API client for Ohme EV chargers."""
def __init__(self, email, password):
if email is None or password is None:
raise Exception("Credentials not provided")
# Credentials from configuration
self._email = email
self._password = password
# Charger and its capabilities
self._device_info = None
self._capabilities = {}
self._ct_connected = False
# Authentication
self._token_birth = 0
self._token = None
self._refresh_token = None
# User info
self._user_id = ""
self._serial = ""
# Cache the last rule to use when we disable max charge or change schedule
self._last_rule = {}
# Sessions
self._session = aiohttp.ClientSession(
base_url="https://api.ohme.io")
self._auth_session = aiohttp.ClientSession()
# Auth methods
async def async_create_session(self):
"""Refresh the user auth token from the stored credentials."""
async with self._auth_session.post(
f"https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key={GOOGLE_API_KEY}",
data={"email": self._email, "password": self._password,
"returnSecureToken": True}
) as resp:
if resp.status != 200:
return None
resp_json = await resp.json()
self._token_birth = time()
self._token = resp_json['idToken']
self._refresh_token = resp_json['refreshToken']
return True
async def async_refresh_session(self):
"""Refresh auth token if needed."""
if self._token is None:
return await self.async_create_session()
# Don't refresh token unless its over 45 mins old
if time() - self._token_birth < 2700:
return
async with self._auth_session.post(
f"https://securetoken.googleapis.com/v1/token?key={GOOGLE_API_KEY}",
data={"grantType": "refresh_token",
"refreshToken": self._refresh_token}
) as resp:
if resp.status != 200:
text = await resp.text()
msg = f"Ohme auth refresh error: {text}"
_LOGGER.error(msg)
raise AuthException(msg)
resp_json = await resp.json()
self._token_birth = time()
self._token = resp_json['id_token']
self._refresh_token = resp_json['refresh_token']
return True
# Internal methods
def _last_second_of_month_timestamp(self):
"""Get the last second of this month."""
dt = datetime.today()
dt = dt.replace(day=1) + timedelta(days=32)
dt = dt.replace(day=1, hour=0, minute=0, second=0,
microsecond=0) - timedelta(seconds=1)
return int(dt.timestamp()*1e3)
async def _handle_api_error(self, url, resp):
"""Raise an exception if API response failed."""
if resp.status != 200:
text = await resp.text()
msg = f"Ohme API response error: {url}, {resp.status}; {text}"
_LOGGER.error(msg)
raise ApiException(msg)
def _get_headers(self):
"""Get auth and content-type headers"""
return {
"Authorization": "Firebase %s" % self._token,
"Content-Type": "application/json",
"User-Agent": f"{USER_AGENT}/{INTEGRATION_VERSION}"
}
async def _post_request(self, url, skip_json=False, data=None):
"""Make a POST request."""
await self.async_refresh_session()
async with self._session.post(
url,
data=data,
headers=self._get_headers()
) as resp:
_LOGGER.debug(f"POST request to {url}, status code {resp.status}")
await self._handle_api_error(url, resp)
if skip_json:
return await resp.text()
return await resp.json()
async def _put_request(self, url, data=None):
"""Make a PUT request."""
await self.async_refresh_session()
async with self._session.put(
url,
data=json.dumps(data),
headers=self._get_headers()
) as resp:
_LOGGER.debug(f"PUT request to {url}, status code {resp.status}")
await self._handle_api_error(url, resp)
return True
async def _get_request(self, url):
"""Make a GET request."""
await self.async_refresh_session()
async with self._session.get(
url,
headers=self._get_headers()
) as resp:
_LOGGER.debug(f"GET request to {url}, status code {resp.status}")
await self._handle_api_error(url, resp)
return await resp.json()
# Simple getters
def ct_connected(self):
"""Is CT clamp connected."""
return self._ct_connected
def is_capable(self, capability):
"""Return whether or not this model has a given capability."""
return bool(self._capabilities[capability])
def get_device_info(self):
return self._device_info
def get_unique_id(self, name):
return f"ohme_{self._serial}_{name}"
# Push methods
async def async_pause_charge(self):
"""Pause an ongoing charge"""
result = await self._post_request(f"/v1/chargeSessions/{self._serial}/stop", skip_json=True)
return bool(result)
async def async_resume_charge(self):
"""Resume a paused charge"""
result = await self._post_request(f"/v1/chargeSessions/{self._serial}/resume", skip_json=True)
return bool(result)
async def async_approve_charge(self):
"""Approve a charge"""
result = await self._put_request(f"/v1/chargeSessions/{self._serial}/approve?approve=true")
return bool(result)
async def async_max_charge(self):
"""Enable max charge"""
result = await self._put_request(f"/v1/chargeSessions/{self._serial}/rule?maxCharge=true")
return bool(result)
async def async_apply_session_rule(self, max_price=None, target_time=None, target_percent=None, pre_condition=None, pre_condition_length=None):
"""Apply rule to ongoing charge/stop max charge."""
# Check every property. If we've provided it, use that. If not, use the existing.
if max_price is None:
max_price = self._last_rule['settings'][0]['enabled'] if 'settings' in self._last_rule and len(
self._last_rule['settings']) > 1 else False
if target_percent is None:
target_percent = self._last_rule['targetPercent'] if 'targetPercent' in self._last_rule else 80
if pre_condition is None:
pre_condition = self._last_rule['preconditioningEnabled'] if 'preconditioningEnabled' in self._last_rule else False
if pre_condition_length is None:
pre_condition_length = self._last_rule[
'preconditionLengthMins'] if 'preconditionLengthMins' in self._last_rule else 30
if target_time is None:
# Default to 9am
target_time = self._last_rule['targetTime'] if 'targetTime' in self._last_rule else 32400
target_time = (target_time // 3600,
(target_time % 3600) // 60)
target_ts = int(time_next_occurs(
target_time[0], target_time[1]).timestamp() * 1000)
# Convert these to string form
max_price = 'true' if max_price else 'false'
pre_condition = 'true' if pre_condition else 'false'
result = await self._put_request(f"/v1/chargeSessions/{self._serial}/rule?enableMaxPrice={max_price}&targetTs={target_ts}&enablePreconditioning={pre_condition}&toPercent={target_percent}&preconditionLengthMins={pre_condition_length}")
return bool(result)
async def async_get_schedule(self):
"""Get the first schedule."""
schedules = await self._get_request("/v1/chargeRules")
return schedules[0] if len(schedules) > 0 else None
async def async_update_schedule(self, target_percent=None, target_time=None):
"""Update the first listed schedule."""
rule = await self.async_get_schedule()
# Account for user having no rules
if not rule:
return None
# Update percent and time if provided
if target_percent is not None:
rule['targetPercent'] = target_percent
if target_time is not None:
rule['targetTime'] = (target_time[0] * 3600) + (target_time[1] * 60)
await self._put_request(f"/v1/chargeRules/{rule['id']}", data=rule)
return True
async def async_set_configuration_value(self, values):
"""Set a configuration value or values."""
result = await self._put_request(f"/v1/chargeDevices/{self._serial}/appSettings", data=values)
return bool(result)
# Pull methods
async def async_get_charge_sessions(self, is_retry=False):
"""Try to fetch charge sessions endpoint.
If we get a non 200 response, refresh auth token and try again"""
resp = await self._get_request('/v1/chargeSessions')
resp = resp[0]
# Cache the current rule if we are given it
if resp["mode"] == "SMART_CHARGE" and 'appliedRule' in resp:
self._last_rule = resp["appliedRule"]
return resp
async def async_get_account_info(self):
resp = await self._get_request('/v1/users/me/account')
return resp
async def async_update_device_info(self, is_retry=False):
"""Update _device_info with our charger model."""
resp = await self.async_get_account_info()
device = resp['chargeDevices'][0]
info = DeviceInfo( | identifiers={(DOMAIN, "ohme_charger")}, | 0 | 2023-12-24 20:59:18+00:00 | 4k |
Almas-Ali/SpyIP | spyip/backend.py | [
{
"identifier": "TooManyRequests",
"path": "spyip/exceptions.py",
"snippet": "class TooManyRequests(Exception):\n pass"
},
{
"identifier": "ConnectionTimeout",
"path": "spyip/exceptions.py",
"snippet": "class ConnectionTimeout(Exception):\n pass"
},
{
"identifier": "StatusE... | from typing import List, Union
from .exceptions import (
TooManyRequests,
ConnectionTimeout,
StatusError,
)
from .models import (
IPResponse,
DNSResponse,
)
import asyncio
import random
import string
import httpx | 1,750 |
def get_random_string(length: int = 32) -> str:
"""Generate a random string of fixed length."""
letters = string.ascii_lowercase + string.digits
return ''.join(random.sample(letters, length))
# API endpoints for IP address lookup
trace_me_url = 'http://ip-api.com/json/'
trace_ip_url = 'http://ip-api.com/json/%(query)s'
trace_dns_url = f'http://{get_random_string(32)}.edns.ip-api.com/json/'
trace_ip_batch_url = 'http://ip-api.com/batch'
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
}
def trace_me(
timeout: int = 5,
lang: str = 'en',
) -> Union[IPResponse, None]:
"""Trace your own IP address."""
try:
res = httpx.get(
url=trace_me_url,
params={'fields': 66842623, 'lang': lang},
headers=headers,
timeout=timeout,
)
if res.status_code == 200:
return IPResponse(**res.json())
else:
raise StatusError(f'Invalid status code: {res.status_code}. Expected 200.')
# 408 Request Timeout
except httpx._exceptions.ConnectTimeout:
raise ConnectionTimeout(
'Connection timeout. The server timed out waiting for the request. According to the HTTP specification, the client is allowed to repeat the request again after some time.'
)
# 429 Too Many Requests
except httpx._exceptions.TooManyRedirects:
raise TooManyRequests(
'Too many requests. Our endpoints are limited to 45 HTTP requests per minute from an IP address. If you go over this limit your requests will be throttled (HTTP 429) until your rate limit window is reset.'
)
def trace_ip(
query: str,
timeout: int = 5,
lang: str = 'en',
) -> IPResponse:
"""Trace IP address"""
try:
res = httpx.get(
url=trace_ip_url % {'query': query},
params={'fields': 66842623, 'lang': lang},
headers=headers,
timeout=timeout,
)
if res.status_code == 200:
return IPResponse(**res.json())
else:
raise StatusError(f'Invalid status code: {res.status_code}. Expected 200.')
# 408 Request Timeout
except httpx._exceptions.ConnectTimeout:
raise ConnectionTimeout('The server timed out waiting for the request.')
# 429 Too Many Requests
except httpx._exceptions.TooManyRedirects:
raise TooManyRequests(
'Too many requests. Our endpoints are limited to 45 HTTP requests per minute from an IP address. If you go over this limit your requests will be throttled (HTTP 429) until your rate limit window is reset.'
)
def trace_dns(
timeout: int = 5,
lang: str = 'en',
) -> IPResponse:
"""Trace your own DNS address."""
try:
res = httpx.get(
url=trace_dns_url,
params={'fields': 66842623, 'lang': lang},
headers=headers,
timeout=timeout,
)
if res.status_code == 200:
|
def get_random_string(length: int = 32) -> str:
"""Generate a random string of fixed length."""
letters = string.ascii_lowercase + string.digits
return ''.join(random.sample(letters, length))
# API endpoints for IP address lookup
trace_me_url = 'http://ip-api.com/json/'
trace_ip_url = 'http://ip-api.com/json/%(query)s'
trace_dns_url = f'http://{get_random_string(32)}.edns.ip-api.com/json/'
trace_ip_batch_url = 'http://ip-api.com/batch'
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
}
def trace_me(
timeout: int = 5,
lang: str = 'en',
) -> Union[IPResponse, None]:
"""Trace your own IP address."""
try:
res = httpx.get(
url=trace_me_url,
params={'fields': 66842623, 'lang': lang},
headers=headers,
timeout=timeout,
)
if res.status_code == 200:
return IPResponse(**res.json())
else:
raise StatusError(f'Invalid status code: {res.status_code}. Expected 200.')
# 408 Request Timeout
except httpx._exceptions.ConnectTimeout:
raise ConnectionTimeout(
'Connection timeout. The server timed out waiting for the request. According to the HTTP specification, the client is allowed to repeat the request again after some time.'
)
# 429 Too Many Requests
except httpx._exceptions.TooManyRedirects:
raise TooManyRequests(
'Too many requests. Our endpoints are limited to 45 HTTP requests per minute from an IP address. If you go over this limit your requests will be throttled (HTTP 429) until your rate limit window is reset.'
)
def trace_ip(
query: str,
timeout: int = 5,
lang: str = 'en',
) -> IPResponse:
"""Trace IP address"""
try:
res = httpx.get(
url=trace_ip_url % {'query': query},
params={'fields': 66842623, 'lang': lang},
headers=headers,
timeout=timeout,
)
if res.status_code == 200:
return IPResponse(**res.json())
else:
raise StatusError(f'Invalid status code: {res.status_code}. Expected 200.')
# 408 Request Timeout
except httpx._exceptions.ConnectTimeout:
raise ConnectionTimeout('The server timed out waiting for the request.')
# 429 Too Many Requests
except httpx._exceptions.TooManyRedirects:
raise TooManyRequests(
'Too many requests. Our endpoints are limited to 45 HTTP requests per minute from an IP address. If you go over this limit your requests will be throttled (HTTP 429) until your rate limit window is reset.'
)
def trace_dns(
timeout: int = 5,
lang: str = 'en',
) -> IPResponse:
"""Trace your own DNS address."""
try:
res = httpx.get(
url=trace_dns_url,
params={'fields': 66842623, 'lang': lang},
headers=headers,
timeout=timeout,
)
if res.status_code == 200: | return DNSResponse(**res.json()['dns']) | 4 | 2023-12-31 19:43:38+00:00 | 4k |
leopedroso45/Stable-Diffusion-ImageGen | sevsd/do_work.py | [
{
"identifier": "setup_pipeline",
"path": "sevsd/setup_pipeline.py",
"snippet": "def setup_pipeline(pretrained_model_link_or_path, **kwargs):\n r\"\"\"\n Sets up and returns a Stable Diffusion pipeline for image generation.\n\n This function initializes the Stable Diffusion pipeline using eithe... | from sevsd.setup_pipeline import setup_pipeline
from sevsd.process_task import process_task | 1,640 |
def do_work(models, jobs, image_path, parallel_exec=True, **kwargs):
r"""
Orchestrates the processing of image generation tasks based on given models and jobs.
This function iterates over each model and the associated jobs, generating images as specified. It sets up the pipeline for each model and executes the image generation tasks, saving the results to the specified path.
Parameters:
models (list of dicts): List of model configurations. Each configuration includes:
- 'name' (str): The model name or path.
- 'executor' (dict): Parameters like 'labels', 'num_of_exec', 'cfg_scale', and 'inference_steps'.
jobs (list of dicts): List of job configurations. Each job includes:
- 'label' (int): Corresponding model label.
- 'prompt' (str): Text prompt for image generation.
- 'negative_prompt' (str, optional): Text prompt for undesired image features.
image_path (str): Directory path to save the generated images.
parallel_exec (bool, optional): Flag to enable parallel execution. Defaults to True.
**kwargs: Additional keyword arguments for pipeline setup.
Example:
models = [
{
"name": "CompVis/stable-diffusion-v1-4",
"executor": {
"labels": [1],
"num_of_exec": 1,
"cfg_scale": 7,
"inference_steps": 100,
}
},
{
"name": "./model_cache/model2.safetensors",
"executor": {
"labels": [2],
"num_of_exec": 2,
"cfg_scale": 6,
"inference_steps": 50,
}
},
]
jobs = [
{
"label": 1,
"prompt": "A scenic landscape",
"negative_prompt": "blurred image, black and white, watermarked image",
},
{
"label": 2,
"prompt": "A person wearing a mask",
"negative_prompt": "deformed anatomy, hand-drawn image, blurred image",
},
]
do_work(models, jobs, "./generated-images")
"""
job_dict = {job['label']: [] for job in jobs}
for job in jobs:
job_dict[job['label']].append(job)
for model in models:
pipeline = setup_pipeline(model["name"], **kwargs)
labels = model.get("executor", {}).get("labels", [])
for label in labels:
if label in job_dict:
for job in job_dict[label]:
executor = model.get("executor", {})
|
def do_work(models, jobs, image_path, parallel_exec=True, **kwargs):
r"""
Orchestrates the processing of image generation tasks based on given models and jobs.
This function iterates over each model and the associated jobs, generating images as specified. It sets up the pipeline for each model and executes the image generation tasks, saving the results to the specified path.
Parameters:
models (list of dicts): List of model configurations. Each configuration includes:
- 'name' (str): The model name or path.
- 'executor' (dict): Parameters like 'labels', 'num_of_exec', 'cfg_scale', and 'inference_steps'.
jobs (list of dicts): List of job configurations. Each job includes:
- 'label' (int): Corresponding model label.
- 'prompt' (str): Text prompt for image generation.
- 'negative_prompt' (str, optional): Text prompt for undesired image features.
image_path (str): Directory path to save the generated images.
parallel_exec (bool, optional): Flag to enable parallel execution. Defaults to True.
**kwargs: Additional keyword arguments for pipeline setup.
Example:
models = [
{
"name": "CompVis/stable-diffusion-v1-4",
"executor": {
"labels": [1],
"num_of_exec": 1,
"cfg_scale": 7,
"inference_steps": 100,
}
},
{
"name": "./model_cache/model2.safetensors",
"executor": {
"labels": [2],
"num_of_exec": 2,
"cfg_scale": 6,
"inference_steps": 50,
}
},
]
jobs = [
{
"label": 1,
"prompt": "A scenic landscape",
"negative_prompt": "blurred image, black and white, watermarked image",
},
{
"label": 2,
"prompt": "A person wearing a mask",
"negative_prompt": "deformed anatomy, hand-drawn image, blurred image",
},
]
do_work(models, jobs, "./generated-images")
"""
job_dict = {job['label']: [] for job in jobs}
for job in jobs:
job_dict[job['label']].append(job)
for model in models:
pipeline = setup_pipeline(model["name"], **kwargs)
labels = model.get("executor", {}).get("labels", [])
for label in labels:
if label in job_dict:
for job in job_dict[label]:
executor = model.get("executor", {}) | process_task(job, pipeline, executor, image_path, parallel_exec) | 1 | 2023-12-28 16:19:12+00:00 | 4k |
Emperor-WS/PyEmber | ember/autograd/function.py | [
{
"identifier": "numpy_or_cupy",
"path": "ember/cuda.py",
"snippet": "def numpy_or_cupy(*tensors):\n \"\"\"\n Choose between NumPy and CuPy based on the device of input tensors.\n\n Args:\n *tensors: Variable number of tensors.\n\n Returns:\n module: NumPy or CuPy module based ... | from abc import ABC, abstractmethod
from ember.cuda import numpy_or_cupy, scalars_to_device
from .utils import inv_permutation
from .hook import Hook
import numpy as np
import copy
import ember | 1,626 | def backward(self, grad):
"""Abstract method for the backward pass."""
raise NotImplementedError
def __call__(self, *tensors):
"""
Invokes the function, registering hooks for gradients.
Args:
- *tensors: Variable number of input tensors.
Returns:
- Tensor: Output tensor from the forward pass.
"""
self.tensors = (*tensors,)
scalars_to_device(*self.tensors)
# Perform the forward pass
out = self.forward(*tensors)
# Register hooks for gradients
for tensor in self.tensors:
if tensor.requires_grad:
out.register_hook(Hook(tensor, self.backward))
return out
def __repr__(self):
"""
Returns a string representation of the function.
Returns:
- str: String representation of the function.
"""
return f'<Function: {self.__class__.__name__}>'
class Add(Function):
    """Element-wise addition with broadcasting-aware gradients.

    Methods:
    - forward(tensor1, tensor2): Computes tensor1 + tensor2.
    - single_backward(grad, tensor): Reduces an upstream gradient to one input's shape.
    - backward(grad): Gradients for both inputs.
    """

    def forward(self, tensor1, tensor2):
        """Return the element-wise sum of the two input tensors.

        The result requires a gradient if either operand does, and lives on
        the first operand's device.
        """
        result = tensor1.data + tensor2.data
        needs_grad = tensor1.requires_grad or tensor2.requires_grad
        return ember.Tensor(result, requires_grad=needs_grad, device=tensor1.device)

    @staticmethod
    def single_backward(grad, tensor):
        """Reduce the upstream gradient to match one operand's shape.

        Sums away leading dimensions introduced by broadcasting, then sums
        (keeping dims) over every axis where the operand has size 1.
        """
        # Collapse dimensions that broadcasting prepended to the gradient.
        while grad.ndim > tensor.ndim:
            grad = grad.sum(axis=0)
        # Collapse axes the operand broadcast from size 1.
        for axis, size in enumerate(tensor.shape):
            if size == 1:
                grad = grad.sum(axis=axis, keepdims=True)
        return grad

    def backward(self, grad):
        """Return the gradient tuple (d/dtensor1, d/dtensor2) for the addition."""
        first, second = self.tensors
        return (
            self.single_backward(grad, first),
            self.single_backward(grad, second),
        )
class Multiply(Function):
"""
Multiplication operation.
Methods:
- forward(tensor1, tensor2): Performs multiplication.
- single_backward(grad, t1, t2): Computes gradient for a single tensor.
- backward(grad): Computes gradients for tensors involved in the backward pass.
"""
def forward(self, tensor1, tensor2):
"""
Performs multiplication.
Args:
- tensor1: First input tensor.
- tensor2: Second input tensor.
Returns:
- Tensor: Resultant tensor after multiplication.
"""
# Determine whether to use NumPy or CuPy for element-wise multiplication
|
class Function(ABC):
    """Base class for differentiable operations.

    Subclasses implement ``forward`` and ``backward``; calling an instance
    runs the forward pass and wires gradient hooks onto the inputs.

    Attributes:
    - tensors: The operands captured by the most recent call.
    """

    __slots__ = 'tensors'

    def __init__(self):
        super().__init__()
        self.tensors = None

    @abstractmethod
    def forward(self, *tensors):
        """Compute the operation's output from the input tensors."""
        raise NotImplementedError

    @abstractmethod
    def backward(self, grad):
        """Compute input gradients from the upstream gradient."""
        raise NotImplementedError

    def __call__(self, *tensors):
        """Run the forward pass and register backward hooks.

        Args:
        - *tensors: Input tensors of the operation.

        Returns:
        - Tensor: The output of the forward pass.
        """
        self.tensors = tuple(tensors)
        scalars_to_device(*self.tensors)
        result = self.forward(*tensors)
        # Only inputs that track gradients need a hook into this op's backward.
        for operand in self.tensors:
            if operand.requires_grad:
                result.register_hook(Hook(operand, self.backward))
        return result

    def __repr__(self):
        """Return a short human-readable description of the function."""
        return f'<Function: {self.__class__.__name__}>'
class Add(Function):
    """Element-wise addition whose backward pass undoes broadcasting.

    Methods:
    - forward(tensor1, tensor2): Computes tensor1 + tensor2.
    - single_backward(grad, tensor): Shrinks the upstream gradient to an input's shape.
    - backward(grad): Gradients for both operands.
    """

    def forward(self, tensor1, tensor2):
        """Add the two tensors element-wise.

        The output tracks gradients when either input does and is placed on
        the first input's device.
        """
        total = tensor1.data + tensor2.data
        track = tensor1.requires_grad or tensor2.requires_grad
        return ember.Tensor(total, requires_grad=track, device=tensor1.device)

    @staticmethod
    def single_backward(grad, tensor):
        """Sum the gradient down to *tensor*'s shape.

        Leading axes added by broadcasting are summed away first; axes the
        tensor broadcast from size 1 are then summed with keepdims.
        """
        extra = grad.ndim - tensor.ndim
        for _ in range(extra):
            grad = grad.sum(axis=0)
        for axis, length in enumerate(tensor.shape):
            if length == 1:
                grad = grad.sum(axis=axis, keepdims=True)
        return grad

    def backward(self, grad):
        """Return (grad wrt tensor1, grad wrt tensor2)."""
        lhs, rhs = self.tensors
        return self.single_backward(grad, lhs), self.single_backward(grad, rhs)
class Multiply(Function):
"""
Multiplication operation.
Methods:
- forward(tensor1, tensor2): Performs multiplication.
- single_backward(grad, t1, t2): Computes gradient for a single tensor.
- backward(grad): Computes gradients for tensors involved in the backward pass.
"""
def forward(self, tensor1, tensor2):
"""
Performs multiplication.
Args:
- tensor1: First input tensor.
- tensor2: Second input tensor.
Returns:
- Tensor: Resultant tensor after multiplication.
"""
# Determine whether to use NumPy or CuPy for element-wise multiplication | nc = numpy_or_cupy(tensor1, tensor2) | 0 | 2023-12-23 23:11:58+00:00 | 4k |
Hassi34/iot-device-identification | src/stage_01_ingest_data.py | [
{
"identifier": "read_yaml",
"path": "src/utils/common.py",
"snippet": "def read_yaml(path_to_yaml: str) -> dict:\n with open(path_to_yaml) as yaml_file:\n content = yaml.safe_load(yaml_file)\n return content"
},
{
"identifier": "MongoDBOps",
"path": "src/utils/mongo_ops.py",
... | import argparse
import mlflow
import os
from src.utils.common import read_yaml
from src.utils import MongoDBOps
from src.utils.sys_logging import get_logger
from src.utils import MLFlowManager
from pathlib import Path | 1,846 |
STAGE = "Ingest Data"
def ingest_data():
    """Ingest raw data from MongoDB, persist it locally, and fetch the
    previous MLflow run's data artifact for later drift comparison.

    Reads module-level configuration globals (MONGO_*, RAW_DATA_FILE_PATH,
    LAST_EXP_DATA_DIR, MLFLOW_ARTIFACT_DIR, EXPERIMENT_NAME, logger).

    Raises:
        Exception: Re-raised when the previous run's artifact download fails.
    """
    logger.info("Pulling data from the source...")
    mongo_db = MongoDBOps(database_name=MONGO_DATABSE_NAME)
    complete_df = mongo_db.export_collection_as_dataframe(
        collection_name=MONGO_COLLECTION_NAME,
        rows_to_load=MONGO_NUMBER_OF_ROWS_TO_INGEST,
    )
    logger.info(
        f'The collection has been exported as a pandas dataframe with the shape "{complete_df.shape}"'
    )
    # Ensure the target directory exists before writing the parquet file.
    Path(RAW_DATA_FILE_PATH).parent.absolute().mkdir(parents=True, exist_ok=True)
    complete_df.to_parquet(RAW_DATA_FILE_PATH, compression="gzip")
    logger.info(f'Data has been saved locally at "{RAW_DATA_FILE_PATH}"')
    mlflow_service = MLFlowManager()
    mlflow.set_experiment(EXPERIMENT_NAME)
    runs = mlflow.search_runs(order_by=["attribute.start_time DESC"])
    if runs.empty:
        # Fix: actually skip when there is no previous run; the original fell
        # through and crashed on `runs.run_id[0]` below.
        logger.warning("This is a new experiment, skipping the data drift check...")
        return
    recent_run = runs[0:1]
    recent_run_id = recent_run.run_id[0]
    Path(LAST_EXP_DATA_DIR).absolute().mkdir(parents=True, exist_ok=True)
    file_name = Path(RAW_DATA_FILE_PATH).resolve().name
    mlflow_artifact_path = MLFLOW_ARTIFACT_DIR + "/" + file_name
    last_experiment_data_file_path = os.path.join(
        LAST_EXP_DATA_DIR, MLFLOW_ARTIFACT_DIR, file_name
    )
    try:
        mlflow_service.client.download_artifacts(
            recent_run_id, mlflow_artifact_path, LAST_EXP_DATA_DIR
        )
        logger.info(
            f"The last data version has been downloaded and saved to {last_experiment_data_file_path}"
        )
    except Exception as e:
        logger.error("Could not download the last data version")
        raise e
if __name__ == "__main__":
args = argparse.ArgumentParser()
args.add_argument("--config", "-c", default="configs/system.yaml")
parsed_args = args.parse_args()
config = read_yaml(parsed_args.config)
LOGS_FILE_PATH = config["logs"]["RUNNING_LOGS_FILE_PATH"]
RAW_DATA_FILE_PATH = config["data"]["RAW_DATA_FILE_PATH"][0]
LAST_EXP_DATA_DIR = config["data"]["LAST_EXP_DATA_DIR"]
MONGO_DATABSE_NAME = config["data"]["MONGO_DATABSE_NAME"]
MONGO_COLLECTION_NAME = config["data"]["MONGO_COLLECTION_NAME"]
MONGO_NUMBER_OF_ROWS_TO_INGEST = config["data"]["MONGO_NUMBER_OF_ROWS_TO_INGEST"]
MLFLOW_ARTIFACT_DIR = config["mlflow"]["ARTIFACT_DIR"]
EXPERIMENT_NAME = config["mlflow"]["EXPERIMENT_NAME"]
|
STAGE = "Ingest Data"
def ingest_data():
    """Ingest raw data from MongoDB, persist it locally, and fetch the
    previous MLflow run's data artifact for later drift comparison.

    Reads module-level configuration globals (MONGO_*, RAW_DATA_FILE_PATH,
    LAST_EXP_DATA_DIR, MLFLOW_ARTIFACT_DIR, EXPERIMENT_NAME, logger).

    Raises:
        Exception: Re-raised when the previous run's artifact download fails.
    """
    logger.info("Pulling data from the source...")
    mongo_db = MongoDBOps(database_name=MONGO_DATABSE_NAME)
    complete_df = mongo_db.export_collection_as_dataframe(
        collection_name=MONGO_COLLECTION_NAME,
        rows_to_load=MONGO_NUMBER_OF_ROWS_TO_INGEST,
    )
    logger.info(
        f'The collection has been exported as a pandas dataframe with the shape "{complete_df.shape}"'
    )
    # Ensure the target directory exists before writing the parquet file.
    Path(RAW_DATA_FILE_PATH).parent.absolute().mkdir(parents=True, exist_ok=True)
    complete_df.to_parquet(RAW_DATA_FILE_PATH, compression="gzip")
    logger.info(f'Data has been saved locally at "{RAW_DATA_FILE_PATH}"')
    mlflow_service = MLFlowManager()
    mlflow.set_experiment(EXPERIMENT_NAME)
    runs = mlflow.search_runs(order_by=["attribute.start_time DESC"])
    if runs.empty:
        # Fix: actually skip when there is no previous run; the original fell
        # through and crashed on `runs.run_id[0]` below.
        logger.warning("This is a new experiment, skipping the data drift check...")
        return
    recent_run = runs[0:1]
    recent_run_id = recent_run.run_id[0]
    Path(LAST_EXP_DATA_DIR).absolute().mkdir(parents=True, exist_ok=True)
    file_name = Path(RAW_DATA_FILE_PATH).resolve().name
    mlflow_artifact_path = MLFLOW_ARTIFACT_DIR + "/" + file_name
    last_experiment_data_file_path = os.path.join(
        LAST_EXP_DATA_DIR, MLFLOW_ARTIFACT_DIR, file_name
    )
    try:
        mlflow_service.client.download_artifacts(
            recent_run_id, mlflow_artifact_path, LAST_EXP_DATA_DIR
        )
        logger.info(
            f"The last data version has been downloaded and saved to {last_experiment_data_file_path}"
        )
    except Exception as e:
        logger.error("Could not download the last data version")
        raise e
if __name__ == "__main__":
args = argparse.ArgumentParser()
args.add_argument("--config", "-c", default="configs/system.yaml")
parsed_args = args.parse_args()
config = read_yaml(parsed_args.config)
LOGS_FILE_PATH = config["logs"]["RUNNING_LOGS_FILE_PATH"]
RAW_DATA_FILE_PATH = config["data"]["RAW_DATA_FILE_PATH"][0]
LAST_EXP_DATA_DIR = config["data"]["LAST_EXP_DATA_DIR"]
MONGO_DATABSE_NAME = config["data"]["MONGO_DATABSE_NAME"]
MONGO_COLLECTION_NAME = config["data"]["MONGO_COLLECTION_NAME"]
MONGO_NUMBER_OF_ROWS_TO_INGEST = config["data"]["MONGO_NUMBER_OF_ROWS_TO_INGEST"]
MLFLOW_ARTIFACT_DIR = config["mlflow"]["ARTIFACT_DIR"]
EXPERIMENT_NAME = config["mlflow"]["EXPERIMENT_NAME"] | logger = get_logger(LOGS_FILE_PATH) | 2 | 2023-12-25 10:40:19+00:00 | 4k |
see2023/Bert-VITS2-ext | for_deploy/webui.py | [
{
"identifier": "split_by_language",
"path": "tools/sentence.py",
"snippet": "def split_by_language(text: str, target_languages: list = None) -> list:\n pattern = (\n r\"[\\!\\\"\\#\\$\\%\\&\\'\\(\\)\\*\\+\\,\\-\\.\\/\\:\\;\\<\\>\\=\\?\\@\\[\\]\\{\\}\\\\\\\\\\^\\_\\`\"\n r\"\\!?\\。"#$%&... | import os
import logging
import re_matching
import torch
import utils
import gradio as gr
import webbrowser
import numpy as np
import librosa
from tools.sentence import split_by_language
from infer import infer, latest_version, get_net_g, infer_multilang
from config import config
from tools.translate import translate
from infer_utils import BertFeature, ClapFeature | 1,989 | # flake8: noqa: E402
logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.basicConfig(
level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)
net_g = None
| # flake8: noqa: E402
logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.basicConfig(
level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)
net_g = None
| device = config.webui_config.device | 2 | 2023-12-27 03:09:11+00:00 | 4k |
chinhsuanwu/ifusion-threestudio | threestudio/systems/zero123.py | [
{
"identifier": "BaseLift3DSystem",
"path": "threestudio/systems/base.py",
"snippet": "class BaseLift3DSystem(BaseSystem):\n @dataclass\n class Config(BaseSystem.Config):\n geometry_type: str = \"\"\n geometry: dict = field(default_factory=dict)\n geometry_convert_from: Option... | import os
import random
import shutil
import torch
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from PIL import Image, ImageDraw
from torchmetrics import PearsonCorrCoef
from threestudio.systems.base import BaseLift3DSystem
from threestudio.utils.ops import binary_cross_entropy, dot
from threestudio.utils.typing import * | 3,217 | if guidance == "ref":
# bg_color = torch.rand_like(batch['rays_o'])
ambient_ratio = 1.0
shading = "diffuse"
batch["shading"] = shading
elif guidance == "zero123":
batch = batch["random_camera"]
ambient_ratio = (
self.cfg.ambient_ratio_min
+ (1 - self.cfg.ambient_ratio_min) * random.random()
)
batch["bg_color"] = None
batch["ambient_ratio"] = ambient_ratio
out = self(batch)
loss_prefix = f"loss_{guidance}_"
loss_terms = {}
def set_loss(name, value):
loss_terms[f"{loss_prefix}{name}"] = value
guidance_eval = (
guidance == "zero123"
and self.cfg.freq.guidance_eval > 0
and self.true_global_step % self.cfg.freq.guidance_eval == 0
)
if guidance == "ref":
gt_mask = batch["mask"]
gt_rgb = batch["rgb"]
# color loss
gt_rgb = gt_rgb * gt_mask.float() + out["comp_rgb_bg"] * (
1 - gt_mask.float()
)
set_loss("rgb", F.mse_loss(gt_rgb, out["comp_rgb"]))
# mask loss
set_loss("mask", F.mse_loss(gt_mask.float(), out["opacity"]))
# depth loss
if self.C(self.cfg.loss.lambda_depth) > 0:
valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)].unsqueeze(1)
valid_pred_depth = out["depth"][gt_mask].unsqueeze(1)
with torch.no_grad():
A = torch.cat(
[valid_gt_depth, torch.ones_like(valid_gt_depth)], dim=-1
) # [B, 2]
X = torch.linalg.lstsq(A, valid_pred_depth).solution # [2, 1]
valid_gt_depth = A @ X # [B, 1]
set_loss("depth", F.mse_loss(valid_gt_depth, valid_pred_depth))
# relative depth loss
if self.C(self.cfg.loss.lambda_depth_rel) > 0:
valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)] # [B,]
valid_pred_depth = out["depth"][gt_mask] # [B,]
set_loss(
"depth_rel", 1 - self.pearson(valid_pred_depth, valid_gt_depth)
)
# normal loss
if self.C(self.cfg.loss.lambda_normal) > 0:
valid_gt_normal = (
1 - 2 * batch["ref_normal"][gt_mask.squeeze(-1)]
) # [B, 3]
valid_pred_normal = (
2 * out["comp_normal"][gt_mask.squeeze(-1)] - 1
) # [B, 3]
set_loss(
"normal",
1 - F.cosine_similarity(valid_pred_normal, valid_gt_normal).mean(),
)
elif guidance == "zero123":
# zero123
guidance_out = self.guidance(
out["comp_rgb"],
**batch,
rgb_as_latents=False,
guidance_eval=guidance_eval,
)
# claforte: TODO: rename the loss_terms keys
set_loss("sds", guidance_out["loss_sds"])
if self.C(self.cfg.loss.lambda_normal_smooth) > 0:
if "comp_normal" not in out:
raise ValueError(
"comp_normal is required for 2D normal smooth loss, no comp_normal is found in the output."
)
normal = out["comp_normal"]
set_loss(
"normal_smooth",
(normal[:, 1:, :, :] - normal[:, :-1, :, :]).square().mean()
+ (normal[:, :, 1:, :] - normal[:, :, :-1, :]).square().mean(),
)
if self.C(self.cfg.loss.lambda_3d_normal_smooth) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for normal smooth loss, no normal is found in the output."
)
if "normal_perturb" not in out:
raise ValueError(
"normal_perturb is required for normal smooth loss, no normal_perturb is found in the output."
)
normals = out["normal"]
normals_perturb = out["normal_perturb"]
set_loss("3d_normal_smooth", (normals - normals_perturb).abs().mean())
if not self.cfg.refinement:
if self.C(self.cfg.loss.lambda_orient) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for orientation loss, no normal is found in the output."
)
set_loss(
"orient",
(
out["weights"].detach()
|
@threestudio.register("zero123-system")
class Zero123(BaseLift3DSystem):
@dataclass
class Config(BaseLift3DSystem.Config):
freq: dict = field(default_factory=dict)
refinement: bool = False
ambient_ratio_min: float = 0.5
cfg: Config
    def configure(self):
        """Set up the rendering stack by delegating to the base class."""
        # create geometry, material, background, renderer
        super().configure()
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
render_out = self.renderer(**batch)
return {
**render_out,
}
    def on_fit_start(self) -> None:
        """Instantiate the guidance model, log the training images, and set up metrics.

        Runs once before training starts; zero123 is image-conditioned, so no
        text prompt processor is created.
        """
        super().on_fit_start()
        # no prompt processor
        self.guidance = threestudio.find(self.cfg.guidance_type)(self.cfg.guidance)
        # visualize all training images
        all_images = self.trainer.datamodule.train_dataloader().dataset.get_all_images()
        self.save_image_grid(
            "all_training_images.png",
            [
                # assumes each image is an HWC array — TODO confirm upstream format
                {"type": "rgb", "img": image, "kwargs": {"data_format": "HWC"}}
                for image in all_images
            ],
            name="on_fit_start",
            step=self.true_global_step,
        )
        # Pearson correlation metric consumed by the relative-depth loss.
        self.pearson = PearsonCorrCoef().to(self.device)
def training_substep(self, batch, batch_idx, guidance: str):
"""
Args:
guidance: one of "ref" (reference image supervision), "zero123"
"""
if guidance == "ref":
# bg_color = torch.rand_like(batch['rays_o'])
ambient_ratio = 1.0
shading = "diffuse"
batch["shading"] = shading
elif guidance == "zero123":
batch = batch["random_camera"]
ambient_ratio = (
self.cfg.ambient_ratio_min
+ (1 - self.cfg.ambient_ratio_min) * random.random()
)
batch["bg_color"] = None
batch["ambient_ratio"] = ambient_ratio
out = self(batch)
loss_prefix = f"loss_{guidance}_"
loss_terms = {}
def set_loss(name, value):
loss_terms[f"{loss_prefix}{name}"] = value
guidance_eval = (
guidance == "zero123"
and self.cfg.freq.guidance_eval > 0
and self.true_global_step % self.cfg.freq.guidance_eval == 0
)
if guidance == "ref":
gt_mask = batch["mask"]
gt_rgb = batch["rgb"]
# color loss
gt_rgb = gt_rgb * gt_mask.float() + out["comp_rgb_bg"] * (
1 - gt_mask.float()
)
set_loss("rgb", F.mse_loss(gt_rgb, out["comp_rgb"]))
# mask loss
set_loss("mask", F.mse_loss(gt_mask.float(), out["opacity"]))
# depth loss
if self.C(self.cfg.loss.lambda_depth) > 0:
valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)].unsqueeze(1)
valid_pred_depth = out["depth"][gt_mask].unsqueeze(1)
with torch.no_grad():
A = torch.cat(
[valid_gt_depth, torch.ones_like(valid_gt_depth)], dim=-1
) # [B, 2]
X = torch.linalg.lstsq(A, valid_pred_depth).solution # [2, 1]
valid_gt_depth = A @ X # [B, 1]
set_loss("depth", F.mse_loss(valid_gt_depth, valid_pred_depth))
# relative depth loss
if self.C(self.cfg.loss.lambda_depth_rel) > 0:
valid_gt_depth = batch["ref_depth"][gt_mask.squeeze(-1)] # [B,]
valid_pred_depth = out["depth"][gt_mask] # [B,]
set_loss(
"depth_rel", 1 - self.pearson(valid_pred_depth, valid_gt_depth)
)
# normal loss
if self.C(self.cfg.loss.lambda_normal) > 0:
valid_gt_normal = (
1 - 2 * batch["ref_normal"][gt_mask.squeeze(-1)]
) # [B, 3]
valid_pred_normal = (
2 * out["comp_normal"][gt_mask.squeeze(-1)] - 1
) # [B, 3]
set_loss(
"normal",
1 - F.cosine_similarity(valid_pred_normal, valid_gt_normal).mean(),
)
elif guidance == "zero123":
# zero123
guidance_out = self.guidance(
out["comp_rgb"],
**batch,
rgb_as_latents=False,
guidance_eval=guidance_eval,
)
# claforte: TODO: rename the loss_terms keys
set_loss("sds", guidance_out["loss_sds"])
if self.C(self.cfg.loss.lambda_normal_smooth) > 0:
if "comp_normal" not in out:
raise ValueError(
"comp_normal is required for 2D normal smooth loss, no comp_normal is found in the output."
)
normal = out["comp_normal"]
set_loss(
"normal_smooth",
(normal[:, 1:, :, :] - normal[:, :-1, :, :]).square().mean()
+ (normal[:, :, 1:, :] - normal[:, :, :-1, :]).square().mean(),
)
if self.C(self.cfg.loss.lambda_3d_normal_smooth) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for normal smooth loss, no normal is found in the output."
)
if "normal_perturb" not in out:
raise ValueError(
"normal_perturb is required for normal smooth loss, no normal_perturb is found in the output."
)
normals = out["normal"]
normals_perturb = out["normal_perturb"]
set_loss("3d_normal_smooth", (normals - normals_perturb).abs().mean())
if not self.cfg.refinement:
if self.C(self.cfg.loss.lambda_orient) > 0:
if "normal" not in out:
raise ValueError(
"Normal is required for orientation loss, no normal is found in the output."
)
set_loss(
"orient",
(
out["weights"].detach() | * dot(out["normal"], out["t_dirs"]).clamp_min(0.0) ** 2 | 2 | 2023-12-27 20:30:33+00:00 | 4k |
jasursadikov/mud | commands.py | [
{
"identifier": "TEXT",
"path": "utils.py",
"snippet": "TEXT = {\n 'white': '\\033[37m',\n 'gray': '\\033[90m',\n 'black': '\\033[30m',\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m'... | import utils
import asyncio
import subprocess
from utils import TEXT, BACK, RESET, STYLES, END_STYLES, glyph
from typing import List, Dict
from collections import Counter
from prettytable import PrettyTable, PLAIN_COLUMNS | 3,475 | if not line:
break
line = line.decode().strip()
line = table[repo_path][0] if not line.strip() else line
table[repo_path] = [line, f'{TEXT["yellow"]}{glyph("running")}']
self._print_process(table)
return_code = await process.wait()
if return_code == 0:
status = f'{TEXT["green"]}{glyph("finished")}'
else:
status = f'{TEXT["red"]}{glyph("failed")} Code: {return_code}'
table[repo_path] = [table[repo_path][0], status]
self._print_process(table)
    def _print_process(self, info: Dict[str, List[str]]) -> None:
        """Redraw the live process table.

        Args:
            info: Maps repo path -> [latest output line, status cell].
        """
        table = self._get_table()
        for path, (line, status) in info.items():
            formatted_path = self._get_formatted_path(path)
            table.add_row([formatted_path, line, status])
        # \x1bc resets the terminal so the table repaints in place.
        print(f'\x1bc{self._table_to_str(table)}\n', end='')
def _print_table(self, table: PrettyTable):
table = self._table_to_str(table)
if len(table) != 0:
print(table)
@staticmethod
def _table_to_str(table: PrettyTable) -> str:
table = table.get_string()
table = '\n'.join(line.lstrip() for line in table.splitlines())
return table
    @staticmethod
    def _get_table() -> PrettyTable:
        """Create a borderless, headerless, left-aligned table for console output."""
        return PrettyTable(border=False, header=False, style=PLAIN_COLUMNS, align='l')
    # Prettified repository path
    @staticmethod
    def _get_formatted_path(path: str) -> str:
        """Return *path* dimmed and prefixed with a gray '../' marker."""
        return f'{STYLES["dim"]}{TEXT["gray"]}../{RESET}{STYLES["dim"]}{path}{RESET}'
    # Displaying current branch
    @staticmethod
    def _get_branch_status(path: str) -> str:
        """Return the repo's current branch name decorated with an icon and color.

        master/main, develop, and plain branch names get fixed styles; a
        prefixed name (feature/, bugfix/, release/, ...) is styled by its
        first path segment.
        """
        branch_cmd = subprocess.run('git rev-parse --abbrev-ref HEAD', shell=True, text=True, cwd=path,
                                    capture_output=True)
        branch_stdout = branch_cmd.stdout.strip()
        if branch_stdout == 'master' or branch_stdout == 'main':
            branch = f'{TEXT["yellow"]}{glyph("master")}{RESET} {branch_stdout}'
        elif branch_stdout == 'develop':
            branch = f'{TEXT["green"]}{glyph("feature")}{RESET} {branch_stdout}'
        elif '/' in branch_stdout:
            # Prefixed branch: the icon is chosen from the first path segment.
            branch_path = branch_stdout.split('/')
            icon = branch_path[0]
            icon = f'{TEXT["red"]}{glyph("bugfix")}{RESET}' if icon in ['bugfix', 'bug', 'hotfix'] else \
                f'{TEXT["blue"]}{glyph("release")}{RESET}' if icon == 'release' else \
                f'{TEXT["green"]}{glyph("feature")}{RESET}' if icon in ['feature', 'feat', 'develop'] else \
                f'{TEXT["green"]}{glyph("branch")}{RESET}'
            branch = f'{icon} {STYLES["bold"]}{branch_path[0]}{RESET}/{STYLES["bold"]}{("/".join(branch_path[1:]))}'
        else:
            branch = f'{TEXT["cyan"]}{glyph("branch")}{RESET} {branch_stdout}'
        return branch
# Last author's name
@staticmethod
def _get_authors_name(path: str) -> str:
cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%an'], text=True, cwd=path, capture_output=True)
git_config_user_cmd = subprocess.run(['git', 'config', 'user.name'], text=True, capture_output=True)
committer_color = '' if cmd.stdout.strip() == git_config_user_cmd.stdout.strip() else STYLES["dim"]
author = cmd.stdout.strip()
author = author[:20] + '...' if len(author) > 20 else author
author = f'{committer_color}{author}{RESET}'
return author
# Last commit message
@staticmethod
def _get_commit_message(path: str, max_chars: int) -> str:
cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%s'], text=True, cwd=path, capture_output=True)
log = cmd.stdout.strip()
log = log[:max_chars] + '...' if len(log) > max_chars else log
return log
def _get_formatted_labels(self, labels: List[str]) -> str:
if len(labels) == 0:
return ''
colored_label = ''
for label in labels:
color_index = self._get_color_index(label) % len(TEXT)
colored_label += f'{TEXT[list(TEXT.keys())[color_index + 3]]}{glyph("label")}{RESET} {label} '
return colored_label
@staticmethod
def _get_formatted_branches(branches: List[str], current_branch: str) -> str:
if len(branches) == 0:
return ''
simplify_branches = utils.settings.config['mud'].getboolean('simplify_branches') == True
output = ''
for branch in branches:
is_origin = branch.startswith('origin/')
branch = branch.replace('origin/', '') if is_origin else branch
current_prefix = f'{STYLES["italic"]}{STYLES["bold"]}' if current_branch == branch else ''
current_prefix = current_prefix + STYLES['dim'] if is_origin else current_prefix
origin_prefix = f'{TEXT["magenta"]}{STYLES["dim"]}o/' if is_origin else ''
color = 'white'
icon = glyph('branch')
if branch == 'master' or branch == 'main':
color = 'yellow'
icon = f'{glyph("master")}'
elif branch == 'develop':
color = 'green'
icon = f'{glyph("feature")}'
elif '/' in branch:
parts = branch.split('/')
|
class Commands:
    def __init__(self, repos):
        """Store the repo registry and reset the per-label color cache."""
        self.repos = repos
        # label -> color index; filled lazily as labels are first seen
        self.label_color_cache = {}
        self.current_color_index = 0
    # `mud status` command implementation
    def status(self, repos: Dict[str, List[str]]) -> None:
        """Print a working-tree summary table for every repo.

        Columns: path, branch, ahead/behind counts vs upstream, file-status
        counts (added/modified/moved/removed), last author, last commit
        subject, and labels.
        """
        table = self._get_table()
        for path, tags in repos.items():
            formatted_path = self._get_formatted_path(path)
            branch = self._get_branch_status(path)
            author = self._get_authors_name(path)
            commit = self._get_commit_message(path, 30)
            colored_labels = self._get_formatted_labels(tags)
            # Sync with origin status
            ahead_behind_cmd = subprocess.run(['git', 'rev-list', '--left-right', '--count', 'HEAD...@{upstream}'],
                                              text=True, cwd=path, capture_output=True)
            stdout = ahead_behind_cmd.stdout.strip().split()
            if len(stdout) >= 2:
                ahead, behind = stdout[0], stdout[1]
                origin_sync = ''
                if ahead and ahead != '0':
                    origin_sync += f'{TEXT["bright_green"]}{glyph("ahead")} {ahead}{RESET}'
                if behind and behind != '0':
                    if origin_sync:
                        origin_sync += ' '
                    origin_sync += f'{TEXT["bright_blue"]}{glyph("behind")} {behind}{RESET}'
            else:
                # rev-list produced no counts (e.g. no upstream): leave blank.
                origin_sync = ''
            # Git status
            status_cmd = subprocess.run(['git', 'status', '-s'], text=True, cwd=path, capture_output=True)
            files = [line.lstrip() for line in status_cmd.stdout.strip().splitlines()]
            modified, added, removed, moved = 0, 0, 0, 0
            # Tally short-format status prefixes (M/A/??/D/R).
            for file in files:
                if file.startswith('M'):
                    modified += 1
                elif file.startswith('A') or file.startswith('??'):
                    added += 1
                elif file.startswith('D'):
                    removed += 1
                elif file.startswith('R'):
                    moved += 1
            status = ''
            if added:
                status += f'{TEXT["bright_green"]}{added} {glyph("added")}{RESET} '
            if modified:
                status += f'{TEXT["yellow"]}{modified} {glyph("modified")}{RESET} '
            if moved:
                status += f'{TEXT["blue"]}{moved} {glyph("moved")}{RESET} '
            if removed:
                status += f'{TEXT["red"]}{removed} {glyph("removed")}{RESET} '
            if not files:
                status = f'{TEXT["green"]}{glyph("clear")}{RESET}'
            table.add_row([formatted_path, branch, origin_sync, status, author, commit, colored_labels])
        self._print_table(table)
    # `mud log` command implementation
    def log(self, repos: Dict[str, List[str]]) -> None:
        """Print one row per repo: path, branch, author, relative commit age,
        commit subject, and labels."""
        table = self._get_table()
        for path, labels in repos.items():
            formatted_path = self._get_formatted_path(path)
            branch = self._get_branch_status(path)
            author = self._get_authors_name(path)
            commit = self._get_commit_message(path, 35)
            colored_labels = self._get_formatted_labels(labels)
            # Commit time
            commit_time_cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%cd', '--date=relative'], text=True,
                                             cwd=path, capture_output=True)
            commit_time = commit_time_cmd.stdout.strip()
            table.add_row([formatted_path, branch, author, commit_time, commit, colored_labels])
        self._print_table(table)
# `mud branch` command implementation
def branches(self, repos: Dict[str, List[str]]) -> None:
table = self._get_table()
all_branches = {}
for path in repos.keys():
raw_branches = [line.strip() for line in
subprocess.check_output(['git', 'branch'], text=True, cwd=path).split('\n') if line.strip()]
for branch in raw_branches:
branch = branch.replace(' ', '').replace('*', '')
if branch not in all_branches:
all_branches[branch] = 0
all_branches[branch] += 1
branch_counter = Counter(all_branches)
for path, labels in repos.items():
formatted_path = self._get_formatted_path(path)
branches = subprocess.check_output(['git', 'branch'], text=True, cwd=path).splitlines()
current_branch = next((branch.lstrip('* ') for branch in branches if branch.startswith('*')), None)
branches = [branch.lstrip('* ') for branch in branches]
sorted_branches = sorted(branches, key=lambda x: branch_counter.get(x, 0), reverse=True)
if current_branch and current_branch in sorted_branches:
sorted_branches.remove(current_branch)
sorted_branches.insert(0, current_branch)
formatted_branches = self._get_formatted_branches(sorted_branches, current_branch)
colored_labels = self._get_formatted_labels(labels)
table.add_row([formatted_path, formatted_branches, colored_labels])
self._print_table(table)
# `mud <COMMAND>` when run_async = 0 and run_table = 0
def run_ordered(self, repos: List[str], command: [str]) -> None:
for path in repos:
print(f'{self._get_formatted_path(path)}{RESET} {command}{RESET}')
result = subprocess.run(command, shell=True, cwd=path, capture_output=True, text=True)
if result.stderr:
print(result.stderr)
if result.stdout and not result.stdout.isspace():
print(result.stdout)
# `mud <COMMAND>` when run_async = 1 and run_table = 0
async def run_async(self, repos: List[str], command: str) -> None:
sem = asyncio.Semaphore(len(repos))
async def run_process(path: str) -> None:
async with sem:
process = await asyncio.create_subprocess_shell(command, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = await process.communicate()
print(f'{self._get_formatted_path(path)}>{RESET} {command}')
if stderr:
print(stderr.decode())
if stdout and not stdout.isspace():
print(stdout.decode())
await asyncio.gather(*(run_process(path) for path in repos))
# `mud <COMMAND>` when run_async = 1 and run_table = 1
async def run_async_table_view(self, repos: List[str], command: str) -> None:
sem = asyncio.Semaphore(len(repos))
table = {repo: ['', ''] for repo in repos}
async def task(repo: str) -> None:
async with sem:
await self._run_process(repo, table, command)
tasks = [asyncio.create_task(task(repo)) for repo in repos]
await asyncio.gather(*tasks)
async def _run_process(self, repo_path: str, table: Dict[str, List[str]], command: str) -> None:
process = await asyncio.create_subprocess_shell(command, cwd=repo_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
while True:
line = await process.stdout.readline()
if not line:
break
line = line.decode().strip()
line = table[repo_path][0] if not line.strip() else line
table[repo_path] = [line, f'{TEXT["yellow"]}{glyph("running")}']
self._print_process(table)
return_code = await process.wait()
if return_code == 0:
status = f'{TEXT["green"]}{glyph("finished")}'
else:
status = f'{TEXT["red"]}{glyph("failed")} Code: {return_code}'
table[repo_path] = [table[repo_path][0], status]
self._print_process(table)
def _print_process(self, info: Dict[str, List[str]]) -> None:
table = self._get_table()
for path, (line, status) in info.items():
formatted_path = self._get_formatted_path(path)
table.add_row([formatted_path, line, status])
print(f'\x1bc{self._table_to_str(table)}\n', end='')
def _print_table(self, table: PrettyTable):
table = self._table_to_str(table)
if len(table) != 0:
print(table)
@staticmethod
def _table_to_str(table: PrettyTable) -> str:
table = table.get_string()
table = '\n'.join(line.lstrip() for line in table.splitlines())
return table
@staticmethod
def _get_table() -> PrettyTable:
return PrettyTable(border=False, header=False, style=PLAIN_COLUMNS, align='l')
# Prettified repository path
@staticmethod
def _get_formatted_path(path: str) -> str:
return f'{STYLES["dim"]}{TEXT["gray"]}../{RESET}{STYLES["dim"]}{path}{RESET}'
# Displaying current branch
@staticmethod
def _get_branch_status(path: str) -> str:
branch_cmd = subprocess.run('git rev-parse --abbrev-ref HEAD', shell=True, text=True, cwd=path,
capture_output=True)
branch_stdout = branch_cmd.stdout.strip()
if branch_stdout == 'master' or branch_stdout == 'main':
branch = f'{TEXT["yellow"]}{glyph("master")}{RESET} {branch_stdout}'
elif branch_stdout == 'develop':
branch = f'{TEXT["green"]}{glyph("feature")}{RESET} {branch_stdout}'
elif '/' in branch_stdout:
branch_path = branch_stdout.split('/')
icon = branch_path[0]
icon = f'{TEXT["red"]}{glyph("bugfix")}{RESET}' if icon in ['bugfix', 'bug', 'hotfix'] else \
f'{TEXT["blue"]}{glyph("release")}{RESET}' if icon == 'release' else \
f'{TEXT["green"]}{glyph("feature")}{RESET}' if icon in ['feature', 'feat', 'develop'] else \
f'{TEXT["green"]}{glyph("branch")}{RESET}'
branch = f'{icon} {STYLES["bold"]}{branch_path[0]}{RESET}/{STYLES["bold"]}{("/".join(branch_path[1:]))}'
else:
branch = f'{TEXT["cyan"]}{glyph("branch")}{RESET} {branch_stdout}'
return branch
# Last author's name
@staticmethod
def _get_authors_name(path: str) -> str:
cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%an'], text=True, cwd=path, capture_output=True)
git_config_user_cmd = subprocess.run(['git', 'config', 'user.name'], text=True, capture_output=True)
committer_color = '' if cmd.stdout.strip() == git_config_user_cmd.stdout.strip() else STYLES["dim"]
author = cmd.stdout.strip()
author = author[:20] + '...' if len(author) > 20 else author
author = f'{committer_color}{author}{RESET}'
return author
# Last commit message
@staticmethod
def _get_commit_message(path: str, max_chars: int) -> str:
cmd = subprocess.run(['git', 'log', '-1', '--pretty=format:%s'], text=True, cwd=path, capture_output=True)
log = cmd.stdout.strip()
log = log[:max_chars] + '...' if len(log) > max_chars else log
return log
def _get_formatted_labels(self, labels: List[str]) -> str:
if len(labels) == 0:
return ''
colored_label = ''
for label in labels:
color_index = self._get_color_index(label) % len(TEXT)
colored_label += f'{TEXT[list(TEXT.keys())[color_index + 3]]}{glyph("label")}{RESET} {label} '
return colored_label
@staticmethod
def _get_formatted_branches(branches: List[str], current_branch: str) -> str:
if len(branches) == 0:
return ''
simplify_branches = utils.settings.config['mud'].getboolean('simplify_branches') == True
output = ''
for branch in branches:
is_origin = branch.startswith('origin/')
branch = branch.replace('origin/', '') if is_origin else branch
current_prefix = f'{STYLES["italic"]}{STYLES["bold"]}' if current_branch == branch else ''
current_prefix = current_prefix + STYLES['dim'] if is_origin else current_prefix
origin_prefix = f'{TEXT["magenta"]}{STYLES["dim"]}o/' if is_origin else ''
color = 'white'
icon = glyph('branch')
if branch == 'master' or branch == 'main':
color = 'yellow'
icon = f'{glyph("master")}'
elif branch == 'develop':
color = 'green'
icon = f'{glyph("feature")}'
elif '/' in branch:
parts = branch.split('/') | end_dim = '' if is_origin else END_STYLES["dim"] | 4 | 2023-12-28 13:09:31+00:00 | 4k |
RaceCrewAI/gt-telem | gt_telem/models/telemetry.py | [
{
"identifier": "format_time",
"path": "gt_telem/models/helpers.py",
"snippet": "def format_time(milliseconds):\n \"\"\"\n Format milliseconds into a time string (MM:SS.sss).\n\n Parameters:\n - milliseconds (int): Time in milliseconds.\n\n Returns:\n str: Formatted time string... | from datetime import datetime
from gt_telem.models.helpers import format_time, format_time_of_day
from gt_telem.models.models import Vector3D, WheelMetric
from gt_telem.models.telemetry_packet import TelemetryPacket | 3,293 |
@property
def cars_on_track(self) -> bool:
"""
Check if there are cars on the track.
"""
return bool(1<<0 & self.flags)
@property
def is_paused(self) -> bool:
"""
Check if the simulation is paused.
"""
return bool(1<<1 & self.flags)
@property
def is_loading(self) -> bool:
"""
Check if the simulation is loading.
"""
return bool(1<<2 & self.flags)
@property
def in_gear(self) -> bool:
"""
Check if the vehicle is in gear.
"""
return bool(1<<3 & self.flags)
@property
def has_turbo(self) -> bool:
"""
Check if the vehicle has a turbo.
"""
return bool(1<<4 & self.flags)
@property
def rev_limit(self) -> bool:
"""
Check if the vehicle is at the rev limit.
"""
return bool(1<<5 & self.flags)
@property
def hand_brake_active(self) -> bool:
"""
Check if the hand brake is active.
"""
return bool(1<<6 & self.flags)
@property
def lights_active(self) -> bool:
"""
Check if the lights are active.
"""
return bool(1<<7 & self.flags)
@property
def high_beams(self) -> bool:
"""
Check if the high beams are active.
"""
return bool(1<<8 & self.flags)
@property
def low_beams(self) -> bool:
"""
Check if the low beams are active.
"""
return bool(1<<9 & self.flags)
@property
def asm_active(self) -> bool:
"""
Check if the ASM (Active Stability Management) is active.
"""
return bool(1<<10 & self.flags)
@property
def tcs_active(self) -> bool:
"""
Check if the TCS (Traction Control System) is active.
"""
return bool(1<<11 & self.flags)
@property
def unknown_bool_1(self) -> bool:
"""
Get the value of an unknown boolean flag.
"""
return bool(1<<12 & self.flags)
@property
def unknown_bool_2(self) -> bool:
"""
Not sure
"""
return bool(1<<13 & self.flags)
@property
def unknown_bool_3(self) -> bool:
"""
Get the value of another unknown boolean flag.
"""
return bool(1<<14 & self.flags)
@property
def unknown_bool_4(self) -> bool:
"""
Get the value of another unknown boolean flag.
"""
return bool(1<<15 & self.flags)
@property
def best_lap_time(self) -> str:
"""
Get the formatted best lap time.
"""
if self.best_lap_time_ms == -1:
return None
|
class Telemetry(TelemetryPacket):
"""
Telemetry data from Gran Turismo
Attributes:
- position_x: float - X-coordinate of the position.
- position_y: float - Y-coordinate of the position.
- position_z: float - Z-coordinate of the position.
- velocity_x: float - X-component of velocity.
- velocity_y: float - Y-component of velocity.
- velocity_z: float - Z-component of velocity.
- rotation_x: float - X-component of rotation.
- rotation_y: float - Y-component of rotation.
- rotation_z: float - Z-component of rotation.
- orientation: float - Orientation.
- ang_vel_x: float - X-component of angular velocity.
- ang_vel_y: float - Y-component of angular velocity.
- ang_vel_z: float - Z-component of angular velocity.
- body_height: float - Height of the body.
- engine_rpm: float - Engine RPM.
- iv: float - IV, used for encryption.
- fuel_level: float - Fuel level.
- fuel_capacity: float - Fuel capacity.
- speed_mps: float - Speed in meters per second.
- boost_pressure: float - Boost pressure.
- oil_pressure: float - Oil pressure.
- water_temp: float - Water temperature.
- oil_temp: float - Oil temperature.
- tire_fl_temp: float - Front-left tire temperature.
- tire_fr_temp: float - Front-right tire temperature.
- tire_rl_temp: float - Rear-left tire temperature.
- tire_rr_temp: float - Rear-right tire temperature.
- packet_id: int - Packet ID.
- current_lap: int - Current lap.
- total_laps: int - Total laps.
- best_lap_time_ms: int - Best lap time in milliseconds.
- last_lap_time_ms: int - Last lap time in milliseconds.
- time_of_day_ms: int - Time of day in milliseconds.
- race_start_pos: int - Race start position.
- total_cars: int - Total number of cars.
- min_alert_rpm: int - Minimum alert RPM.
- max_alert_rpm: int - Maximum alert RPM.
- calc_max_speed: int - Calculated maximum speed.
- flags: int - byte that contains current/suggested gear.
- bits: int - Collection of booleans - see properties.
- throttle: int - Throttle.
- brake: int - Brake.
- empty: int - Unused.
- road_plane_x: float - X-coordinate of the road plane.
- road_plane_y: float - Y-coordinate of the road plane.
- road_plane_z: float - Z-coordinate of the road plane.
- road_plane_dist: float - Distance of the road plane. Not sure what this is.
- wheel_fl_rps: float - Front-left wheel revolutions per second.
- wheel_fr_rps: float - Front-right wheel revolutions per second.
- wheel_rl_rps: float - Rear-left wheel revolutions per second.
- wheel_rr_rps: float - Rear-right wheel revolutions per second.
- tire_fl_radius: float - Front-left tire radius.
- tire_fr_radius: float - Front-right tire radius.
- tire_rl_radius: float - Rear-left tire radius.
- tire_rr_radius: float - Rear-right tire radius.
- tire_fl_sus_height: float - Front-left tire suspension height.
- tire_fr_sus_height: float - Front-right tire suspension height.
- tire_rl_sus_height: float - Rear-left tire suspension height.
- tire_rr_sus_height: float - Rear-right tire suspension height.
- unused1: int - Unused variable 1.
- unused2: int - Unused variable 2.
- unused3: int - Unused variable 3.
- unused4: int - Unused variable 4.
- unused5: int - Unused variable 5.
- unused6: int - Unused variable 6.
- unused7: int - Unused variable 7.
- unused8: int - Unused variable 8.
- clutch_pedal: float - Clutch pedal position.
- clutch_engagement: float - Clutch engagement.
- trans_rpm: float - Transmission RPM.
- trans_top_speed: float - Transmission top speed.
- gear1: float - Gear 1.
- gear2: float - Gear 2.
- gear3: float - Gear 3.
- gear4: float - Gear 4.
- gear5: float - Gear 5.
- gear6: float - Gear 6.
- gear7: float - Gear 7.
- gear8: float - Gear 8.
- car_code: int - Car code - on vehicles with more than 8 gears, this is corrupted.
Properties:
- position: Get the position as a Vector3D.
- velocity: Get the velocity as a Vector3D.
- rotation: Get the rotation as a Vector3D.
- angular_velocity: Get the angular velocity as a Vector3D.
- road_plane: Get the road plane coordinates as a Vector3D.
- tire_temp: Get tire temperatures as a WheelMetric.
- wheel_rps: Get wheel revolutions per second as a WheelMetric.
- tire_radius: Get tire radii as a WheelMetric.
- suspension_height: Get suspension heights as a WheelMetric.
- current_gear: Get the current gear.
- suggested_gear: Get the suggested gear.
- speed_kph: Get the speed in kilometers per hour.
- speed_mph: Get the speed in miles per hour.
- cars_on_track: Check if there are cars on the track.
- is_paused: Check if the simulation is paused.
- is_loading: Check if the simulation is loading.
- in_gear: Check if the vehicle is in gear.
- has_turbo: Check if the vehicle has a turbo.
- rev_limit: Check if the vehicle is at the rev limit.
- hand_brake_active: Check if the hand brake is active.
- lights_active: Check if the lights are active.
- high_beams: Check if the high beams are active.
- low_beams: Check if the low beams are active.
- asm_active: Check if the ASM (Active Stability Management) is active.
- tcs_active: Check if the TCS (Traction Control System) is active.
- unknown_bool_1: Purpose unknown.
- unknown_bool_2: Purpose unknown.
- unknown_bool_3: Purpose unknown.
- unknown_bool_4: Purpose unknown.
- best_lap_time: Get the formatted best lap time.
- last_lap_time: Get the formatted last lap time.
- time_of_day: Get the formatted time of day.
Methods
- as_dict: Get the state of the object in a dictionary format.
"""
def __post_init__(self):
self.time = datetime.now()
@property
def position(self) -> Vector3D:
"""
Get the position as a Vector3D.
"""
return Vector3D(self.position_x, self.position_y, self.position_z)
@property
def velocity(self) -> Vector3D:
"""
Get the velocity as a Vector3D.
"""
return Vector3D(self.velocity_x, self.velocity_y, self.velocity_z)
@property
def rotation(self) -> Vector3D:
"""
Get the rotation as a Vector3D.
"""
return Vector3D(self.rotation_x, self.rotation_y, self.rotation_z)
@property
def angular_velocity(self) -> Vector3D:
"""
Get the angular velocity as a Vector3D.
"""
return Vector3D(self.ang_vel_x, self.ang_vel_y, self.ang_vel_z)
@property
def road_plane(self) -> Vector3D:
"""
Get the road plane coordinates as a Vector3D.
"""
return Vector3D(self.road_plane_x, self.road_plane_y, self.road_plane_z)
@property
def tire_temp(self) -> WheelMetric:
"""
Get tire temperatures as a WheelMetric.
"""
return WheelMetric(
self.tire_fl_temp, self.tire_fr_temp, self.tire_rl_temp, self.tire_rr_temp
)
@property
def wheel_rps(self) -> WheelMetric:
"""
Get wheel revolutions per second as a WheelMetric.
"""
return WheelMetric(
self.wheel_fl_rps, self.wheel_fr_rps, self.wheel_rl_rps, self.wheel_rr_rps
)
@property
def tire_radius(self) -> WheelMetric:
"""
Get tire radii as a WheelMetric.
"""
return WheelMetric(
self.tire_fl_radius,
self.tire_fr_radius,
self.tire_rl_radius,
self.tire_rr_radius,
)
@property
def suspension_height(self) -> WheelMetric:
"""
Get suspension heights as a WheelMetric.
"""
return WheelMetric(
self.tire_fl_sus_height,
self.tire_fr_sus_height,
self.tire_rl_sus_height,
self.tire_rr_sus_height,
)
@property
def current_gear(self) -> int:
"""
Get the current gear.
"""
return self.bits & 0b1111
@property
def suggested_gear(self) -> int:
"""
Get the suggested gear.
"""
return self.bits >> 4
@property
def speed_kph(self) -> float:
"""
Get the speed in kilometers per hour.
"""
return self.speed_mps * 3.6
@property
def speed_mph(self) -> float:
"""
Get the speed in miles per hour.
"""
return self.speed_mps * 2.23694
@property
def cars_on_track(self) -> bool:
"""
Check if there are cars on the track.
"""
return bool(1<<0 & self.flags)
@property
def is_paused(self) -> bool:
"""
Check if the simulation is paused.
"""
return bool(1<<1 & self.flags)
@property
def is_loading(self) -> bool:
"""
Check if the simulation is loading.
"""
return bool(1<<2 & self.flags)
@property
def in_gear(self) -> bool:
"""
Check if the vehicle is in gear.
"""
return bool(1<<3 & self.flags)
@property
def has_turbo(self) -> bool:
"""
Check if the vehicle has a turbo.
"""
return bool(1<<4 & self.flags)
@property
def rev_limit(self) -> bool:
"""
Check if the vehicle is at the rev limit.
"""
return bool(1<<5 & self.flags)
@property
def hand_brake_active(self) -> bool:
"""
Check if the hand brake is active.
"""
return bool(1<<6 & self.flags)
@property
def lights_active(self) -> bool:
"""
Check if the lights are active.
"""
return bool(1<<7 & self.flags)
@property
def high_beams(self) -> bool:
"""
Check if the high beams are active.
"""
return bool(1<<8 & self.flags)
@property
def low_beams(self) -> bool:
"""
Check if the low beams are active.
"""
return bool(1<<9 & self.flags)
@property
def asm_active(self) -> bool:
"""
Check if the ASM (Active Stability Management) is active.
"""
return bool(1<<10 & self.flags)
@property
def tcs_active(self) -> bool:
"""
Check if the TCS (Traction Control System) is active.
"""
return bool(1<<11 & self.flags)
@property
def unknown_bool_1(self) -> bool:
"""
Get the value of an unknown boolean flag.
"""
return bool(1<<12 & self.flags)
@property
def unknown_bool_2(self) -> bool:
"""
Not sure
"""
return bool(1<<13 & self.flags)
@property
def unknown_bool_3(self) -> bool:
"""
Get the value of another unknown boolean flag.
"""
return bool(1<<14 & self.flags)
@property
def unknown_bool_4(self) -> bool:
"""
Get the value of another unknown boolean flag.
"""
return bool(1<<15 & self.flags)
@property
def best_lap_time(self) -> str:
"""
Get the formatted best lap time.
"""
if self.best_lap_time_ms == -1:
return None | return format_time(self.best_lap_time_ms) | 0 | 2023-12-23 03:37:54+00:00 | 4k |
Cl0udG0d/GPTHack | gui/server/background.py | [
{
"identifier": "ChatGpt",
"path": "core/chatgpt_web/chatgpt.py",
"snippet": "class ChatGpt():\n session_data = {}\n\n @classmethod\n def get_hi_data(cls):\n return {\"prompt\": \"hi\", \"options\": {},\n \"systemMessage\": \"你是ChatGPT,一个由OpenAI训练的大型语言模型。尽可能详细而准确地回答我们提出的问题... | from apscheduler.schedulers.background import BackgroundScheduler
from core.chatgpt_web.chatgpt import ChatGpt
from core.toolkit import get_file_line_count, sort_gpt_sitelist_from_list, set_new_gpt_site
import ping3
import config
import concurrent.futures
import threading | 2,169 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2023/12/27 18:02
# @Author : Cl0udG0d
# @File : background.py
# @Github: https://github.com/Cl0udG0d
scheduler = BackgroundScheduler()
temp_site_list = list()
lock = threading.Lock()
def check_site_num():
return get_file_line_count() > config.GPT_ALARM_NUM
def save_site2list(site):
alive, execution_time = ChatGpt.check_alive(site)
if alive:
with lock:
temp_site_list.append(f"{site}|{execution_time}\n")
def submit_thread_task(sitelist):
with concurrent.futures.ThreadPoolExecutor(max_workers=config.THREADPOOL_NUM) as executor:
futures = [executor.submit(save_site2list, site) for site in sitelist]
concurrent.futures.wait(futures)
def is_connected(host=config.TEST_CONNECT_URL):
return True if ping3.ping(host) else False
def check_gpt_alive():
global temp_site_list
urllist = list()
with open(config.GPT_FILEPATH, "r") as file:
lines = file.readlines()
for line in lines:
url = line.strip().split('|')[0]
urllist.append(url)
submit_thread_task(urllist)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2023/12/27 18:02
# @Author : Cl0udG0d
# @File : background.py
# @Github: https://github.com/Cl0udG0d
scheduler = BackgroundScheduler()
temp_site_list = list()
lock = threading.Lock()
def check_site_num():
return get_file_line_count() > config.GPT_ALARM_NUM
def save_site2list(site):
alive, execution_time = ChatGpt.check_alive(site)
if alive:
with lock:
temp_site_list.append(f"{site}|{execution_time}\n")
def submit_thread_task(sitelist):
with concurrent.futures.ThreadPoolExecutor(max_workers=config.THREADPOOL_NUM) as executor:
futures = [executor.submit(save_site2list, site) for site in sitelist]
concurrent.futures.wait(futures)
def is_connected(host=config.TEST_CONNECT_URL):
return True if ping3.ping(host) else False
def check_gpt_alive():
global temp_site_list
urllist = list()
with open(config.GPT_FILEPATH, "r") as file:
lines = file.readlines()
for line in lines:
url = line.strip().split('|')[0]
urllist.append(url)
submit_thread_task(urllist) | sort_gpt_sitelist_from_list(temp_site_list) | 2 | 2023-12-26 02:44:48+00:00 | 4k |
DidacticFishstick/ultrastar-wingman | main.py | [
{
"identifier": "Song",
"path": "song.py",
"snippet": "class Song:\n songs = {}\n usdb_ids = set()\n php_session_id = None\n\n @staticmethod\n def create_valid_dir_name(s):\n # Remove invalid characters\n s = re.sub(r'[<>:\"/\\\\|?*]', '', s)\n\n # Replace spaces with... | import getpass
import os
import asyncio
import json
import logging
import os.path
import platform
import signal
import subprocess
import threading
import websockets
import config
import usdb
import usdx
from flask import render_template, Flask, request, send_file
from song import Song
from websocket_server import WebSocketServer, messages | 3,108 |
SCRIPT_BASE_PATH = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__, static_folder=os.path.join(SCRIPT_BASE_PATH, "static"), template_folder=os.path.join(SCRIPT_BASE_PATH, "templates"))
usdx_process = None
download_queue = asyncio.Queue()
event_loop = asyncio.get_event_loop()
php_session_id = None
def restart_usdx():
global usdx_process
if usdx_process is not None:
logging.info("Stopping USDX")
if platform.system() == "Windows":
subprocess.call(['taskkill', '/F', '/T', '/PID', str(usdx_process.pid)])
else:
os.kill(usdx_process.pid, signal.SIGKILL)
logging.info("Starting USDX")
usdx_process = subprocess.Popen(str(config.usdx_path))
@app.route('/')
def index():
|
SCRIPT_BASE_PATH = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__, static_folder=os.path.join(SCRIPT_BASE_PATH, "static"), template_folder=os.path.join(SCRIPT_BASE_PATH, "templates"))
usdx_process = None
download_queue = asyncio.Queue()
event_loop = asyncio.get_event_loop()
php_session_id = None
def restart_usdx():
global usdx_process
if usdx_process is not None:
logging.info("Stopping USDX")
if platform.system() == "Windows":
subprocess.call(['taskkill', '/F', '/T', '/PID', str(usdx_process.pid)])
else:
os.kill(usdx_process.pid, signal.SIGKILL)
logging.info("Starting USDX")
usdx_process = subprocess.Popen(str(config.usdx_path))
@app.route('/')
def index(): | return render_template('index.html', messages=messages) | 1 | 2023-12-23 15:29:44+00:00 | 4k |
Q-MM/PureMM | eval/model_vqa.py | [
{
"identifier": "conv_templates",
"path": "model/conversation.py",
"snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = lo... | import argparse
import torch
import os
import json
import math
import logging
import warnings
from tqdm import tqdm
from model.conversation import conv_templates, SeparatorStyle
from model.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
from PIL import Image
from transformers import AutoTokenizer, AutoConfig, BitsAndBytesConfig
from ..model import *
from peft import PeftModel | 2,438 | if 'lora' in model_name.lower() and model_base is None:
warnings.warn(
'There is `lora` in model_zoo name but no `model_base` is provided. ')
if 'lora' in model_name.lower() and model_base is not None:
lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
print('Loading PureMM from base model_zoo...')
model = PureMMLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained,
**kwargs)
token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
if model.lm_head.weight.shape[0] != token_num:
print(f'model_zoo.lm_head.weight.shape[0]: {model.lm_head.weight.shape[0]}; token_num: {token_num}')
model.lm_head.weight = torch.nn.Parameter(
torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
model.model.embed_tokens.weight = torch.nn.Parameter(
torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
print('Loading additional PureMM weights...')
if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in
non_lora_trainables.items()}
if any(k.startswith('model_zoo.model_zoo.') for k in non_lora_trainables):
non_lora_trainables = {(k[6:] if k.startswith('model_zoo.') else k): v for k, v in non_lora_trainables.items()}
incompatible_keys = model.load_state_dict(non_lora_trainables, strict=False)
# print("non_lora_trainables incompatible_keys: ", incompatible_keys)
# vision_tower 在lora载入之前load,验证visual encoder lora训练效果
vision_tower = model.get_vision_tower()
print(f'vision_tower.is_loaded: {vision_tower.is_loaded}')
if not vision_tower.is_loaded:
vision_tower.load_model()
print(f'vision_tower loaded!!!!')
# print(f'model_zoo: {model_zoo}')
print('Loading LoRA weights...')
model = PeftModel.from_pretrained(model, model_path)
# print(f'model_zoo after get lora: {model_zoo}')
print('Merging LoRA weights...')
model = model.merge_and_unload()
# print(f'model_zoo after merge with lora: {model_zoo}')
print('Model is loaded...')
vision_tower = model.get_vision_tower()
print(f'vision_tower.is_loaded: {vision_tower.is_loaded}')
if not vision_tower.is_loaded:
vision_tower.load_model()
print(f'vision_tower loaded!!!!')
vision_tower.to(device=device, dtype=torch.float16)
image_processor = vision_tower.image_processor
print(f'image_processor: {image_processor}')
if hasattr(model.config, "max_sequence_length"):
context_len = model.config.max_sequence_length
else:
context_len = 2048
return tokenizer, model, image_processor, context_len
def eval_model(args):
# Model
disable_torch_init()
model_path = os.path.expanduser(args.model_path)
model_name = get_model_name_from_path(model_path)
print(f'model_name: {model_name}')
tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
print('load model_zoo done!!!')
# questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
questions_json = json.load(open(os.path.expanduser(args.question_file), "r"))
image_dir = questions_json.get('root_dir', None)
if 'mini_benchmark_IT_SFT_v1.2' in args.question_file:
questions = questions_json.get('annotations')
else:
questions = questions_json.get('questions')
# questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
if not os.path.exists(args.answers_dir):
logging.error(f'answers_dir not exist: {args.answers_dir}')
os.mkdir(args.answers_dir)
print('answers_dir: ', args.answers_dir)
answers_file = os.path.join(args.answers_dir, os.path.basename(args.question_file))
answers_file = answers_file.replace('.json', '_result.json')
print('answers_file: ', answers_file)
# answers_file = os.path.expanduser(args.answers_file)
# os.makedirs(os.path.dirname(answers_file), exist_ok=True)
ans_file = open(answers_file, "w")
for line in tqdm(questions):
idx = line["question_id"]
image_file = line["image"]
# qs = line["text"]
qs = line["question"]
gt = line['answer']
if 'mini_benchmark_IT_SFT_v1.2' in args.question_file:
# qs = qs.replace('Please answer yes or no.', '')
qs = qs.replace(' Please answer yes or no.', '\nAnswer the question using a single word or phrase.')
cur_prompt = qs
qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
conv = conv_templates[args.conv_mode].copy()
conv.append_message(conv.roles[0], qs)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
# image = Image.open(os.path.join(args.image_folder, image_file))
if image_dir:
image_path = os.path.join(image_dir, image_file)
else:
image_path = os.path.join(args.image_folder, image_file)
image = Image.open(image_path).convert('RGB')
# image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
|
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<image>"
def split_list(lst, n):
"""Split a list into n (roughly) equal-sized chunks"""
chunk_size = math.ceil(len(lst) / n) # integer division
return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
chunks = split_list(lst, n)
return chunks[k]
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model_zoo creation.
"""
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda"):
kwargs = {"device_map": device_map}
if load_8bit:
kwargs['load_in_8bit'] = True
elif load_4bit:
kwargs['load_in_4bit'] = True
kwargs['quantization_config'] = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type='nf4'
)
else:
kwargs['torch_dtype'] = torch.float16
if 'lora' in model_name.lower() and model_base is None:
warnings.warn(
'There is `lora` in model_zoo name but no `model_base` is provided. ')
if 'lora' in model_name.lower() and model_base is not None:
lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
print('Loading PureMM from base model_zoo...')
model = PureMMLlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained,
**kwargs)
token_num, tokem_dim = model.lm_head.out_features, model.lm_head.in_features
if model.lm_head.weight.shape[0] != token_num:
print(f'model_zoo.lm_head.weight.shape[0]: {model.lm_head.weight.shape[0]}; token_num: {token_num}')
model.lm_head.weight = torch.nn.Parameter(
torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
model.model.embed_tokens.weight = torch.nn.Parameter(
torch.empty(token_num, tokem_dim, device=model.device, dtype=model.dtype))
print('Loading additional PureMM weights...')
if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in
non_lora_trainables.items()}
if any(k.startswith('model_zoo.model_zoo.') for k in non_lora_trainables):
non_lora_trainables = {(k[6:] if k.startswith('model_zoo.') else k): v for k, v in non_lora_trainables.items()}
incompatible_keys = model.load_state_dict(non_lora_trainables, strict=False)
# print("non_lora_trainables incompatible_keys: ", incompatible_keys)
# vision_tower 在lora载入之前load,验证visual encoder lora训练效果
vision_tower = model.get_vision_tower()
print(f'vision_tower.is_loaded: {vision_tower.is_loaded}')
if not vision_tower.is_loaded:
vision_tower.load_model()
print(f'vision_tower loaded!!!!')
# print(f'model_zoo: {model_zoo}')
print('Loading LoRA weights...')
model = PeftModel.from_pretrained(model, model_path)
# print(f'model_zoo after get lora: {model_zoo}')
print('Merging LoRA weights...')
model = model.merge_and_unload()
# print(f'model_zoo after merge with lora: {model_zoo}')
print('Model is loaded...')
vision_tower = model.get_vision_tower()
print(f'vision_tower.is_loaded: {vision_tower.is_loaded}')
if not vision_tower.is_loaded:
vision_tower.load_model()
print(f'vision_tower loaded!!!!')
vision_tower.to(device=device, dtype=torch.float16)
image_processor = vision_tower.image_processor
print(f'image_processor: {image_processor}')
if hasattr(model.config, "max_sequence_length"):
context_len = model.config.max_sequence_length
else:
context_len = 2048
return tokenizer, model, image_processor, context_len
def eval_model(args):
# Model
disable_torch_init()
model_path = os.path.expanduser(args.model_path)
model_name = get_model_name_from_path(model_path)
print(f'model_name: {model_name}')
tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
print('load model_zoo done!!!')
# questions = [json.loads(q) for q in open(os.path.expanduser(args.question_file), "r")]
questions_json = json.load(open(os.path.expanduser(args.question_file), "r"))
image_dir = questions_json.get('root_dir', None)
if 'mini_benchmark_IT_SFT_v1.2' in args.question_file:
questions = questions_json.get('annotations')
else:
questions = questions_json.get('questions')
# questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
if not os.path.exists(args.answers_dir):
logging.error(f'answers_dir not exist: {args.answers_dir}')
os.mkdir(args.answers_dir)
print('answers_dir: ', args.answers_dir)
answers_file = os.path.join(args.answers_dir, os.path.basename(args.question_file))
answers_file = answers_file.replace('.json', '_result.json')
print('answers_file: ', answers_file)
# answers_file = os.path.expanduser(args.answers_file)
# os.makedirs(os.path.dirname(answers_file), exist_ok=True)
ans_file = open(answers_file, "w")
for line in tqdm(questions):
idx = line["question_id"]
image_file = line["image"]
# qs = line["text"]
qs = line["question"]
gt = line['answer']
if 'mini_benchmark_IT_SFT_v1.2' in args.question_file:
# qs = qs.replace('Please answer yes or no.', '')
qs = qs.replace(' Please answer yes or no.', '\nAnswer the question using a single word or phrase.')
cur_prompt = qs
qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
conv = conv_templates[args.conv_mode].copy()
conv.append_message(conv.roles[0], qs)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
# image = Image.open(os.path.join(args.image_folder, image_file))
if image_dir:
image_path = os.path.join(image_dir, image_file)
else:
image_path = os.path.join(args.image_folder, image_file)
image = Image.open(image_path).convert('RGB')
# image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0] | image_tensor = process_images([image], image_processor, model.config)[0] | 2 | 2023-12-27 09:54:09+00:00 | 4k |
giaminhgist/3D-DAM | lib/training/train.py | [
{
"identifier": "AverageMeter",
"path": "lib/utils/utils.py",
"snippet": "class AverageMeter:\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n ... | import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from collections import OrderedDict
from lib.utils.utils import AverageMeter, accuracy
from lib.utils.EarlyStopping import EarlyStopping
from lib.training.train_helper import plot_result
from sklearn.metrics import confusion_matrix
from tqdm import tqdm | 1,675 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train_one_epoch(
model,
loader,
optimizer,
epoch_idx: int,
lr_scheduler=None,
):
losses_m = AverageMeter()
acc_m = AverageMeter()
model.train()
print('Start training epoch: ', epoch_idx)
for batch_idx, data in enumerate(tqdm(loader)):
images, target = data
images, target = images.to(device), target.to(device)
target = target.flatten()
output = model(images)
loss = nn.CrossEntropyLoss()(output, target)
losses_m.update(loss.item(), images.size(0))
acc1 = accuracy(output, target, topk=(1,))
acc_m.update(acc1[0].item(), output.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.cuda.synchronize()
print(optimizer.param_groups[0]['lr'])
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
metrics = OrderedDict([('loss', losses_m.avg), ('Acc', acc_m.avg)])
if lr_scheduler is not None:
lr_scheduler.step()
return metrics
def validate(model, loader):
losses_m = AverageMeter()
acc_m = AverageMeter()
model.eval()
with torch.no_grad():
for batch_idx, data in enumerate(loader):
images, target = data
images, target = images.to(device), target.to(device)
target = target.flatten()
output = model(images)
loss = nn.CrossEntropyLoss()(output, target)
acc1 = accuracy(output, target, topk=(1,))
# reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(loss.item(), images.size(0))
acc_m.update(acc1[0].item(), output.size(0))
metrics = OrderedDict([('loss', losses_m.avg), ('Acc', acc_m.avg)])
return metrics
def train(model,
train_loader,
val_loader,
epoch_size=300,
lr_scheduler=True,
learning_rate=1e-7, optimizer_setup='Adam', w_decay=1e-7,
patience=20, save_last=True,
name='save', fold=0,
):
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
print('Training using:', device)
model = torch.nn.DataParallel(model)
model.to(device)
if optimizer_setup == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=w_decay)
elif optimizer_setup == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=w_decay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=w_decay)
min_valid_loss = np.inf
max_acc = 0
highest_val_epoch = 0
train_acc = []
train_losses = []
val_acc = []
val_losses = []
if lr_scheduler:
print('Applied lr_scheduler')
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
else:
scheduler = None
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def train_one_epoch(
model,
loader,
optimizer,
epoch_idx: int,
lr_scheduler=None,
):
losses_m = AverageMeter()
acc_m = AverageMeter()
model.train()
print('Start training epoch: ', epoch_idx)
for batch_idx, data in enumerate(tqdm(loader)):
images, target = data
images, target = images.to(device), target.to(device)
target = target.flatten()
output = model(images)
loss = nn.CrossEntropyLoss()(output, target)
losses_m.update(loss.item(), images.size(0))
acc1 = accuracy(output, target, topk=(1,))
acc_m.update(acc1[0].item(), output.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.cuda.synchronize()
print(optimizer.param_groups[0]['lr'])
if hasattr(optimizer, 'sync_lookahead'):
optimizer.sync_lookahead()
metrics = OrderedDict([('loss', losses_m.avg), ('Acc', acc_m.avg)])
if lr_scheduler is not None:
lr_scheduler.step()
return metrics
def validate(model, loader):
losses_m = AverageMeter()
acc_m = AverageMeter()
model.eval()
with torch.no_grad():
for batch_idx, data in enumerate(loader):
images, target = data
images, target = images.to(device), target.to(device)
target = target.flatten()
output = model(images)
loss = nn.CrossEntropyLoss()(output, target)
acc1 = accuracy(output, target, topk=(1,))
# reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(loss.item(), images.size(0))
acc_m.update(acc1[0].item(), output.size(0))
metrics = OrderedDict([('loss', losses_m.avg), ('Acc', acc_m.avg)])
return metrics
def train(model,
train_loader,
val_loader,
epoch_size=300,
lr_scheduler=True,
learning_rate=1e-7, optimizer_setup='Adam', w_decay=1e-7,
patience=20, save_last=True,
name='save', fold=0,
):
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
print('Training using:', device)
model = torch.nn.DataParallel(model)
model.to(device)
if optimizer_setup == 'Adam':
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=w_decay)
elif optimizer_setup == 'SGD':
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=w_decay)
else:
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, weight_decay=w_decay)
min_valid_loss = np.inf
max_acc = 0
highest_val_epoch = 0
train_acc = []
train_losses = []
val_acc = []
val_losses = []
if lr_scheduler:
print('Applied lr_scheduler')
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
else:
scheduler = None
| early_stopping = EarlyStopping(patience=patience, verbose=True) | 2 | 2023-12-22 10:15:55+00:00 | 4k |
gardenifi/server | tests/api/discover_wifi_test.py | [
{
"identifier": "discover_wifi",
"path": "app/main_app.py",
"snippet": "@app.get(\"/api/discover_wifi\")\nasync def discover_wifi(chunked: int = None, page: int = None):\n \"\"\"WIFI discovery API call.\"\"\"\n try:\n if chunked is not None:\n if page is None:\n re... | import json
import pytest
from app.main_app import discover_wifi
from app.raspi.services import Services | 3,565 | """MIT License
Copyright (c) 2023, Marios Karagiannopoulos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
**Attribution Requirement:**
When using or distributing the software, an attribution to Marios Karagiannopoulos must be included.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
| """MIT License
Copyright (c) 2023, Marios Karagiannopoulos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
**Attribution Requirement:**
When using or distributing the software, an attribution to Marios Karagiannopoulos must be included.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
| services = Services() | 1 | 2023-12-22 08:06:09+00:00 | 4k |
xiaoye0x0/pfgo_tg_bot | apps/all_status/view.py | [
{
"identifier": "str_in_list_str",
"path": "utils/base.py",
"snippet": "def str_in_list_str(target: str, target_list: list) -> bool:\n \"\"\"检测字符串中是否存在list中字符串\"\"\"\n for target_str in target_list:\n if target_str in target:\n return True\n return False"
},
{
"identif... | from telebot import TeleBot
from telebot.types import Message
from utils.base import str_in_list_str
from utils.pfgo_spider.spider import PfgoSpider
from utils.task import Task
from utils.tg_tools.base import is_administrator | 2,021 |
def get_all_status(bot: TeleBot, message: Message):
chat_id = message.chat.id
task = Task()
bot_info = bot.get_me()
if is_administrator(bot_info.id, bot.get_chat_administrators(chat_id)):
sent_message = bot.send_message(chat_id, "开始查询数据")
s = PfgoSpider(task.pfgo_url, task.username, task.password)
try:
data = s.get_forward_rules()
result_text = ""
for _, v in data.items():
|
def get_all_status(bot: TeleBot, message: Message):
chat_id = message.chat.id
task = Task()
bot_info = bot.get_me()
if is_administrator(bot_info.id, bot.get_chat_administrators(chat_id)):
sent_message = bot.send_message(chat_id, "开始查询数据")
s = PfgoSpider(task.pfgo_url, task.username, task.password)
try:
data = s.get_forward_rules()
result_text = ""
for _, v in data.items(): | if str_in_list_str(v["name"], task.hide): | 0 | 2023-12-28 08:55:04+00:00 | 4k |
bclavie/RAGatouille | ragatouille/RAGPretrainedModel.py | [
{
"identifier": "CorpusProcessor",
"path": "ragatouille/data/corpus_processor.py",
"snippet": "class CorpusProcessor:\n def __init__(\n self,\n document_splitter_fn: Optional[Callable] = llama_index_sentence_splitter,\n preprocessing_fn: Optional[Union[Callable, list[Callable]]] ... | from typing import Callable, Optional, Union, List, Any
from pathlib import Path
from langchain_core.retrievers import BaseRetriever
from langchain_core.documents import Document
from langchain_core.callbacks.manager import (
CallbackManagerForRetrieverRun,
)
from ragatouille.data.corpus_processor import CorpusProcessor
from ragatouille.data.preprocessors import llama_index_sentence_splitter
from ragatouille.models import LateInteractionModel, ColBERT | 3,181 |
class RAGPretrainedModel:
"""
Wrapper class for a pretrained RAG late-interaction model, and all the associated utilities.
Allows you to load a pretrained model from disk or from the hub, build or query an index.
## Usage
Load a pre-trained checkpoint:
```python
from ragatouille import RAGPretrainedModel
RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
```
Load checkpoint from an existing index:
```python
from ragatouille import RAGPretrainedModel
RAG = RAGPretrainedModel.from_index("path/to/my/index")
```
Both methods will load a fully initialised instance of ColBERT, which you can use to build and query indexes.
```python
RAG.search("How many people live in France?")
```
"""
model_name: Union[str, None] = None
model: Union[LateInteractionModel, None] = None
|
class RAGPretrainedModel:
"""
Wrapper class for a pretrained RAG late-interaction model, and all the associated utilities.
Allows you to load a pretrained model from disk or from the hub, build or query an index.
## Usage
Load a pre-trained checkpoint:
```python
from ragatouille import RAGPretrainedModel
RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
```
Load checkpoint from an existing index:
```python
from ragatouille import RAGPretrainedModel
RAG = RAGPretrainedModel.from_index("path/to/my/index")
```
Both methods will load a fully initialised instance of ColBERT, which you can use to build and query indexes.
```python
RAG.search("How many people live in France?")
```
"""
model_name: Union[str, None] = None
model: Union[LateInteractionModel, None] = None | corpus_processor: Optional[CorpusProcessor] = None | 0 | 2023-12-29 16:26:42+00:00 | 4k |
Caipengzhou/BRAU-Netplusplus | networks/biformer.py | [
{
"identifier": "Attention",
"path": "networks/_common.py",
"snippet": "class Attention(nn.Module):\n \"\"\"\n vanilla attention\n \"\"\"\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):\n super().__init__()\n self.num_heads = n... | import torch
import torch.nn as nn
from einops import rearrange
from timm.models.layers import DropPath
from bra import BiLevelRoutingAttention
from ._common import Attention, AttentionLePE, DWConv | 1,738 | """
BiFormer impl.
author: ZHU Lei
github: https://github.com/rayleizhu
email: ray.leizhu@outlook.com
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
class Block(nn.Module):
def __init__(self, dim, input_resolution, drop_path=0., layer_scale_init_value=-1,num_heads=8, n_win=7, qk_dim=None, qk_scale=None,
kv_per_win=4, kv_downsample_ratio=4, kv_downsample_kernel=None, kv_downsample_mode='ada_avgpool',
topk=4, param_attention="qkvo", param_routing=False, diff_routing=False, soft_routing=False,
mlp_ratio=4, mlp_dwconv=False, side_dwconv=5, before_attn_dwconv=3, pre_norm=True, auto_pad=False):
super().__init__()
qk_dim = qk_dim or dim
self.input_resolution=input_resolution
# modules
if before_attn_dwconv > 0:
self.pos_embed = nn.Conv2d(dim, dim, kernel_size=before_attn_dwconv, padding=1, groups=dim)
else:
self.pos_embed = lambda x: 0
self.norm1 = nn.LayerNorm(dim, eps=1e-6) # important to avoid attention collapsing
if topk > 0:
self.attn = BiLevelRoutingAttention(dim=dim, num_heads=num_heads, n_win=n_win, qk_dim=qk_dim,
qk_scale=qk_scale, kv_per_win=kv_per_win,
kv_downsample_ratio=kv_downsample_ratio,
kv_downsample_kernel=kv_downsample_kernel,
kv_downsample_mode=kv_downsample_mode,
topk=topk, param_attention=param_attention, param_routing=param_routing,
diff_routing=diff_routing, soft_routing=soft_routing,
side_dwconv=side_dwconv,
auto_pad=auto_pad)
elif topk == -1:
| """
BiFormer impl.
author: ZHU Lei
github: https://github.com/rayleizhu
email: ray.leizhu@outlook.com
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
class Block(nn.Module):
def __init__(self, dim, input_resolution, drop_path=0., layer_scale_init_value=-1,num_heads=8, n_win=7, qk_dim=None, qk_scale=None,
kv_per_win=4, kv_downsample_ratio=4, kv_downsample_kernel=None, kv_downsample_mode='ada_avgpool',
topk=4, param_attention="qkvo", param_routing=False, diff_routing=False, soft_routing=False,
mlp_ratio=4, mlp_dwconv=False, side_dwconv=5, before_attn_dwconv=3, pre_norm=True, auto_pad=False):
super().__init__()
qk_dim = qk_dim or dim
self.input_resolution=input_resolution
# modules
if before_attn_dwconv > 0:
self.pos_embed = nn.Conv2d(dim, dim, kernel_size=before_attn_dwconv, padding=1, groups=dim)
else:
self.pos_embed = lambda x: 0
self.norm1 = nn.LayerNorm(dim, eps=1e-6) # important to avoid attention collapsing
if topk > 0:
self.attn = BiLevelRoutingAttention(dim=dim, num_heads=num_heads, n_win=n_win, qk_dim=qk_dim,
qk_scale=qk_scale, kv_per_win=kv_per_win,
kv_downsample_ratio=kv_downsample_ratio,
kv_downsample_kernel=kv_downsample_kernel,
kv_downsample_mode=kv_downsample_mode,
topk=topk, param_attention=param_attention, param_routing=param_routing,
diff_routing=diff_routing, soft_routing=soft_routing,
side_dwconv=side_dwconv,
auto_pad=auto_pad)
elif topk == -1: | self.attn = Attention(dim=dim) | 0 | 2023-12-29 05:45:26+00:00 | 4k |
shibing624/chatgpt-webui | src/overwrites.py | [
{
"identifier": "chuanhu_path",
"path": "src/presets.py",
"snippet": "class I18nAuto:\n def __init__(self):\n def __call__(self, key):\nCHATGLM_MODEL = None\nCHATGLM_TOKENIZER = None\nLLAMA_MODEL = None\nLLAMA_INFERENCER = None\nINITIAL_SYSTEM_PROMPT = \"You are a helpful assistant.\"\nAPI_HOST = ... | import os
import gradio as gr
from collections import namedtuple
from gradio.utils import validate_url
from gradio_client import utils as client_utils
from src.presets import chuanhu_path, assets_path
from src.utils import convert_bot_before_marked, convert_user_before_marked | 3,116 |
def postprocess(
self,
y,
):
"""
Parameters:
y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
Returns:
List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed.
"""
if y is None:
return []
processed_messages = []
for message_pair in y:
assert isinstance(
message_pair, (tuple, list)
), f"Expected a list of lists or list of tuples. Received: {message_pair}"
assert (
len(message_pair) == 2
), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
processed_messages.append(
[
self._postprocess_chat_messages(message_pair[0], "user"),
self._postprocess_chat_messages(message_pair[1], "bot"),
]
)
return processed_messages
def postprocess_chat_messages(
self, chat_message, role: str
):
if chat_message is None:
return None
elif isinstance(chat_message, (tuple, list)):
file_uri = chat_message[0]
if validate_url(file_uri):
filepath = file_uri
else:
filepath = self.make_temp_copy_if_needed(file_uri)
mime_type = client_utils.get_mimetype(filepath)
return {
"name": filepath,
"mime_type": mime_type,
"alt_text": chat_message[1] if len(chat_message) > 1 else None,
"data": None, # These last two fields are filled in by the frontend
"is_file": True,
}
elif isinstance(chat_message, str):
# chat_message = inspect.cleandoc(chat_message)
# escape html spaces
# chat_message = chat_message.replace(" ", " ")
if role == "bot":
chat_message = convert_bot_before_marked(chat_message)
elif role == "user":
chat_message = convert_user_before_marked(chat_message)
return chat_message
else:
raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
def add_classes_to_gradio_component(comp):
"""
this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others
code from stable-diffusion-webui <AUTOMATIC1111/stable-diffusion-webui>
"""
comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])]
if getattr(comp, 'multiselect', False):
comp.elem_classes.append('multiselect')
def IOComponent_init(self, *args, **kwargs):
res = original_IOComponent_init(self, *args, **kwargs)
add_classes_to_gradio_component(self)
return res
original_IOComponent_init = gr.components.IOComponent.__init__
gr.components.IOComponent.__init__ = IOComponent_init
def BlockContext_init(self, *args, **kwargs):
res = original_BlockContext_init(self, *args, **kwargs)
add_classes_to_gradio_component(self)
return res
original_BlockContext_init = gr.blocks.BlockContext.__init__
gr.blocks.BlockContext.__init__ = BlockContext_init
def get_html(filename):
path = os.path.join(chuanhu_path, "assets", "html", filename)
if os.path.exists(path):
with open(path, encoding="utf8") as file:
return file.read()
return ""
def webpath(fn):
|
def postprocess(
self,
y,
):
"""
Parameters:
y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
Returns:
List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed.
"""
if y is None:
return []
processed_messages = []
for message_pair in y:
assert isinstance(
message_pair, (tuple, list)
), f"Expected a list of lists or list of tuples. Received: {message_pair}"
assert (
len(message_pair) == 2
), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
processed_messages.append(
[
self._postprocess_chat_messages(message_pair[0], "user"),
self._postprocess_chat_messages(message_pair[1], "bot"),
]
)
return processed_messages
def postprocess_chat_messages(
self, chat_message, role: str
):
if chat_message is None:
return None
elif isinstance(chat_message, (tuple, list)):
file_uri = chat_message[0]
if validate_url(file_uri):
filepath = file_uri
else:
filepath = self.make_temp_copy_if_needed(file_uri)
mime_type = client_utils.get_mimetype(filepath)
return {
"name": filepath,
"mime_type": mime_type,
"alt_text": chat_message[1] if len(chat_message) > 1 else None,
"data": None, # These last two fields are filled in by the frontend
"is_file": True,
}
elif isinstance(chat_message, str):
# chat_message = inspect.cleandoc(chat_message)
# escape html spaces
# chat_message = chat_message.replace(" ", " ")
if role == "bot":
chat_message = convert_bot_before_marked(chat_message)
elif role == "user":
chat_message = convert_user_before_marked(chat_message)
return chat_message
else:
raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
def add_classes_to_gradio_component(comp):
"""
this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others
code from stable-diffusion-webui <AUTOMATIC1111/stable-diffusion-webui>
"""
comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])]
if getattr(comp, 'multiselect', False):
comp.elem_classes.append('multiselect')
def IOComponent_init(self, *args, **kwargs):
res = original_IOComponent_init(self, *args, **kwargs)
add_classes_to_gradio_component(self)
return res
original_IOComponent_init = gr.components.IOComponent.__init__
gr.components.IOComponent.__init__ = IOComponent_init
def BlockContext_init(self, *args, **kwargs):
res = original_BlockContext_init(self, *args, **kwargs)
add_classes_to_gradio_component(self)
return res
original_BlockContext_init = gr.blocks.BlockContext.__init__
gr.blocks.BlockContext.__init__ = BlockContext_init
def get_html(filename):
path = os.path.join(chuanhu_path, "assets", "html", filename)
if os.path.exists(path):
with open(path, encoding="utf8") as file:
return file.read()
return ""
def webpath(fn): | if fn.startswith(assets_path): | 0 | 2023-12-27 12:14:26+00:00 | 4k |
Rounak40/Fast-Torrent-Downloader | main.py | [
{
"identifier": "x1337",
"path": "torrents.py",
"snippet": "class x1337:\r\n def __init__(self, limit=10):\r\n self.BASE_URL = \"https://1337x.unblockit.ing\"\r\n self.LIMIT = limit\r\n self.session = requests.Session()\r\n self.session.headers = {\r\n 'use... | from torrents import x1337, thepiratebay, torrentio
from downloader import Seedr
| 3,179 |
class Provider:
def __init__(self) -> None:
self.limit = 10
self.provider_name = "Torrentio"
|
class Provider:
def __init__(self) -> None:
self.limit = 10
self.provider_name = "Torrentio"
| self.provider = torrentio(limit=self.limit)
| 2 | 2023-12-24 13:50:46+00:00 | 4k |
ConnectAI-E/GitMaya | server/tasks/lark/pull_request.py | [
{
"identifier": "get_bot_by_application_id",
"path": "server/tasks/lark/base.py",
"snippet": "def get_bot_by_application_id(app_id):\n application = (\n db.session.query(IMApplication)\n .filter(\n or_(\n IMApplication.app_id == app_id,\n IMAppli... | import json
import logging
from celery_app import app, celery
from connectai.lark.sdk import FeishuTextMessage
from model.schema import (
ChatGroup,
CodeApplication,
CodeUser,
IMUser,
PullRequest,
Repo,
Team,
TeamMember,
db,
)
from model.team import get_assignees_by_openid
from utils.github.repo import GitHubAppRepo
from utils.lark.pr_card import PullCard
from utils.lark.pr_manual import (
PrManual,
PullRequestDiff,
PullRequestLog,
PullRequestView,
)
from utils.lark.pr_tip_failed import PrTipFailed
from utils.lark.pr_tip_success import PrTipSuccess
from .base import (
get_bot_by_application_id,
get_git_object_by_message_id,
with_authenticated_github,
) | 3,039 | .filter(
ChatGroup.repo_id == pr.repo_id,
)
.first()
)
if chat_group and pr.message_id:
bot, _ = get_bot_by_application_id(chat_group.im_application_id)
result = bot.reply(
pr.message_id,
FeishuTextMessage(f"@{user_name}: {comment}"),
).json()
return result
return False
@celery.task()
def update_pull_request_card(pr_id: str) -> bool | dict:
"""Update PullRequest card message.
Args:
pr_id (str): PullRequest.id.
Returns:
bool | dict: True or False or FeishuMessage
"""
pr = db.session.query(PullRequest).filter(PullRequest.id == pr_id).first()
if pr:
chat_group = (
db.session.query(ChatGroup)
.filter(
ChatGroup.repo_id == pr.repo_id,
)
.first()
)
repo = db.session.query(Repo).filter(Repo.id == pr.repo_id).first()
if chat_group and repo:
bot, application = get_bot_by_application_id(chat_group.im_application_id)
team = db.session.query(Team).filter(Team.id == application.team_id).first()
if application and team:
repo_url = f"https://github.com/{team.name}/{repo.name}"
message = gen_pr_card_by_pr(pr, repo_url, team)
result = bot.update(pr.message_id, message).json()
return result
return False
def _get_github_app(app_id, message_id, content, data, *args, **kwargs):
root_id = data["event"]["message"].get(
"root_id", data["event"]["message"]["message_id"]
)
openid = data["event"]["sender"]["sender_id"]["open_id"]
_, _, pr = get_git_object_by_message_id(root_id)
if not pr:
return send_pull_request_failed_tip(
"找不到 Pull Request", app_id, message_id, content, data, *args, **kwargs
)
repo = (
db.session.query(Repo)
.filter(
Repo.id == pr.repo_id,
Repo.status == 0,
)
.first()
)
if not repo:
return send_pull_request_failed_tip(
"找不到项目", app_id, message_id, content, data, *args, **kwargs
)
code_application = (
db.session.query(CodeApplication)
.filter(
CodeApplication.id == repo.application_id,
)
.first()
)
if not code_application:
return send_pull_request_failed_tip(
"找不到对应的项目", app_id, message_id, content, data, *args, **kwargs
)
team = (
db.session.query(Team)
.filter(
Team.id == code_application.team_id,
)
.first()
)
if not team:
return send_pull_request_failed_tip(
"找不到对应的项目", app_id, message_id, content, data, *args, **kwargs
)
code_user_id = (
db.session.query(CodeUser.user_id)
.join(
TeamMember,
TeamMember.code_user_id == CodeUser.id,
)
.join(
IMUser,
IMUser.id == TeamMember.im_user_id,
)
.filter(
IMUser.openid == openid,
TeamMember.team_id == team.id,
)
.limit(1)
.scalar()
)
github_app = GitHubAppRepo(code_application.installation_id, user_id=code_user_id)
return github_app, team, repo, pr, root_id, openid
@celery.task()
|
@celery.task()
def send_pull_request_failed_tip(
content, app_id, message_id, *args, bot=None, **kwargs
):
"""send new card message to user.
Args:
app_id: IMApplication.app_id.
message_id: lark message id.
content: error message
"""
if not bot:
bot, _ = get_bot_by_application_id(app_id)
message = PrTipFailed(content=content)
return bot.reply(message_id, message).json()
@celery.task()
def send_pull_request_success_tip(
content, app_id, message_id, *args, bot=None, **kwargs
):
"""send new repo card message to user.
Args:
app_id: IMApplication.app_id.
message_id: lark message id.
content: success message
"""
if not bot:
bot, _ = get_bot_by_application_id(app_id)
message = PrTipSuccess(content=content)
return bot.reply(message_id, message).json()
def gen_pr_card_by_pr(pr: PullRequest, repo_url, team, maunal=False):
assignees = pr.extra.get("assignees", [])
reviewers = pr.extra.get("requested_reviewers", [])
if len(assignees):
assignees = [
openid
for openid, in db.session.query(IMUser.openid)
.join(TeamMember, TeamMember.im_user_id == IMUser.id)
.join(
CodeUser,
CodeUser.id == TeamMember.code_user_id,
)
.filter(
TeamMember.team_id == team.id,
CodeUser.name.in_([assignee["login"] for assignee in assignees]),
)
.all()
]
if len(reviewers):
reviewers = [
openid
for openid, in db.session.query(IMUser.openid)
.join(TeamMember, TeamMember.im_user_id == IMUser.id)
.join(
CodeUser,
CodeUser.id == TeamMember.code_user_id,
)
.filter(
TeamMember.team_id == team.id,
CodeUser.name.in_([reviewer["login"] for reviewer in reviewers]),
)
.all()
]
labels = [i["name"] for i in pr.extra.get("labels", [])]
status = pr.extra.get("state", "open")
merged = pr.extra.get("merged")
if status == "open":
status = "待完成"
elif status == "closed":
status = "已关闭"
if maunal:
return PrManual(
repo_url=repo_url,
pr_id=pr.pull_request_number,
persons=[], # 就没用上
assignees=assignees,
tags=labels,
merged=merged,
)
return PullCard(
repo_url=repo_url,
id=pr.pull_request_number,
title=pr.title,
description=pr.description,
base=pr.extra.get("base", {}),
head=pr.extra.get("head", {}),
status=status,
merged=merged,
persons=[], # TODO:应该是所有有写权限的人
assignees=assignees,
reviewers=reviewers,
labels=labels,
updated=pr.modified.strftime("%Y-%m-%d %H:%M:%S"),
)
@celery.task()
def send_pull_request_manual(app_id, message_id, content, data, *args, **kwargs):
root_id = data["event"]["message"]["root_id"]
_, _, pr = get_git_object_by_message_id(root_id)
if not pr:
return send_pull_request_failed_tip(
"找不到 Pull Request", app_id, message_id, content, data, *args, **kwargs
)
repo = (
db.session.query(Repo)
.filter(
Repo.id == pr.repo_id,
Repo.status == 0,
)
.first()
)
if not repo:
return send_pull_request_failed_tip(
"找不到项目", app_id, message_id, content, data, *args, **kwargs
)
bot, application = get_bot_by_application_id(app_id)
if not application:
return send_pull_request_failed_tip(
"找不到对应的应用", app_id, message_id, content, data, *args, bot=bot, **kwargs
)
team = (
db.session.query(Team)
.filter(
Team.id == application.team_id,
)
.first()
)
if not team:
return send_pull_request_failed_tip(
"找不到对应的项目", app_id, message_id, content, data, *args, bot=bot, **kwargs
)
repo_url = f"https://github.com/{team.name}/{repo.name}"
message = gen_pr_card_by_pr(pr, repo_url, team, maunal=True)
# 回复到话题内部
return bot.reply(message_id, message).json()
def send_pull_request_url_message(
app_id, message_id, content, data, *args, typ="view", **kwargs
):
root_id = data["event"]["message"]["root_id"]
_, _, pr = get_git_object_by_message_id(root_id)
if not pr:
return send_pull_request_failed_tip(
"找不到 Pull Request", app_id, message_id, content, data, *args, **kwargs
)
repo = (
db.session.query(Repo)
.filter(
Repo.id == pr.repo_id,
Repo.status == 0,
)
.first()
)
if not repo:
return send_pull_request_failed_tip(
"找不到项目", app_id, message_id, content, data, *args, **kwargs
)
bot, application = get_bot_by_application_id(app_id)
if not application:
return send_pull_request_failed_tip(
"找不到对应的应用", app_id, message_id, content, data, *args, bot=bot, **kwargs
)
team = (
db.session.query(Team)
.filter(
Team.id == application.team_id,
)
.first()
)
if not team:
return send_pull_request_failed_tip(
"找不到对应的项目", app_id, message_id, content, data, *args, bot=bot, **kwargs
)
repo_url = f"https://github.com/{team.name}/{repo.name}"
if "view" == typ:
message = PullRequestView(
repo_url=repo_url,
pr_id=pr.pull_request_number,
)
elif "log" == typ:
message = PullRequestLog(
repo_url=repo_url,
pr_id=pr.pull_request_number,
)
elif "diff" == typ:
message = PullRequestDiff(
repo_url=repo_url,
pr_id=pr.pull_request_number,
)
else:
return send_pull_request_failed_tip(
"找不到对应的项目", app_id, message_id, content, data, *args, bot=bot, **kwargs
)
# 回复到话题内部
return bot.reply(message_id, message).json()
@celery.task()
def send_pull_request_view_message(app_id, message_id, content, data, *args, **kwargs):
return send_pull_request_url_message(
app_id, message_id, content, data, *args, typ="view", **kwargs
)
@celery.task()
def send_pull_request_log_message(app_id, message_id, content, data, *args, **kwargs):
return send_pull_request_url_message(
app_id, message_id, content, data, *args, typ="log", **kwargs
)
@celery.task()
def send_pull_request_diff_message(app_id, message_id, content, data, *args, **kwargs):
return send_pull_request_url_message(
app_id, message_id, content, data, *args, typ="diff", **kwargs
)
@celery.task()
def send_pull_request_card(pull_request_id: str, assignees: list[str] = []):
"""send new PullRequest card message to user.
Args:
pull_request_id: PullRequest.id.
"""
pr = db.session.query(PullRequest).filter(PullRequest.id == pull_request_id).first()
if pr:
chat_group = (
db.session.query(ChatGroup)
.filter(
ChatGroup.repo_id == pr.repo_id,
)
.first()
)
repo = db.session.query(Repo).filter(Repo.id == pr.repo_id).first()
if chat_group and repo:
bot, application = get_bot_by_application_id(chat_group.im_application_id)
team = db.session.query(Team).filter(Team.id == application.team_id).first()
if application and team:
repo_url = f"https://github.com/{team.name}/{repo.name}"
message = gen_pr_card_by_pr(pr, repo_url, team)
result = bot.send(
chat_group.chat_id, message, receive_id_type="chat_id"
).json()
message_id = result.get("data", {}).get("message_id")
if message_id:
# save message_id
pr.message_id = message_id
db.session.commit()
users = (
"".join(
[f'<at user_id="{open_id}"></at>' for open_id in assignees]
)
if len(assignees)
else ""
)
first_message_result = bot.reply(
message_id,
# TODO 第一条话题消息,直接放repo_url
FeishuTextMessage(
f"{users}{repo_url}/pull/{pr.pull_request_number}"
),
reply_in_thread=True,
).json()
logging.info("debug first_message_result %r", first_message_result)
return result
return False
@celery.task()
def send_pull_request_comment(pull_request_id, comment, user_name: str):
"""send new pull_request comment message to user.
Args:
pull_request_id: PullRequest.id.
comment: str
"""
pr = db.session.query(PullRequest).filter(PullRequest.id == pull_request_id).first()
if pr:
chat_group = (
db.session.query(ChatGroup)
.filter(
ChatGroup.repo_id == pr.repo_id,
)
.first()
)
if chat_group and pr.message_id:
bot, _ = get_bot_by_application_id(chat_group.im_application_id)
result = bot.reply(
pr.message_id,
FeishuTextMessage(f"@{user_name}: {comment}"),
).json()
return result
return False
@celery.task()
def update_pull_request_card(pr_id: str) -> bool | dict:
"""Update PullRequest card message.
Args:
pr_id (str): PullRequest.id.
Returns:
bool | dict: True or False or FeishuMessage
"""
pr = db.session.query(PullRequest).filter(PullRequest.id == pr_id).first()
if pr:
chat_group = (
db.session.query(ChatGroup)
.filter(
ChatGroup.repo_id == pr.repo_id,
)
.first()
)
repo = db.session.query(Repo).filter(Repo.id == pr.repo_id).first()
if chat_group and repo:
bot, application = get_bot_by_application_id(chat_group.im_application_id)
team = db.session.query(Team).filter(Team.id == application.team_id).first()
if application and team:
repo_url = f"https://github.com/{team.name}/{repo.name}"
message = gen_pr_card_by_pr(pr, repo_url, team)
result = bot.update(pr.message_id, message).json()
return result
return False
def _get_github_app(app_id, message_id, content, data, *args, **kwargs):
root_id = data["event"]["message"].get(
"root_id", data["event"]["message"]["message_id"]
)
openid = data["event"]["sender"]["sender_id"]["open_id"]
_, _, pr = get_git_object_by_message_id(root_id)
if not pr:
return send_pull_request_failed_tip(
"找不到 Pull Request", app_id, message_id, content, data, *args, **kwargs
)
repo = (
db.session.query(Repo)
.filter(
Repo.id == pr.repo_id,
Repo.status == 0,
)
.first()
)
if not repo:
return send_pull_request_failed_tip(
"找不到项目", app_id, message_id, content, data, *args, **kwargs
)
code_application = (
db.session.query(CodeApplication)
.filter(
CodeApplication.id == repo.application_id,
)
.first()
)
if not code_application:
return send_pull_request_failed_tip(
"找不到对应的项目", app_id, message_id, content, data, *args, **kwargs
)
team = (
db.session.query(Team)
.filter(
Team.id == code_application.team_id,
)
.first()
)
if not team:
return send_pull_request_failed_tip(
"找不到对应的项目", app_id, message_id, content, data, *args, **kwargs
)
code_user_id = (
db.session.query(CodeUser.user_id)
.join(
TeamMember,
TeamMember.code_user_id == CodeUser.id,
)
.join(
IMUser,
IMUser.id == TeamMember.im_user_id,
)
.filter(
IMUser.openid == openid,
TeamMember.team_id == team.id,
)
.limit(1)
.scalar()
)
github_app = GitHubAppRepo(code_application.installation_id, user_id=code_user_id)
return github_app, team, repo, pr, root_id, openid
@celery.task() | @with_authenticated_github() | 2 | 2023-12-22 02:43:21+00:00 | 4k |
camenduru/AnyDoor-online-hf | dinov2/dinov2/layers/block.py | [
{
"identifier": "Attention",
"path": "dinov2/dinov2/layers/attention.py",
"snippet": "class Attention(nn.Module):\n def __init__(\n self,\n dim: int,\n num_heads: int = 8,\n qkv_bias: bool = False,\n proj_bias: bool = True,\n attn_drop: float = 0.0,\n ... | import logging
import torch
from typing import Callable, List, Any, Tuple, Dict
from torch import nn, Tensor
from .attention import Attention, MemEffAttention
from .drop_path import DropPath
from .layer_scale import LayerScale
from .mlp import Mlp
from xformers.ops import fmha
from xformers.ops import scaled_index_add, index_select_cat | 3,003 | if self.training and self.sample_drop_ratio > 0.1:
# the overhead is compensated only for a drop path rate larger than 0.1
x = drop_add_residual_stochastic_depth(
x,
residual_func=attn_residual_func,
sample_drop_ratio=self.sample_drop_ratio,
)
x = drop_add_residual_stochastic_depth(
x,
residual_func=ffn_residual_func,
sample_drop_ratio=self.sample_drop_ratio,
)
elif self.training and self.sample_drop_ratio > 0.0:
x = x + self.drop_path1(attn_residual_func(x))
x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2
else:
x = x + attn_residual_func(x)
x = x + ffn_residual_func(x)
return x
def drop_add_residual_stochastic_depth(
x: Tensor,
residual_func: Callable[[Tensor], Tensor],
sample_drop_ratio: float = 0.0,
) -> Tensor:
# 1) extract subset using permutation
b, n, d = x.shape
sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
x_subset = x[brange]
# 2) apply residual_func to get residual
residual = residual_func(x_subset)
x_flat = x.flatten(1)
residual = residual.flatten(1)
residual_scale_factor = b / sample_subset_size
# 3) add the residual
x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
return x_plus_residual.view_as(x)
def get_branges_scales(x, sample_drop_ratio=0.0):
b, n, d = x.shape
sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
residual_scale_factor = b / sample_subset_size
return brange, residual_scale_factor
def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):
if scaling_vector is None:
x_flat = x.flatten(1)
residual = residual.flatten(1)
x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
else:
x_plus_residual = scaled_index_add(
x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor
)
return x_plus_residual
attn_bias_cache: Dict[Tuple, Any] = {}
def get_attn_bias_and_cat(x_list, branges=None):
"""
this will perform the index select, cat the tensors, and provide the attn_bias from cache
"""
batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]
all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
if all_shapes not in attn_bias_cache.keys():
seqlens = []
for b, x in zip(batch_sizes, x_list):
for _ in range(b):
seqlens.append(x.shape[1])
attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
attn_bias._batch_sizes = batch_sizes
attn_bias_cache[all_shapes] = attn_bias
if branges is not None:
cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
else:
tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
cat_tensors = torch.cat(tensors_bs1, dim=1)
return attn_bias_cache[all_shapes], cat_tensors
def drop_add_residual_stochastic_depth_list(
x_list: List[Tensor],
residual_func: Callable[[Tensor, Any], Tensor],
sample_drop_ratio: float = 0.0,
scaling_vector=None,
) -> Tensor:
# 1) generate random set of indices for dropping samples in the batch
branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]
branges = [s[0] for s in branges_scales]
residual_scale_factors = [s[1] for s in branges_scales]
# 2) get attention bias and index+concat the tensors
attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)
# 3) apply residual_func to get residual, and split the result
residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore
outputs = []
for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):
outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))
return outputs
class NestedTensorBlock(Block):
def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]:
"""
x_list contains a list of tensors to nest together and run
"""
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/master/vision_transformer.py
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/layers/patch_embed.py
logger = logging.getLogger("dinov2")
try:
XFORMERS_AVAILABLE = True
except ImportError:
logger.warning("xFormers not available")
XFORMERS_AVAILABLE = False
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.0,
qkv_bias: bool = False,
proj_bias: bool = True,
ffn_bias: bool = True,
drop: float = 0.0,
attn_drop: float = 0.0,
init_values=None,
drop_path: float = 0.0,
act_layer: Callable[..., nn.Module] = nn.GELU,
norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
attn_class: Callable[..., nn.Module] = Attention,
ffn_layer: Callable[..., nn.Module] = Mlp,
) -> None:
super().__init__()
# print(f"biases: qkv: {qkv_bias}, proj: {proj_bias}, ffn: {ffn_bias}")
self.norm1 = norm_layer(dim)
self.attn = attn_class(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
proj_bias=proj_bias,
attn_drop=attn_drop,
proj_drop=drop,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = ffn_layer(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
bias=ffn_bias,
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.sample_drop_ratio = drop_path
def forward(self, x: Tensor) -> Tensor:
def attn_residual_func(x: Tensor) -> Tensor:
return self.ls1(self.attn(self.norm1(x)))
def ffn_residual_func(x: Tensor) -> Tensor:
return self.ls2(self.mlp(self.norm2(x)))
if self.training and self.sample_drop_ratio > 0.1:
# the overhead is compensated only for a drop path rate larger than 0.1
x = drop_add_residual_stochastic_depth(
x,
residual_func=attn_residual_func,
sample_drop_ratio=self.sample_drop_ratio,
)
x = drop_add_residual_stochastic_depth(
x,
residual_func=ffn_residual_func,
sample_drop_ratio=self.sample_drop_ratio,
)
elif self.training and self.sample_drop_ratio > 0.0:
x = x + self.drop_path1(attn_residual_func(x))
x = x + self.drop_path1(ffn_residual_func(x)) # FIXME: drop_path2
else:
x = x + attn_residual_func(x)
x = x + ffn_residual_func(x)
return x
def drop_add_residual_stochastic_depth(
x: Tensor,
residual_func: Callable[[Tensor], Tensor],
sample_drop_ratio: float = 0.0,
) -> Tensor:
# 1) extract subset using permutation
b, n, d = x.shape
sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
x_subset = x[brange]
# 2) apply residual_func to get residual
residual = residual_func(x_subset)
x_flat = x.flatten(1)
residual = residual.flatten(1)
residual_scale_factor = b / sample_subset_size
# 3) add the residual
x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
return x_plus_residual.view_as(x)
def get_branges_scales(x, sample_drop_ratio=0.0):
b, n, d = x.shape
sample_subset_size = max(int(b * (1 - sample_drop_ratio)), 1)
brange = (torch.randperm(b, device=x.device))[:sample_subset_size]
residual_scale_factor = b / sample_subset_size
return brange, residual_scale_factor
def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):
if scaling_vector is None:
x_flat = x.flatten(1)
residual = residual.flatten(1)
x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
else:
x_plus_residual = scaled_index_add(
x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor
)
return x_plus_residual
attn_bias_cache: Dict[Tuple, Any] = {}
def get_attn_bias_and_cat(x_list, branges=None):
"""
this will perform the index select, cat the tensors, and provide the attn_bias from cache
"""
batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]
all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
if all_shapes not in attn_bias_cache.keys():
seqlens = []
for b, x in zip(batch_sizes, x_list):
for _ in range(b):
seqlens.append(x.shape[1])
attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
attn_bias._batch_sizes = batch_sizes
attn_bias_cache[all_shapes] = attn_bias
if branges is not None:
cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
else:
tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
cat_tensors = torch.cat(tensors_bs1, dim=1)
return attn_bias_cache[all_shapes], cat_tensors
def drop_add_residual_stochastic_depth_list(
x_list: List[Tensor],
residual_func: Callable[[Tensor, Any], Tensor],
sample_drop_ratio: float = 0.0,
scaling_vector=None,
) -> Tensor:
# 1) generate random set of indices for dropping samples in the batch
branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]
branges = [s[0] for s in branges_scales]
residual_scale_factors = [s[1] for s in branges_scales]
# 2) get attention bias and index+concat the tensors
attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)
# 3) apply residual_func to get residual, and split the result
residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias)) # type: ignore
outputs = []
for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):
outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))
return outputs
class NestedTensorBlock(Block):
def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]:
"""
x_list contains a list of tensors to nest together and run
""" | assert isinstance(self.attn, MemEffAttention) | 1 | 2023-12-25 04:48:34+00:00 | 4k |
yixinNB/pyscrcpy | pyscrcpy/core.py | [
{
"identifier": "EVENT_DISCONNECT",
"path": "pyscrcpy/const.py",
"snippet": "EVENT_DISCONNECT = \"disconnect\""
},
{
"identifier": "EVENT_FRAME",
"path": "pyscrcpy/const.py",
"snippet": "EVENT_FRAME = \"frame\""
},
{
"identifier": "EVENT_INIT",
"path": "pyscrcpy/const.py",
... | import os
import abc
import socket
import struct
import threading
import time
import numpy as np
import numpy.typing as npt
import cv2 as cv
import cv2
from pathlib import Path
from time import sleep
from typing import Any, Callable, Optional, Tuple, Union
from adbutils import AdbConnection, AdbDevice, AdbError, Network, adb
from av.codec import CodecContext # type: ignore
from av.error import InvalidDataError # type: ignore
from loguru import logger
from .const import EVENT_DISCONNECT, EVENT_FRAME, EVENT_INIT, LOCK_SCREEN_ORIENTATION_UNLOCKED, EVENT_ONCHANGE
from .control import ControlSender | 3,389 | connection is alive.
lock_screen_orientation: lock screen in a particular orientation.
The available screen orientation are specify in const.py
in variables LOCK_SCREEN_ORIENTATION*
"""
# Params挪到后面去
self.max_size = max_size
self.bitrate = bitrate
self.max_fps = max_fps
self.block_frame = block_frame
self.stay_awake = stay_awake
self.lock_screen_orientation = lock_screen_orientation
self.skip_same_frame = skip_same_frame
self.min_frame_interval = 1 / max_fps
if device is None:
try:
device = adb.device_list()[0]
except IndexError:
raise Exception("Cannot connect to phone")
elif isinstance(device, str):
device = adb.device(serial=device)
self.device = device
self.listeners = dict(frame=[], init=[], disconnect=[], onchange=[])
# User accessible
self.last_frame: Optional[np.ndarray] = None
self.resolution: Optional[Tuple[int, int]] = None
self.device_name: Optional[str] = None
self.control = ControlSender(self)
# Need to destroy
self.alive = False
self.__server_stream: Optional[AdbConnection] = None
self.__video_socket: Optional[socket.socket] = None
self.control_socket: Optional[socket.socket] = None
self.control_socket_lock = threading.Lock()
def __init_server_connection(self) -> None:
"""
Connect to android server, there will be two sockets: video and control socket.
This method will also set resolution property.
"""
for _ in range(30): # 超时 写死
try:
self.__video_socket = self.device.create_connection(
Network.LOCAL_ABSTRACT, "scrcpy"
)
break
except AdbError:
sleep(0.1)
pass
else:
raise ConnectionError("Failed to connect scrcpy-server after 3 seconds")
dummy_byte = self.__video_socket.recv(1)
if not len(dummy_byte):
raise ConnectionError("Did not receive Dummy Byte!")
self.control_socket = self.device.create_connection(
Network.LOCAL_ABSTRACT, "scrcpy"
)
self.device_name = self.__video_socket.recv(64).decode("utf-8").rstrip("\x00")
if not len(self.device_name):
raise ConnectionError("Did not receive Device Name!")
res = self.__video_socket.recv(4)
self.resolution = struct.unpack(">HH", res)
self.__video_socket.setblocking(False)
def __deploy_server(self) -> None:
"""
Deploy server to android device.
Push the scrcpy-server.jar into the Android device using
the adb.push(...). Then a basic connection between client and server
is established.
"""
cmd = [
"CLASSPATH=/data/local/tmp/scrcpy-server.jar",
"app_process",
"/",
"com.genymobile.scrcpy.Server",
VERSION, # Scrcpy server version
"info", # Log level: info, verbose...
f"{self.max_size}", # Max screen width (long side)
f"{self.bitrate}", # Bitrate of video
f"{self.max_fps}", # Max frame per second
f"{self.lock_screen_orientation}", # Lock screen orientation
"true", # Tunnel forward
"-", # Crop screen
"false", # Send frame rate to client
"true", # Control enabled
"0", # Display id
"false", # Show touches
"true" if self.stay_awake else "false", # Stay awake
"-", # Codec (video encoding) options
"-", # Encoder name
"false", # Power off screen after server closed
]
self.device.push(JAR, "/data/local/tmp/")
self.__server_stream: AdbConnection = self.device.shell(cmd, stream=True)
def start(self, threaded: bool = False) -> None:
"""
Start the client-server connection.
In order to avoid unpredictable behaviors, this method must be called
after the on_init and on_frame callback are specify.
Args:
threaded : If set to True the stream loop willl run in a separated
thread. This mean that the code after client.strart() will be
run. Otherwise the client.start() method starts a endless loop
and the code after this method will never run. todo new_thread
"""
assert self.alive is False
self.__deploy_server()
self.__init_server_connection()
self.alive = True
|
Frame = npt.NDArray[np.int8]
VERSION = "1.20"
HERE = Path(__file__).resolve().parent
JAR = HERE / f"scrcpy-server.jar"
class Client:
def __init__(
self,
device: Optional[Union[AdbDevice, str]] = None,
max_size: int = 0,
bitrate: int = 8000000,
max_fps: int = 0,
block_frame: bool = True,
stay_awake: bool = True,
lock_screen_orientation: int = LOCK_SCREEN_ORIENTATION_UNLOCKED,
skip_same_frame=False
):
"""
[ok]Create a scrcpy client. The client won't be started until you call .start()
Args:
device: Android device to coennect to. Colud be also specify by
serial string. If device is None the client try to connect
to the first available device in adb deamon.
max_size: Specify the maximum dimension of the video stream. This
dimensioin refer both to width and hight.0: no limit[已校验, max size of width or height]
bitrate: bitrate
max_fps: Maximum FPS (Frame Per Second) of the video stream. If it
is set to 0 it means that there is not limit to FPS.
This feature is supported by android 10 or newer.
[flip]: 没有这个参数, 会自动处理
block_frame: If set to true, the on_frame callbacks will be only
apply on not empty frames. Otherwise try to apply on_frame
callbacks on every frame, but this could raise exceptions in
callbacks if they are not able to handle None value for frame.
True:跳过空白帧
stay_awake: keep Android device awake while the client-server
connection is alive.
lock_screen_orientation: lock screen in a particular orientation.
The available screen orientation are specify in const.py
in variables LOCK_SCREEN_ORIENTATION*
"""
# Params挪到后面去
self.max_size = max_size
self.bitrate = bitrate
self.max_fps = max_fps
self.block_frame = block_frame
self.stay_awake = stay_awake
self.lock_screen_orientation = lock_screen_orientation
self.skip_same_frame = skip_same_frame
self.min_frame_interval = 1 / max_fps
if device is None:
try:
device = adb.device_list()[0]
except IndexError:
raise Exception("Cannot connect to phone")
elif isinstance(device, str):
device = adb.device(serial=device)
self.device = device
self.listeners = dict(frame=[], init=[], disconnect=[], onchange=[])
# User accessible
self.last_frame: Optional[np.ndarray] = None
self.resolution: Optional[Tuple[int, int]] = None
self.device_name: Optional[str] = None
self.control = ControlSender(self)
# Need to destroy
self.alive = False
self.__server_stream: Optional[AdbConnection] = None
self.__video_socket: Optional[socket.socket] = None
self.control_socket: Optional[socket.socket] = None
self.control_socket_lock = threading.Lock()
def __init_server_connection(self) -> None:
"""
Connect to android server, there will be two sockets: video and control socket.
This method will also set resolution property.
"""
for _ in range(30): # 超时 写死
try:
self.__video_socket = self.device.create_connection(
Network.LOCAL_ABSTRACT, "scrcpy"
)
break
except AdbError:
sleep(0.1)
pass
else:
raise ConnectionError("Failed to connect scrcpy-server after 3 seconds")
dummy_byte = self.__video_socket.recv(1)
if not len(dummy_byte):
raise ConnectionError("Did not receive Dummy Byte!")
self.control_socket = self.device.create_connection(
Network.LOCAL_ABSTRACT, "scrcpy"
)
self.device_name = self.__video_socket.recv(64).decode("utf-8").rstrip("\x00")
if not len(self.device_name):
raise ConnectionError("Did not receive Device Name!")
res = self.__video_socket.recv(4)
self.resolution = struct.unpack(">HH", res)
self.__video_socket.setblocking(False)
def __deploy_server(self) -> None:
"""
Deploy server to android device.
Push the scrcpy-server.jar into the Android device using
the adb.push(...). Then a basic connection between client and server
is established.
"""
cmd = [
"CLASSPATH=/data/local/tmp/scrcpy-server.jar",
"app_process",
"/",
"com.genymobile.scrcpy.Server",
VERSION, # Scrcpy server version
"info", # Log level: info, verbose...
f"{self.max_size}", # Max screen width (long side)
f"{self.bitrate}", # Bitrate of video
f"{self.max_fps}", # Max frame per second
f"{self.lock_screen_orientation}", # Lock screen orientation
"true", # Tunnel forward
"-", # Crop screen
"false", # Send frame rate to client
"true", # Control enabled
"0", # Display id
"false", # Show touches
"true" if self.stay_awake else "false", # Stay awake
"-", # Codec (video encoding) options
"-", # Encoder name
"false", # Power off screen after server closed
]
self.device.push(JAR, "/data/local/tmp/")
self.__server_stream: AdbConnection = self.device.shell(cmd, stream=True)
def start(self, threaded: bool = False) -> None:
"""
Start the client-server connection.
In order to avoid unpredictable behaviors, this method must be called
after the on_init and on_frame callback are specify.
Args:
threaded : If set to True the stream loop willl run in a separated
thread. This mean that the code after client.strart() will be
run. Otherwise the client.start() method starts a endless loop
and the code after this method will never run. todo new_thread
"""
assert self.alive is False
self.__deploy_server()
self.__init_server_connection()
self.alive = True | for func in self.listeners[EVENT_INIT]: | 2 | 2023-12-23 12:52:58+00:00 | 4k |
andreafailla/pix2beats | ui.py | [
{
"identifier": "resize_and_convert",
"path": "backend.py",
"snippet": "def resize_and_convert(filename, tmpdir, n_pixels=None):\n \"\"\"\n Resize the image, convert to hsv, and save as png\n\n :param filename:\n :param tmpdir:\n :param n_pixels:\n :return:\n \"\"\"\n # Saves\n ... | import json # io
import tempfile
import streamlit as st # UI
from PIL import Image # image processing
from backend import resize_and_convert, trackmaker # processing
from backend import rolling_title # animation
from constants import SCALES, NOTES, HARMONIES, SAMPLE_IMAGES # constants
from my_presets import PRESETS | 3,525 |
def update_session_state(preset):
for k, v in preset.items():
if k != "octave":
st.session_state[k] = v
else:
octave_options = ["Low", "Mid", "High"]
st.session_state[k] = octave_options[v - 1]
def write_intro():
"""Defines general settings and introduces the app.
:return: placeholder for the rolling title
"""
st.set_page_config(
page_title="Pix2Beats",
page_icon=":musical_note:",
layout="centered",
initial_sidebar_state="expanded",
)
st.markdown(
"""
<style>
.stApp {
background: url("https://images.unsplash.com/photo-1557695126-fa2ce36f6828?q=80&w=2670&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D");
background-size: cover;
background-opacity: 0;
}
</style>""",
unsafe_allow_html=True,
)
st.title(":blue[Pix]2:red[Beats]")
plh = st.empty()
# Display the description
st.markdown(
"""
Welcome to :blue[Pix]2:red[Beats]—a web application at the intersection of visual art and musical expression.
Harnessing the power of Artificial Intelligence, :blue[Pix]2:red[Beats] transforms your images into sounds,
unlocking a fascinating synergy between the realms of visual and auditory creativity.
At the heart of :blue[Pix]2:red[Beats] lies the intuition that both images and sound can be effortlessly
represented as matrices of numbers.
This unique foundation allows us to create a one-of-a-kind mapping between color spaces and musical scales.
Choose an image, tinker with the parameters, and let :blue[Pix]2:red[Beats] do the rest :musical_note:
"""
)
return plh
def handle_presets():
presetsel, presetupl, _ = st.columns([1, 1, 2])
with presetsel:
preset_name = st.selectbox(
"***Choose a preset***",
PRESETS.keys(),
key="preset_select",
help="Tip: you can modify an existing preset by selecting it and then selecting "
"*None* from this list.",
)
if preset_name is not None:
if preset_name != "None":
update_session_state(PRESETS[preset_name])
with presetupl:
uploaded_preset = st.file_uploader(
"***...or upload your own!***", type=["json"]
)
css = """
<style>
[data-testid='stFileUploader'] {
width: max-content;
}
[data-testid='stFileUploader'] section {
padding: 0;
float: left;
}
[data-testid='stFileUploader'] section > input + div {
display: none;
}
[data-testid='stFileUploader'] section + div {
float: right;
padding-top: 0;
}
</style>
"""
st.markdown(css, unsafe_allow_html=True)
if uploaded_preset is not None:
preset_name = uploaded_preset.name.split(".")[0]
preset = json.load(uploaded_preset)
PRESETS[preset_name] = preset
update_session_state(preset)
def make_sidebar_and_select_file():
"""
Create the sidebar for the app
The sidebar lets the user select an image to use
:return: the image filename
"""
filename = None
if (
st.sidebar.radio(
"Image to use",
("Use Example Image", "Upload Image"),
label_visibility="hidden",
)
== "Use Example Image"
):
|
def init_session_state():
for k, v in PRESETS["None"].items():
if k not in st.session_state:
if k != "octave":
st.session_state[k] = v
else:
octave_options = ["Low", "Mid", "High"]
st.session_state[k] = octave_options[v - 1]
def update_session_state(preset):
for k, v in preset.items():
if k != "octave":
st.session_state[k] = v
else:
octave_options = ["Low", "Mid", "High"]
st.session_state[k] = octave_options[v - 1]
def write_intro():
"""Defines general settings and introduces the app.
:return: placeholder for the rolling title
"""
st.set_page_config(
page_title="Pix2Beats",
page_icon=":musical_note:",
layout="centered",
initial_sidebar_state="expanded",
)
st.markdown(
"""
<style>
.stApp {
background: url("https://images.unsplash.com/photo-1557695126-fa2ce36f6828?q=80&w=2670&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D");
background-size: cover;
background-opacity: 0;
}
</style>""",
unsafe_allow_html=True,
)
st.title(":blue[Pix]2:red[Beats]")
plh = st.empty()
# Display the description
st.markdown(
"""
Welcome to :blue[Pix]2:red[Beats]—a web application at the intersection of visual art and musical expression.
Harnessing the power of Artificial Intelligence, :blue[Pix]2:red[Beats] transforms your images into sounds,
unlocking a fascinating synergy between the realms of visual and auditory creativity.
At the heart of :blue[Pix]2:red[Beats] lies the intuition that both images and sound can be effortlessly
represented as matrices of numbers.
This unique foundation allows us to create a one-of-a-kind mapping between color spaces and musical scales.
Choose an image, tinker with the parameters, and let :blue[Pix]2:red[Beats] do the rest :musical_note:
"""
)
return plh
def handle_presets():
presetsel, presetupl, _ = st.columns([1, 1, 2])
with presetsel:
preset_name = st.selectbox(
"***Choose a preset***",
PRESETS.keys(),
key="preset_select",
help="Tip: you can modify an existing preset by selecting it and then selecting "
"*None* from this list.",
)
if preset_name is not None:
if preset_name != "None":
update_session_state(PRESETS[preset_name])
with presetupl:
uploaded_preset = st.file_uploader(
"***...or upload your own!***", type=["json"]
)
css = """
<style>
[data-testid='stFileUploader'] {
width: max-content;
}
[data-testid='stFileUploader'] section {
padding: 0;
float: left;
}
[data-testid='stFileUploader'] section > input + div {
display: none;
}
[data-testid='stFileUploader'] section + div {
float: right;
padding-top: 0;
}
</style>
"""
st.markdown(css, unsafe_allow_html=True)
if uploaded_preset is not None:
preset_name = uploaded_preset.name.split(".")[0]
preset = json.load(uploaded_preset)
PRESETS[preset_name] = preset
update_session_state(preset)
def make_sidebar_and_select_file():
"""
Create the sidebar for the app
The sidebar lets the user select an image to use
:return: the image filename
"""
filename = None
if (
st.sidebar.radio(
"Image to use",
("Use Example Image", "Upload Image"),
label_visibility="hidden",
)
== "Use Example Image"
): | filename = st.sidebar.selectbox("Choose a sample image", SAMPLE_IMAGES) | 6 | 2023-12-30 13:12:10+00:00 | 4k |
AbstractUmbra/GreatAsset | great_asset/save_file.py | [
{
"identifier": "decrypt",
"path": "great_asset/crypt.py",
"snippet": "def decrypt(\n *, path: str | PathLike[str] | Path | None = None, data: bytes | None = None\n) -> Any: # it returns the type of file we decrypt but alas\n if not path and not data:\n raise ValueError(\"Either `path` or ... | import random
from pathlib import Path
from typing import TYPE_CHECKING, Any, Generic, Self, TypeVar
from .crypt import decrypt, encrypt
from .enums import BestiaryEntry, ExtraUnlock, Item, Moon, Scrap, ShipUnlock
from .item import GrabbableScrap
from .utils import MISSING, SaveValue, _to_json, resolve_save_path # type: ignore[reportPrivateUsage] we allow this here.
from .vector import Vector
from os import PathLike
from types import TracebackType
from .types_.config_file import ConfigFile as ConfigFileType
from .types_.save_file import (
SaveFile as SaveFileType,
)
from .types_.shared import * | 3,353 | """
The MIT License (MIT)
Copyright (c) 2023-present AbstractUmbra
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
if TYPE_CHECKING:
SaveT = TypeVar("SaveT", "SaveFileType", "ConfigFileType")
TEMP_FILE = Path("./_previously_decrypted_file.json")
TIPS = [
"LC_MoveObjectsTip",
"LC_StorageTip",
"LC_LightningTip",
"LCTip_SecureDoors",
"LC_EclipseTip",
"LCTip_SellScrap",
"LCTip_UseManual",
"LC_IntroTip1",
]
__all__ = (
"SaveFile",
"ConfigFile",
)
class _BaseSaveFile(Generic[SaveT]):
_inner_data: SaveT
_file_type: str
_extra_data: dict[str, Any]
_written: bool
_skip_parsing: bool
__slots__ = (
"_inner_data",
"_file_type",
"_extra_data",
"_written",
"_skip_parsing",
"path",
)
def __init__(self, path: str | PathLike[str] | Path, /) -> None:
self._skip_parsing = False
self._written = False
if not isinstance(path, Path):
path = Path(path)
if not path.exists():
raise ValueError("The path given does not exist")
self.path: Path = path
self._parse_file()
@classmethod
def from_data(cls, *, data: bytes, path: Path | None = None, save_number: SaveValue | None = None) -> Self:
_number = save_number or ""
path = path or Path(f"./LCSaveFile{_number}")
file = cls.__new__(cls)
file._skip_parsing = True
| """
The MIT License (MIT)
Copyright (c) 2023-present AbstractUmbra
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
if TYPE_CHECKING:
SaveT = TypeVar("SaveT", "SaveFileType", "ConfigFileType")
TEMP_FILE = Path("./_previously_decrypted_file.json")
TIPS = [
"LC_MoveObjectsTip",
"LC_StorageTip",
"LC_LightningTip",
"LCTip_SecureDoors",
"LC_EclipseTip",
"LCTip_SellScrap",
"LCTip_UseManual",
"LC_IntroTip1",
]
__all__ = (
"SaveFile",
"ConfigFile",
)
class _BaseSaveFile(Generic[SaveT]):
_inner_data: SaveT
_file_type: str
_extra_data: dict[str, Any]
_written: bool
_skip_parsing: bool
__slots__ = (
"_inner_data",
"_file_type",
"_extra_data",
"_written",
"_skip_parsing",
"path",
)
def __init__(self, path: str | PathLike[str] | Path, /) -> None:
self._skip_parsing = False
self._written = False
if not isinstance(path, Path):
path = Path(path)
if not path.exists():
raise ValueError("The path given does not exist")
self.path: Path = path
self._parse_file()
@classmethod
def from_data(cls, *, data: bytes, path: Path | None = None, save_number: SaveValue | None = None) -> Self:
_number = save_number or ""
path = path or Path(f"./LCSaveFile{_number}")
file = cls.__new__(cls)
file._skip_parsing = True
| decrypted: SaveT = decrypt(data=data) | 0 | 2023-12-25 11:03:20+00:00 | 4k |
Amirtheahmed/ddd-cqrs-fastapi | src/apps/photostore/dependencies/PhotoStoreContainer.py | [
{
"identifier": "StatusGetController",
"path": "src/apps/backoffice/controllers/StatusGetController.py",
"snippet": "class StatusGetController(BackofficeController):\n\n def __init__(self):\n pass\n\n async def run(self, req: Request) -> JSONResponse:\n return JSONResponse(status_cod... | from dependency_injector import containers, providers
from src.apps.backoffice.controllers.StatusGetController import StatusGetController
from src.apps.photostore.controllers.PhotoPostController import PhotoPostController
from src.contexts.photostore.photo.application.createone.CreatePhotoCommandHandler import CreatePhotoCommandHandler
from src.contexts.photostore.photo.application.createone.PhotoCreator import PhotoCreator
from src.contexts.photostore.photo.infrastructure.persistence.MinioPhotoStorePhotoRepository import MinioPhotoRepository
from src.contexts.photostore.photo.infrastructure.persistence.config.MinioPhotoConfigFactory import \
MinioPhotoConfigFactory
from src.contexts.photostore.photoregistry.application.CreatePhotoRegistryOnPhotoCreated import \
CreatePhotoRegistryOnPhotoCreated
from src.contexts.photostore.photoregistry.application.PhotoRegistryCreator import PhotoRegistryCreator
from src.contexts.photostore.photoregistry.infrastructure.persistence.PyMongoPhotoRegistryRepository import \
PyMongoPhotoRegistryRepository
from src.contexts.photostore.photoregistry.infrastructure.persistence.config.PyMongoPhotoRegistryConfigFactory import \
PyMongoPhotoRegistryConfigFactory
from src.contexts.shared.Infrastructure.commandbus.InMemoryCommandBus import InMemoryCommandBus
from src.contexts.shared.Infrastructure.eventbus.InMemoryEventBus import InMemoryEventBus
from src.contexts.shared.Infrastructure.persistence.minio.MinioClientFactory import MinioClientFactory
from src.contexts.shared.Infrastructure.persistence.mongo.PyMongoClientFactory import PyMongoClientFactory | 3,241 |
class PhotoStoreContainer(containers.DeclarativeContainer):
event_bus = providers.Singleton(
InMemoryEventBus,
)
photo_minio_config = providers.Singleton(MinioPhotoConfigFactory.create)
photo_minio_client = providers.Singleton(MinioClientFactory.create_instance, 'photo', photo_minio_config)
photo_registry_mongo_config = providers.Singleton(PyMongoPhotoRegistryConfigFactory.create)
photo_registry_mongo_client = providers.Singleton(PyMongoClientFactory.create_instance, 'photo-registry',
photo_registry_mongo_config)
photo_repository = providers.Singleton(MinioPhotoRepository, photo_minio_client)
photo_registry_repository = providers.Singleton(PyMongoPhotoRegistryRepository, photo_registry_mongo_client)
photo_creator = providers.Singleton(PhotoCreator, photo_repository, event_bus)
photo_registry_creator = providers.Singleton(PhotoRegistryCreator, photo_registry_repository, event_bus)
create_photo_command_handler = providers.Singleton(
CreatePhotoCommandHandler,
photo_creator,
)
create_photo_registry_on_photo_created = providers.Singleton(
CreatePhotoRegistryOnPhotoCreated,
photo_registry_creator,
event_bus,
)
command_bus = providers.Singleton(
InMemoryCommandBus,
create_photo_command_handler,
)
|
class PhotoStoreContainer(containers.DeclarativeContainer):
event_bus = providers.Singleton(
InMemoryEventBus,
)
photo_minio_config = providers.Singleton(MinioPhotoConfigFactory.create)
photo_minio_client = providers.Singleton(MinioClientFactory.create_instance, 'photo', photo_minio_config)
photo_registry_mongo_config = providers.Singleton(PyMongoPhotoRegistryConfigFactory.create)
photo_registry_mongo_client = providers.Singleton(PyMongoClientFactory.create_instance, 'photo-registry',
photo_registry_mongo_config)
photo_repository = providers.Singleton(MinioPhotoRepository, photo_minio_client)
photo_registry_repository = providers.Singleton(PyMongoPhotoRegistryRepository, photo_registry_mongo_client)
photo_creator = providers.Singleton(PhotoCreator, photo_repository, event_bus)
photo_registry_creator = providers.Singleton(PhotoRegistryCreator, photo_registry_repository, event_bus)
create_photo_command_handler = providers.Singleton(
CreatePhotoCommandHandler,
photo_creator,
)
create_photo_registry_on_photo_created = providers.Singleton(
CreatePhotoRegistryOnPhotoCreated,
photo_registry_creator,
event_bus,
)
command_bus = providers.Singleton(
InMemoryCommandBus,
create_photo_command_handler,
)
| status_get_controller = providers.Singleton(StatusGetController) | 0 | 2023-12-27 13:58:25+00:00 | 4k |
smonsays/modular-hyperteacher | metax/data/dataset/teacher.py | [
{
"identifier": "MultitaskDataset",
"path": "metax/data/base.py",
"snippet": "class MultitaskDataset(NamedTuple):\n x: Array\n y: Array\n task_id: Array\n info: Dict = dict()"
},
{
"identifier": "DatasetGenerator",
"path": "metax/data/dataset/base.py",
"snippet": "class Datas... | import itertools
import haiku as hk
import jax
import jax.numpy as jnp
import jax.tree_util as jtu
import numpy as np
from functools import partial
from metax.data.base import MultitaskDataset
from metax.data.dataset.base import DatasetGenerator
from metax.models.mlp import MultilayerPerceptron
from metax.utils import PytreeReshaper | 1,893 | """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
| """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
| class HyperTeacher(DatasetGenerator): | 1 | 2023-12-22 16:35:49+00:00 | 4k |
kyegomez/qformer | qformer/model.py | [
{
"identifier": "ImgBlock",
"path": "qformer/blocks.py",
"snippet": "class ImgBlock(nn.Module):\n \"\"\"\n ImgBlock is a module that performs multi-query attention, cross-attention, and feedforward operations on input tensors.\n\n Args:\n dim (int): The dimension of the input tensors.\n ... | from torch import Tensor, nn
from qformer.blocks import ImgBlock, TextBlock
from qformer.masking import mask_top_right_quadrant | 1,891 |
class QFormer(nn.Module):
"""
QFormer is a transformer-based model for processing text and image inputs.
Args:
dim (int): The dimension of the model.
heads (int): The number of attention heads.
depth (int): The depth of the model.
dropout (float, optional): The dropout rate. Defaults to 0.1.
text_block_depth (int, optional): The depth of the text block. Defaults to None.
img_text_block_depth (int, optional): The depth of the image text block. Defaults to None.
Attributes:
dim (int): The dimension of the model.
heads (int): The number of attention heads.
depth (int): The depth of the model.
dropout (float): The dropout rate.
img_block (ImgBlock): The image block of the model.
text_block (TextBlock): The text block of the model.
img_layers (nn.ModuleList): The list of image layers.
text_layers (nn.ModuleList): The list of text layers.
Examples:
>>> model = QFormer(dim=512, heads=8, depth=6, dropout=0.1, text_block_depth=2, img_text_block_depth=2)
>>> x = torch.randn(1, 10, 512)
>>> img = torch.randn(1, 3, 224, 224)
>>> out = model(x, img)
>>> out.shape
torch.Size([1, 10, 512])
"""
def __init__(
self,
dim: int,
heads: int,
depth: int,
dropout: float = 0.1,
text_block_depth: int = None,
img_text_block_depth: int = None,
*args,
**kwargs,
):
super().__init__()
self.dim = dim
self.heads = heads
self.depth = depth
self.dropout = dropout
|
class QFormer(nn.Module):
"""
QFormer is a transformer-based model for processing text and image inputs.
Args:
dim (int): The dimension of the model.
heads (int): The number of attention heads.
depth (int): The depth of the model.
dropout (float, optional): The dropout rate. Defaults to 0.1.
text_block_depth (int, optional): The depth of the text block. Defaults to None.
img_text_block_depth (int, optional): The depth of the image text block. Defaults to None.
Attributes:
dim (int): The dimension of the model.
heads (int): The number of attention heads.
depth (int): The depth of the model.
dropout (float): The dropout rate.
img_block (ImgBlock): The image block of the model.
text_block (TextBlock): The text block of the model.
img_layers (nn.ModuleList): The list of image layers.
text_layers (nn.ModuleList): The list of text layers.
Examples:
>>> model = QFormer(dim=512, heads=8, depth=6, dropout=0.1, text_block_depth=2, img_text_block_depth=2)
>>> x = torch.randn(1, 10, 512)
>>> img = torch.randn(1, 3, 224, 224)
>>> out = model(x, img)
>>> out.shape
torch.Size([1, 10, 512])
"""
def __init__(
self,
dim: int,
heads: int,
depth: int,
dropout: float = 0.1,
text_block_depth: int = None,
img_text_block_depth: int = None,
*args,
**kwargs,
):
super().__init__()
self.dim = dim
self.heads = heads
self.depth = depth
self.dropout = dropout | self.img_block = ImgBlock(dim, depth, heads, dropout) | 0 | 2023-12-29 03:55:46+00:00 | 4k |
willfinnigan/RetroBioCat_2 | rbc2/reaction_network_entities/network.py | [
{
"identifier": "get_pa_route",
"path": "rbc2/pathway_tools/pa_route_conversion.py",
"snippet": "def get_pa_route(smi: str,\n starting_material_evaluator: StartingMaterialEvaluatorInterface,\n get_smi_produced_by: Callable[[str], List[Reaction]]) -> dict:\n \"\"\"\n ... | from collections import defaultdict
from typing import List, Set, Sequence
from typing import TYPE_CHECKING
from rbc2.pathway_tools.pa_route_conversion import get_pa_route
from rbc2.reaction_evaluation.starting_material_evaluator.starting_material_evaluator_interface import \
StartingMaterialEvaluatorInterface
from rbc2.reaction_network_entities.reaction import reactions_to_dicts, reaction_from_dict
from rbc2.reaction_network_entities.reaction import Reaction
from rbc2.reaction_network_entities.reaction_option import ReactionOption
from rbc2.expansion.default_expander_interface import Expander | 1,822 | from __future__ import annotations
if TYPE_CHECKING:
ReactionID = str
OptionID = str
ExpanderID = str
Smi = str
RxnType = str
class Network():
""" Network is used to keep a record of the outcome of all expansions."""
def __init__(self, reactions: Sequence[Reaction] = ()):
self.smi_produced_by: dict[Smi: Set[Reaction]] = defaultdict(set)
self.smi_substrate_of: dict[Smi: Set[Reaction]] = defaultdict(set)
self.reaction_options: dict[Smi: dict[ExpanderID: List[ReactionOption]]] = defaultdict(lambda: defaultdict(dict))
self.reactions: Set[Reaction] = set()
if len(reactions) != 0:
for rxn in reactions:
self.add_reaction(rxn)
def add_reaction(self, reaction: Reaction):
self.reactions.add(reaction)
self.smi_produced_by[reaction.product].add(reaction)
for smi in reaction.substrates:
self.smi_substrate_of[smi].add(reaction)
def remove_reaction(self, reaction: Reaction):
self.reactions.discard(reaction)
self.smi_produced_by[reaction.product].discard(reaction)
for smi in reaction.substrates:
self.smi_substrate_of[smi].discard(reaction)
def add_option(self, option: ReactionOption):
self.reaction_options[option.target_smi][option.rxn_type][option.unique_id] = option
def bulk_add_options(self, smi: Smi, rxn_type: RxnType, list_options: List[ReactionOption]):
self.reaction_options[smi][rxn_type] = {option.unique_id: option for option in list_options}
def remove_option(self, option: ReactionOption):
self.reaction_options[option.target_smi][option.rxn_type].pop(option.unique_id, None)
def get_reaction_options(self, smi: Smi, rxn_type: RxnType) -> list[ReactionOption]:
options_for_smi = self.reaction_options.get(smi, {})
options_for_rxn_type = options_for_smi.get(rxn_type, {})
return list(options_for_rxn_type.values())
def are_options_available(self, smi: Smi, rxn_type: RxnType) -> bool:
return self.reaction_options.get(smi, {}).get(rxn_type, False) is not False
def get_reactions_which_molecule_is_produced_by(self, smi: Smi) -> Set[Reaction]:
return self.smi_produced_by.get(smi, set())
def get_reactions_which_molecule_is_substrate_of(self, smi: Smi) -> Set[Reaction]:
return self.smi_substrate_of.get(smi, set())
def all_smis(self) -> Set[Smi]:
all_smis = set(self.smi_produced_by.keys())
all_smis.update(set(self.smi_substrate_of.keys()))
return all_smis
def all_reactions(self) -> List[Reaction]:
return list(self.reactions)
def all_reaction_options(self) -> List[ReactionOption]:
all_options = []
for smi, rxn_type_options in self.reaction_options.items():
for rxn_type, options_dict in rxn_type_options.items():
for option_id, option in options_dict.items():
all_options.append(option)
return all_options
def save(self):
"""Save the network to a dict"""
data = {"reactions": reactions_to_dicts(self.all_reactions()),
"reaction_options": [option_to_dict(opt) for opt in self.all_reaction_options()]}
return data
def load(self, data: dict, expanders: List[Expander]):
"""
Load the network from data dict
ReactionOptions will only be loaded if the relevant expander is provided
"""
# check each expander is associated with this network
for expander in expanders:
if expander.network != self:
raise Exception("Can not load reaction options when expander is not associated with the same network")
# load reactions
reaction_unique_id_dict = {}
for reaction_dict in data['reactions']:
| from __future__ import annotations
if TYPE_CHECKING:
ReactionID = str
OptionID = str
ExpanderID = str
Smi = str
RxnType = str
class Network():
""" Network is used to keep a record of the outcome of all expansions."""
def __init__(self, reactions: Sequence[Reaction] = ()):
self.smi_produced_by: dict[Smi: Set[Reaction]] = defaultdict(set)
self.smi_substrate_of: dict[Smi: Set[Reaction]] = defaultdict(set)
self.reaction_options: dict[Smi: dict[ExpanderID: List[ReactionOption]]] = defaultdict(lambda: defaultdict(dict))
self.reactions: Set[Reaction] = set()
if len(reactions) != 0:
for rxn in reactions:
self.add_reaction(rxn)
def add_reaction(self, reaction: Reaction):
self.reactions.add(reaction)
self.smi_produced_by[reaction.product].add(reaction)
for smi in reaction.substrates:
self.smi_substrate_of[smi].add(reaction)
def remove_reaction(self, reaction: Reaction):
self.reactions.discard(reaction)
self.smi_produced_by[reaction.product].discard(reaction)
for smi in reaction.substrates:
self.smi_substrate_of[smi].discard(reaction)
def add_option(self, option: ReactionOption):
self.reaction_options[option.target_smi][option.rxn_type][option.unique_id] = option
def bulk_add_options(self, smi: Smi, rxn_type: RxnType, list_options: List[ReactionOption]):
self.reaction_options[smi][rxn_type] = {option.unique_id: option for option in list_options}
def remove_option(self, option: ReactionOption):
self.reaction_options[option.target_smi][option.rxn_type].pop(option.unique_id, None)
def get_reaction_options(self, smi: Smi, rxn_type: RxnType) -> list[ReactionOption]:
options_for_smi = self.reaction_options.get(smi, {})
options_for_rxn_type = options_for_smi.get(rxn_type, {})
return list(options_for_rxn_type.values())
def are_options_available(self, smi: Smi, rxn_type: RxnType) -> bool:
return self.reaction_options.get(smi, {}).get(rxn_type, False) is not False
def get_reactions_which_molecule_is_produced_by(self, smi: Smi) -> Set[Reaction]:
return self.smi_produced_by.get(smi, set())
def get_reactions_which_molecule_is_substrate_of(self, smi: Smi) -> Set[Reaction]:
return self.smi_substrate_of.get(smi, set())
def all_smis(self) -> Set[Smi]:
all_smis = set(self.smi_produced_by.keys())
all_smis.update(set(self.smi_substrate_of.keys()))
return all_smis
def all_reactions(self) -> List[Reaction]:
return list(self.reactions)
def all_reaction_options(self) -> List[ReactionOption]:
all_options = []
for smi, rxn_type_options in self.reaction_options.items():
for rxn_type, options_dict in rxn_type_options.items():
for option_id, option in options_dict.items():
all_options.append(option)
return all_options
def save(self):
"""Save the network to a dict"""
data = {"reactions": reactions_to_dicts(self.all_reactions()),
"reaction_options": [option_to_dict(opt) for opt in self.all_reaction_options()]}
return data
def load(self, data: dict, expanders: List[Expander]):
"""
Load the network from data dict
ReactionOptions will only be loaded if the relevant expander is provided
"""
# check each expander is associated with this network
for expander in expanders:
if expander.network != self:
raise Exception("Can not load reaction options when expander is not associated with the same network")
# load reactions
reaction_unique_id_dict = {}
for reaction_dict in data['reactions']: | reaction = reaction_from_dict(reaction_dict) | 3 | 2023-12-30 11:33:41+00:00 | 4k |
DomingoJoseCab/AutoTube | utils/gpt/chatgpt.py | [
{
"identifier": "get_product_script",
"path": "utils/gpt/generate_script.py",
"snippet": "def get_product_script(data_extracted, video_name, i):\r\n\r\n prompt = get_product_replace(data_extracted, video_name, i)\r\n \r\n messages = [\r\n {\"role\": \"system\", \"content\": \"Genera un g... | from openai import OpenAI
from utils.gpt.generate_script import get_product_script, get_better_intro, get_better_outro, set_up_generate_script
from utils.gpt.generate_description import get_description, set_up_generate_description
from utils.gpt.generate_miniature import get_miniature, set_up_generate_miniature
import os
import json
| 1,786 | # ==============================================================================
# AutoTube Script
# Creado por: Domingo Caballero
# Canal de YouTube: https://www.youtube.com/@emprendedomingo?=sub_confirmation=1
# Lista de Correo: https://emprendecondomingo.substack.com/
# ==============================================================================
with open('../AutoTube/argss.json', 'r', encoding='utf-8') as archivo:
datos = json.load(archivo)
OPENAI_API_KEY = datos['OPENAI_API_KEY']
CLIENT = OpenAI(api_key=OPENAI_API_KEY)
MODELO_GPT4 = "gpt-4"
MODELO_IMG = "dall-e-3"
def set_up(folder_path):
set_up_generate_script(CLIENT, MODELO_GPT4, folder_path)
set_up_generate_description(CLIENT, MODELO_GPT4, folder_path)
set_up_generate_miniature(CLIENT, MODELO_IMG, folder_path)
def chatgpt(data, folder_path):
set_up(folder_path)
list_asins = ['ASIN_TOP5','ASIN_TOP4','ASIN_TOP3','ASIN_TOP2','ASIN_TOP1']
#################### GENERATING INTRO ####################
print("Generating intro...")
intro_path = os.path.join(datos['scripts_path'],'intro.txt')
get_better_intro(intro_path, datos['video_name'])
print("Intro generated.")
print("-----------------------------")
#################### GENERATING PRODUCTS ####################
for i, product in enumerate(list_asins):
print(f"Generating product {5-i}...")
get_product_script(data[product],datos['video_name'],5-i)
print("Products generated.")
print("-----------------------------")
#################### GENERATING OUTRO ####################
print("Generating outro...")
outro_path = os.path.join(datos['scripts_path'],'outro.txt')
| # ==============================================================================
# AutoTube Script
# Creado por: Domingo Caballero
# Canal de YouTube: https://www.youtube.com/@emprendedomingo?=sub_confirmation=1
# Lista de Correo: https://emprendecondomingo.substack.com/
# ==============================================================================
with open('../AutoTube/argss.json', 'r', encoding='utf-8') as archivo:
datos = json.load(archivo)
OPENAI_API_KEY = datos['OPENAI_API_KEY']
CLIENT = OpenAI(api_key=OPENAI_API_KEY)
MODELO_GPT4 = "gpt-4"
MODELO_IMG = "dall-e-3"
def set_up(folder_path):
set_up_generate_script(CLIENT, MODELO_GPT4, folder_path)
set_up_generate_description(CLIENT, MODELO_GPT4, folder_path)
set_up_generate_miniature(CLIENT, MODELO_IMG, folder_path)
def chatgpt(data, folder_path):
set_up(folder_path)
list_asins = ['ASIN_TOP5','ASIN_TOP4','ASIN_TOP3','ASIN_TOP2','ASIN_TOP1']
#################### GENERATING INTRO ####################
print("Generating intro...")
intro_path = os.path.join(datos['scripts_path'],'intro.txt')
get_better_intro(intro_path, datos['video_name'])
print("Intro generated.")
print("-----------------------------")
#################### GENERATING PRODUCTS ####################
for i, product in enumerate(list_asins):
print(f"Generating product {5-i}...")
get_product_script(data[product],datos['video_name'],5-i)
print("Products generated.")
print("-----------------------------")
#################### GENERATING OUTRO ####################
print("Generating outro...")
outro_path = os.path.join(datos['scripts_path'],'outro.txt')
| get_better_outro(outro_path, datos['video_name'])
| 2 | 2023-12-28 16:15:37+00:00 | 4k |
gregorybchris/typogenetics | typogenetics/search.py | [
{
"identifier": "Base",
"path": "typogenetics/typogenetics.py",
"snippet": "class Base(StrEnum):\n C = auto()\n G = auto()\n T = auto()\n A = auto()\n\n @classmethod\n def from_str(cls, base_str: str) -> \"Base\":\n return {\n \"C\": cls.C,\n \"G\": cls.G,\... | import logging
import numpy as np
from enum import StrEnum, auto
from queue import Queue
from typing import Optional, Tuple
from numpy.random import Generator
from typogenetics.typogenetics import Base, Rewriter, Strand, Translator | 3,114 |
logger = logging.getLogger(__name__)
class EditType(StrEnum):
MUTATE = auto()
INSERT = auto()
DELETE = auto()
class Editor:
PROB_MUTATE = 0.80
PROB_INSERT = 0.10
PROB_DELETE = 0.10
@classmethod
|
logger = logging.getLogger(__name__)
class EditType(StrEnum):
MUTATE = auto()
INSERT = auto()
DELETE = auto()
class Editor:
PROB_MUTATE = 0.80
PROB_INSERT = 0.10
PROB_DELETE = 0.10
@classmethod | def edit(cls, strand: Strand, rng: Generator) -> Strand: | 2 | 2023-12-28 08:59:06+00:00 | 4k |
chaoren2357/gsplatstudio | gsplatstudio/models/structOptim/splitAcloneAprune.py | [
{
"identifier": "build_rotation",
"path": "gsplatstudio/utils/general_utils.py",
"snippet": "def build_rotation(r):\n norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])\n\n q = r / norm[:, None]\n\n R = torch.zeros((q.size(0), 3, 3), device='cuda')\n\n r = q[:, ... | import torch
import gsplatstudio
from gsplatstudio.utils.general_utils import build_rotation, inverse_sigmoid
from gsplatstudio.utils.type_utils import *
from gsplatstudio.utils.config import parse_structured | 1,950 | densify_from_iter: int = 500
densify_until_iter: int = 15000
densify_grad_threshold: float = 0.0002
densification_interval: int = 100
size_threshold: int = 20
min_opacity: float = 0.005
num_split: int = 2
@gsplatstudio.register("split.clone.prune-structOptim")
class splitAcloneAprune:
def __init__(self, cfg):
self.cfg = parse_structured(splitAcloneApruneConfig, cfg)
@property
def state(self):
return (
self.max_radii2D,
self.xyz_gradient_accum,
self.denom
)
def restore(self, state, spatial_lr_scale):
(self.max_radii2D,
self.xyz_gradient_accum,
self.denom) = state
self.spatial_lr_scale = spatial_lr_scale
def init_optim(self,model, spatial_lr_scale):
self.spatial_lr_scale = spatial_lr_scale
self.reset_stats(model)
def update(self, iteration, model, paramOptim, render_pkg, is_white_background):
viewspace_point_tensor, visibility_filter, radii = render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
if iteration < self.cfg.densify_until_iter:
# Keep track of max radii in image-space for pruning
self.max_radii2D[visibility_filter] = torch.max(self.max_radii2D[visibility_filter], radii[visibility_filter])
self.xyz_gradient_accum[visibility_filter] += torch.norm(viewspace_point_tensor.grad[visibility_filter,:2], dim=-1, keepdim=True)
self.denom[visibility_filter] += 1
if iteration > self.cfg.densify_from_iter and iteration % self.cfg.densification_interval == 0:
self.densify_and_prune(iteration, model, paramOptim)
if iteration % self.cfg.opacity_reset_interval == 0 or (is_white_background and iteration == self.cfg.densify_from_iter):
self.reset_model_opacity(model, paramOptim)
def should_start_limit_size(self,iteration):
return iteration > self.cfg.opacity_reset_interval
def densify_and_prune(self, iteration, model, paramOptim):
grads = self.xyz_gradient_accum / self.denom
grads[grads.isnan()] = 0.0
self.densify_and_clone(model, paramOptim, grads)
self.densify_and_split(model, paramOptim, grads)
prune_mask = (model.opacity < self.cfg.min_opacity).squeeze()
if self.should_start_limit_size(iteration):
big_points_vs = self.max_radii2D > self.cfg.size_threshold
big_points_ws = model.scaling.max(dim=1).values > 0.1 * self.spatial_lr_scale
prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)
self.prune_points(prune_mask, model, paramOptim)
torch.cuda.empty_cache()
def densify_and_clone(self, model, paramOptim, grads):
# Extract points that satisfy the gradient condition
selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= self.cfg.densify_grad_threshold, True, False)
selected_pts_mask = torch.logical_and(selected_pts_mask,
torch.max(model.scaling, dim=1).values <= self.cfg.percent_dense*self.spatial_lr_scale)
new_tensors_dict = {
"xyz": model._xyz[selected_pts_mask],
"f_dc": model._features_dc[selected_pts_mask],
"f_rest": model._features_rest[selected_pts_mask],
"opacity": model._opacity[selected_pts_mask],
"scaling" : model._scaling[selected_pts_mask],
"rotation" : model._rotation[selected_pts_mask]
}
self.densification_postfix(model, paramOptim, new_tensors_dict)
def densify_and_split(self, model, paramOptim, grads):
# Extract points that satisfy the gradient condition
padded_grad = torch.zeros((model.xyz.shape[0]), device="cuda")
padded_grad[:grads.shape[0]] = grads.squeeze()
selected_pts_mask = torch.where(padded_grad >= self.cfg.densify_grad_threshold, True, False)
selected_pts_mask = torch.logical_and(selected_pts_mask,
torch.max(model.scaling, dim=1).values > self.cfg.percent_dense*self.spatial_lr_scale)
stds = model.scaling[selected_pts_mask].repeat(self.cfg.num_split,1)
means = torch.zeros((stds.size(0), 3),device="cuda")
samples = torch.normal(mean=means, std=stds)
rots = build_rotation(model._rotation[selected_pts_mask]).repeat(self.cfg.num_split,1,1)
new_tensors_dict = {
"xyz": torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + model.xyz[selected_pts_mask].repeat(self.cfg.num_split, 1),
"f_dc": model._features_dc[selected_pts_mask].repeat(self.cfg.num_split,1,1),
"f_rest": model._features_rest[selected_pts_mask].repeat(self.cfg.num_split,1,1),
"opacity": model._opacity[selected_pts_mask].repeat(self.cfg.num_split,1),
"scaling" : model.scaling_inverse_activation(model.scaling[selected_pts_mask].repeat(self.cfg.num_split,1) / (0.8*self.cfg.num_split)),
"rotation" : model._rotation[selected_pts_mask].repeat(self.cfg.num_split,1)
}
self.densification_postfix(model, paramOptim, new_tensors_dict)
prune_filter = torch.cat((selected_pts_mask, torch.zeros(self.cfg.num_split * selected_pts_mask.sum(), device="cuda", dtype=bool)))
self.prune_points(prune_filter, model, paramOptim)
def prune_points(self, mask, model, paramOptim):
valid_points_mask = ~mask
optimizable_tensors = paramOptim.prune_optim(valid_points_mask)
model.update_params(optimizable_tensors)
self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]
self.denom = self.denom[valid_points_mask]
self.max_radii2D = self.max_radii2D[valid_points_mask]
def densification_postfix(self, model, paramOptim, new_tensors_dict):
optimizable_tensors = paramOptim.cat_tensors(new_tensors_dict)
model.update_params(optimizable_tensors)
self.reset_stats(model)
def reset_model_opacity(self, model, paramOptim):
|
@dataclass
class splitAcloneApruneConfig:
max_sh_drgree: int = 3
percent_dense: float = 0.01
opacity_reset_interval: int = 3000
densify_from_iter: int = 500
densify_until_iter: int = 15000
densify_grad_threshold: float = 0.0002
densification_interval: int = 100
size_threshold: int = 20
min_opacity: float = 0.005
num_split: int = 2
@gsplatstudio.register("split.clone.prune-structOptim")
class splitAcloneAprune:
def __init__(self, cfg):
self.cfg = parse_structured(splitAcloneApruneConfig, cfg)
@property
def state(self):
return (
self.max_radii2D,
self.xyz_gradient_accum,
self.denom
)
def restore(self, state, spatial_lr_scale):
(self.max_radii2D,
self.xyz_gradient_accum,
self.denom) = state
self.spatial_lr_scale = spatial_lr_scale
def init_optim(self,model, spatial_lr_scale):
self.spatial_lr_scale = spatial_lr_scale
self.reset_stats(model)
def update(self, iteration, model, paramOptim, render_pkg, is_white_background):
viewspace_point_tensor, visibility_filter, radii = render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
if iteration < self.cfg.densify_until_iter:
# Keep track of max radii in image-space for pruning
self.max_radii2D[visibility_filter] = torch.max(self.max_radii2D[visibility_filter], radii[visibility_filter])
self.xyz_gradient_accum[visibility_filter] += torch.norm(viewspace_point_tensor.grad[visibility_filter,:2], dim=-1, keepdim=True)
self.denom[visibility_filter] += 1
if iteration > self.cfg.densify_from_iter and iteration % self.cfg.densification_interval == 0:
self.densify_and_prune(iteration, model, paramOptim)
if iteration % self.cfg.opacity_reset_interval == 0 or (is_white_background and iteration == self.cfg.densify_from_iter):
self.reset_model_opacity(model, paramOptim)
def should_start_limit_size(self,iteration):
return iteration > self.cfg.opacity_reset_interval
def densify_and_prune(self, iteration, model, paramOptim):
grads = self.xyz_gradient_accum / self.denom
grads[grads.isnan()] = 0.0
self.densify_and_clone(model, paramOptim, grads)
self.densify_and_split(model, paramOptim, grads)
prune_mask = (model.opacity < self.cfg.min_opacity).squeeze()
if self.should_start_limit_size(iteration):
big_points_vs = self.max_radii2D > self.cfg.size_threshold
big_points_ws = model.scaling.max(dim=1).values > 0.1 * self.spatial_lr_scale
prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)
self.prune_points(prune_mask, model, paramOptim)
torch.cuda.empty_cache()
def densify_and_clone(self, model, paramOptim, grads):
# Extract points that satisfy the gradient condition
selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= self.cfg.densify_grad_threshold, True, False)
selected_pts_mask = torch.logical_and(selected_pts_mask,
torch.max(model.scaling, dim=1).values <= self.cfg.percent_dense*self.spatial_lr_scale)
new_tensors_dict = {
"xyz": model._xyz[selected_pts_mask],
"f_dc": model._features_dc[selected_pts_mask],
"f_rest": model._features_rest[selected_pts_mask],
"opacity": model._opacity[selected_pts_mask],
"scaling" : model._scaling[selected_pts_mask],
"rotation" : model._rotation[selected_pts_mask]
}
self.densification_postfix(model, paramOptim, new_tensors_dict)
def densify_and_split(self, model, paramOptim, grads):
# Extract points that satisfy the gradient condition
padded_grad = torch.zeros((model.xyz.shape[0]), device="cuda")
padded_grad[:grads.shape[0]] = grads.squeeze()
selected_pts_mask = torch.where(padded_grad >= self.cfg.densify_grad_threshold, True, False)
selected_pts_mask = torch.logical_and(selected_pts_mask,
torch.max(model.scaling, dim=1).values > self.cfg.percent_dense*self.spatial_lr_scale)
stds = model.scaling[selected_pts_mask].repeat(self.cfg.num_split,1)
means = torch.zeros((stds.size(0), 3),device="cuda")
samples = torch.normal(mean=means, std=stds)
rots = build_rotation(model._rotation[selected_pts_mask]).repeat(self.cfg.num_split,1,1)
new_tensors_dict = {
"xyz": torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + model.xyz[selected_pts_mask].repeat(self.cfg.num_split, 1),
"f_dc": model._features_dc[selected_pts_mask].repeat(self.cfg.num_split,1,1),
"f_rest": model._features_rest[selected_pts_mask].repeat(self.cfg.num_split,1,1),
"opacity": model._opacity[selected_pts_mask].repeat(self.cfg.num_split,1),
"scaling" : model.scaling_inverse_activation(model.scaling[selected_pts_mask].repeat(self.cfg.num_split,1) / (0.8*self.cfg.num_split)),
"rotation" : model._rotation[selected_pts_mask].repeat(self.cfg.num_split,1)
}
self.densification_postfix(model, paramOptim, new_tensors_dict)
prune_filter = torch.cat((selected_pts_mask, torch.zeros(self.cfg.num_split * selected_pts_mask.sum(), device="cuda", dtype=bool)))
self.prune_points(prune_filter, model, paramOptim)
def prune_points(self, mask, model, paramOptim):
valid_points_mask = ~mask
optimizable_tensors = paramOptim.prune_optim(valid_points_mask)
model.update_params(optimizable_tensors)
self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]
self.denom = self.denom[valid_points_mask]
self.max_radii2D = self.max_radii2D[valid_points_mask]
def densification_postfix(self, model, paramOptim, new_tensors_dict):
optimizable_tensors = paramOptim.cat_tensors(new_tensors_dict)
model.update_params(optimizable_tensors)
self.reset_stats(model)
def reset_model_opacity(self, model, paramOptim): | opacities_new = inverse_sigmoid(torch.min(model.opacity, torch.ones_like(model.opacity)*0.01)) | 1 | 2023-12-22 08:27:26+00:00 | 4k |
onestepai/api_rag | src/api_rag/api_rag_model.py | [
{
"identifier": "ServiceApiConfig",
"path": "src/config/ServiceApiConfig.py",
"snippet": "class ServiceApiConfig(ServiceApiConfigBase):\n def __init__(self):\n ServiceApiConfigBase.__init__(self,\n url_prefix=DockerConfig.URL_PREFIX + DockerConfig.API_VERSI... | import json
import requests
import logging
from src.config.ServiceApiConfig import ServiceApiConfig
from src.utils.apis import apis_info
from src.api_rag.gpt_api import GPTChatBot | 2,513 |
class APIRAGModel(object):
def __init__(self):
self.apis_info = apis_info()
def call_apis(self, answer, headers):
results = ''
for api in answer['apis']:
result = self.call_api(api, headers)
results += result
return results
def call_api(self, api, headers):
url = self.apis_info.api_definitions[api['name']]['url']
prefix = self.apis_info.api_definitions[api['name']]['prefix']
logging.info(str(api) + "-------->" + "url" + "------>" + str(url))
input_data = ''
params = {}
input_params = {}
if 'params' in api:
input_params = api['params']
elif 'parameters' in api:
input_params = api['parameters']
for key, value in input_params.items():
if key in self.apis_info.api_definitions[api['name']]['input']:
if self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'header':
headers[key] = input_params[key].encode(encoding='utf-8')
elif self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'body':
input_data = json.dumps(input_params[key])
elif self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'query':
params[key] = input_params[key].encode(encoding='utf-8')
logging.info(str(api) + "------>" + "request_data----->" + str(input_data))
output_data = requests.request(method=self.apis_info.api_definitions[api['name']]['calling_type'].upper(),
url=url + prefix, headers=headers, params=params,
data=input_data.encode(encoding='utf-8')).text
output_explain = self.apis_info.api_definitions[api['name']]['output_explain']
logging.info(str(api) + "------>" + "output_data----->" + str(output_data))
|
class APIRAGModel(object):
def __init__(self):
self.apis_info = apis_info()
def call_apis(self, answer, headers):
results = ''
for api in answer['apis']:
result = self.call_api(api, headers)
results += result
return results
def call_api(self, api, headers):
url = self.apis_info.api_definitions[api['name']]['url']
prefix = self.apis_info.api_definitions[api['name']]['prefix']
logging.info(str(api) + "-------->" + "url" + "------>" + str(url))
input_data = ''
params = {}
input_params = {}
if 'params' in api:
input_params = api['params']
elif 'parameters' in api:
input_params = api['parameters']
for key, value in input_params.items():
if key in self.apis_info.api_definitions[api['name']]['input']:
if self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'header':
headers[key] = input_params[key].encode(encoding='utf-8')
elif self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'body':
input_data = json.dumps(input_params[key])
elif self.apis_info.api_definitions[api['name']]['input'][key]['in'] == 'query':
params[key] = input_params[key].encode(encoding='utf-8')
logging.info(str(api) + "------>" + "request_data----->" + str(input_data))
output_data = requests.request(method=self.apis_info.api_definitions[api['name']]['calling_type'].upper(),
url=url + prefix, headers=headers, params=params,
data=input_data.encode(encoding='utf-8')).text
output_explain = self.apis_info.api_definitions[api['name']]['output_explain']
logging.info(str(api) + "------>" + "output_data----->" + str(output_data)) | if ServiceApiConfig.prompt_language == 'en_us': | 0 | 2023-12-28 03:13:03+00:00 | 4k |
DerwenAI/textgraphs | textgraphs/gor.py | [
{
"identifier": "Edge",
"path": "textgraphs/elem.py",
"snippet": "class Edge:\n \"\"\"\nA data class representing an edge between two nodes.\n \"\"\"\n src_node: int\n dst_node: int\n kind: RelEnum\n rel: str\n prob: float\n count: int = 1"
},
{
"identifier": "Node",
... | from collections import Counter, defaultdict
from dataclasses import dataclass, field
from icecream import ic # pylint: disable=E0401
from .elem import Edge, Node, NodeEnum, RelEnum
from .graph import SimpleGraph
import enum
import itertools
import pathlib
import json
import sys
import typing
import networkx as nx # pylint: disable=E0401
import pandas as pd # pylint: disable=E0401
import pyvis # pylint: disable=E0401 | 3,215 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This class handles toplogical transforms of graph data into a
_graph of relations_ dual representation.
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## class definitions
class RelDir (enum.IntEnum):
"""
Enumeration for the directions of a relation.
"""
HEAD = 0 # relation flows into node
TAIL = 1 # relation flows out of node
def __str__ (
self
) -> str:
"""
Codec for representing as a string.
"""
decoder: typing.List[ str ] = [
"head",
"tail",
]
return decoder[self.value]
@dataclass(order=False, frozen=False)
class SheafSeed:
"""
A data class representing a node from the source graph plus its
partial edge, based on a _Sheaf Theory_ decomposition of a graph.
"""
node_id: int
rel_id: int
rel_dir: RelDir
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This class handles toplogical transforms of graph data into a
_graph of relations_ dual representation.
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## class definitions
class RelDir (enum.IntEnum):
"""
Enumeration for the directions of a relation.
"""
HEAD = 0 # relation flows into node
TAIL = 1 # relation flows out of node
def __str__ (
self
) -> str:
"""
Codec for representing as a string.
"""
decoder: typing.List[ str ] = [
"head",
"tail",
]
return decoder[self.value]
@dataclass(order=False, frozen=False)
class SheafSeed:
"""
A data class representing a node from the source graph plus its
partial edge, based on a _Sheaf Theory_ decomposition of a graph.
"""
node_id: int
rel_id: int
rel_dir: RelDir | edge: Edge | 0 | 2023-12-25 11:42:53+00:00 | 4k |
lc4337/avfcomp | tests/test.py | [
{
"identifier": "AVFComp",
"path": "avfcomp/comp.py",
"snippet": "class AVFComp(AVFParser):\n \"\"\"Compression of an AVF file.\"\"\"\n\n @staticmethod\n def zigzag_enc(n: int) -> int:\n \"\"\"Zigzag transformation encode.\"\"\"\n return (n << 1) ^ (n >> 31)\n\n @staticmethod\n... | import hashlib
import shutil
import time
import unittest
from os import listdir, mkdir, path
from typing import Any, Callable, Iterator, Tuple
from avfcomp import AVFComp, AVFDecomp, CompHandler
from avfcomp.handler import T_CompFile | 2,659 | """Test compression and decompression."""
work_dir = path.dirname(path.dirname(__file__))
data_path = path.join(work_dir, "data")
beg_path = path.join(data_path, "avf_beg")
int_path = path.join(data_path, "avf_int")
exp_path = path.join(data_path, "avf_exp")
cvf_path = path.join(data_path, "cvf")
decomp_path = path.join(data_path, "avf_decomp")
# refresh
def refresh():
"""Refresh the data directory."""
shutil.rmtree(cvf_path, ignore_errors=True)
shutil.rmtree(decomp_path, ignore_errors=True)
mkdir(cvf_path)
mkdir(decomp_path)
def list_files(paths: str) -> Iterator[Tuple[str, str]]:
"""List all files in a directory."""
for file in listdir(paths):
yield file, path.join(paths, file)
def calc_file_hash(file_path):
"""Calculate the hash of a file."""
with open(file_path, "rb") as fin:
return hashlib.sha256(fin.read()).hexdigest()
def cost_time(func: Callable) -> Callable[..., Tuple[Any, float]]:
"""Calculate the time cost of a function."""
def fun(*args, **kwargs) -> Tuple[Any, float]:
t = time.perf_counter()
result = func(*args, **kwargs)
return (result, time.perf_counter() - t)
return fun
@cost_time
| """Test compression and decompression."""
work_dir = path.dirname(path.dirname(__file__))
data_path = path.join(work_dir, "data")
beg_path = path.join(data_path, "avf_beg")
int_path = path.join(data_path, "avf_int")
exp_path = path.join(data_path, "avf_exp")
cvf_path = path.join(data_path, "cvf")
decomp_path = path.join(data_path, "avf_decomp")
# refresh
def refresh():
"""Refresh the data directory."""
shutil.rmtree(cvf_path, ignore_errors=True)
shutil.rmtree(decomp_path, ignore_errors=True)
mkdir(cvf_path)
mkdir(decomp_path)
def list_files(paths: str) -> Iterator[Tuple[str, str]]:
"""List all files in a directory."""
for file in listdir(paths):
yield file, path.join(paths, file)
def calc_file_hash(file_path):
"""Calculate the hash of a file."""
with open(file_path, "rb") as fin:
return hashlib.sha256(fin.read()).hexdigest()
def cost_time(func: Callable) -> Callable[..., Tuple[Any, float]]:
"""Calculate the time cost of a function."""
def fun(*args, **kwargs) -> Tuple[Any, float]:
t = time.perf_counter()
result = func(*args, **kwargs)
return (result, time.perf_counter() - t)
return fun
@cost_time | def get_comp(paths: str, handler: Callable[..., T_CompFile]) -> Tuple[int, int]: | 3 | 2023-12-22 02:19:59+00:00 | 4k |
Noubissie237/StockManagment | StockManagment/App/views.py | [
{
"identifier": "panier_cookie",
"path": "StockManagment/App/utils.py",
"snippet": "def panier_cookie(request):\n articles = []\n\n commande = {\n 'get_panier_total':0,\n 'get_panier_article':0,\n 'produit_physique': True,\n }\n\n nombre_article = commande['get_panier_ar... | from django.shortcuts import render, redirect
from django.http import JsonResponse, HttpResponse
from .models import *
from django.contrib.auth.decorators import login_required
from datetime import datetime
from .utils import panier_cookie, data_cookie, getDataFromApi
from .forms import LoginForm
from django.contrib.auth import authenticate, login, logout
import json, requests | 1,952 |
@login_required(login_url='/login')
def update_article(request, *args, **kwargs):
data = json.loads(request.body)
produit_id = data['produit_id']
action = data['action']
produit = Produit.objects.get(id=produit_id)
client = request.user.client
commande, created = Commande.objects.get_or_create(client=client, complete=False)
commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit)
if action == "add":
commande_article.quantite += 1
if action == "remove":
commande_article.quantite -=1
commande_article.save()
if commande_article.quantite <= 0:
commande_article.delete()
return JsonResponse("panier modifié", safe=False)
@login_required(login_url='/login')
def commandeAnonyme(request, data):
name = data['form']['name']
username = data['form']['username']
email = data['form']['email']
phone = data['form']['phone']
cookie_panier = panier_cookie(request)
articles = cookie_panier['articles']
client, created = Client.objects.get_or_create(
email=email
)
client.name = name
client.save()
commande = Commande.objects.create(
client=client
)
for article in articles:
produit = Produit.objects.get(id=article['produit']['pk'])
CommandeArticle.objects.create(
produit=produit,
commande=commande,
quantite=article['quantite']
)
return client, commande
@login_required(login_url='/login')
def traitement_commande(request, *args, **kwargs):
data = json.loads(request.body)
transaction_id = datetime.now().timestamp()
if request.user.is_authenticated:
client = request.user.client
commande, created = Commande.objects.get_or_create(client=client, complete=False)
else:
client, commande = commandeAnonyme(request, data)
total = float(data['form']['total'])
commande.transaction_id = data["payment_info"]["transaction_id"]
commande.total_trans = data['payment_info']['total']
if commande.get_panier_total == total:
commande.complete = True
commande.status = data['payment_info']['status']
else:
commande.status = "REFUSED"
commande.save()
return JsonResponse("Attention!!! Traitement Refuse Fraude detecte!", safe=False)
commande.save()
if commande.produit_physique:
AddressChipping.objects.create(
client=client,
commande=commande,
addresse=data['shipping']['address'],
ville=data['shipping']['city'],
zipcode=data['shipping']['zipcode']
)
return JsonResponse("Traitement complet", safe=False)
def login_view(request):
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return render(request, 'shop/index.html', context={'name' : request.user.username})
else:
form.add_error(None, "Nom d'utilisateur ou mot de passe incorrect.")
else:
|
@login_required(login_url='/login')
def shop(request, *args, **kwargs):
"""Vue des produits"""
produits = Produit.objects.all()
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'produits': produits,
'nombre_article': nombre_article
}
return render(request, 'shop/index.html', context)
@login_required(login_url='/login')
def panier(request, *args, **kwargs):
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles' : articles,
'commande': commande,
'nombre_article': nombre_article
}
return render(request, 'shop/panier.html', context)
@login_required(login_url='/login')
def commande(request, *args, **kwargs):
data = data_cookie(request)
articles = data['articles']
commande = data['commande']
nombre_article = data['nombre_article']
context = {
'articles' : articles,
'commande': commande,
'nombre_article': nombre_article
}
return render(request, 'shop/commande.html', context)
@login_required(login_url='/login')
def update_article(request, *args, **kwargs):
data = json.loads(request.body)
produit_id = data['produit_id']
action = data['action']
produit = Produit.objects.get(id=produit_id)
client = request.user.client
commande, created = Commande.objects.get_or_create(client=client, complete=False)
commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit)
if action == "add":
commande_article.quantite += 1
if action == "remove":
commande_article.quantite -=1
commande_article.save()
if commande_article.quantite <= 0:
commande_article.delete()
return JsonResponse("panier modifié", safe=False)
@login_required(login_url='/login')
def commandeAnonyme(request, data):
name = data['form']['name']
username = data['form']['username']
email = data['form']['email']
phone = data['form']['phone']
cookie_panier = panier_cookie(request)
articles = cookie_panier['articles']
client, created = Client.objects.get_or_create(
email=email
)
client.name = name
client.save()
commande = Commande.objects.create(
client=client
)
for article in articles:
produit = Produit.objects.get(id=article['produit']['pk'])
CommandeArticle.objects.create(
produit=produit,
commande=commande,
quantite=article['quantite']
)
return client, commande
@login_required(login_url='/login')
def traitement_commande(request, *args, **kwargs):
data = json.loads(request.body)
transaction_id = datetime.now().timestamp()
if request.user.is_authenticated:
client = request.user.client
commande, created = Commande.objects.get_or_create(client=client, complete=False)
else:
client, commande = commandeAnonyme(request, data)
total = float(data['form']['total'])
commande.transaction_id = data["payment_info"]["transaction_id"]
commande.total_trans = data['payment_info']['total']
if commande.get_panier_total == total:
commande.complete = True
commande.status = data['payment_info']['status']
else:
commande.status = "REFUSED"
commande.save()
return JsonResponse("Attention!!! Traitement Refuse Fraude detecte!", safe=False)
commande.save()
if commande.produit_physique:
AddressChipping.objects.create(
client=client,
commande=commande,
addresse=data['shipping']['address'],
ville=data['shipping']['city'],
zipcode=data['shipping']['zipcode']
)
return JsonResponse("Traitement complet", safe=False)
def login_view(request):
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return render(request, 'shop/index.html', context={'name' : request.user.username})
else:
form.add_error(None, "Nom d'utilisateur ou mot de passe incorrect.")
else:
| res = getDataFromApi(request) | 2 | 2023-12-29 11:13:34+00:00 | 4k |
kokiez/raydium-convert-SOLorTokens | main.py | [
{
"identifier": "fetch_pool_keys",
"path": "pools.py",
"snippet": "def fetch_pool_keys(mint: str):\r\n amm_info = {}\r\n all_pools = {}\r\n try:\r\n # Using this so it will be faster else no option, we go the slower way.\r\n with open('all_pools.json', 'r') as file:\r\n ... | from solana.rpc.commitment import Commitment
from solana.rpc.api import Client
from solana.transaction import Transaction
from solders.keypair import Keypair
from pools import fetch_pool_keys, make_simulate_pool_info_instruction
from ast import literal_eval
import re
| 1,722 |
LIQUIDITY_FEES_NUMERATOR = 25
LIQUIDITY_FEES_DENOMINATOR = 10000
"""
Required Variables
"""
endpoint = "your_rpc_url"
payer = Keypair.from_base58_string("your_private_key")
token = "ca of your mint/mint address"
solana_client = Client(endpoint, commitment=Commitment("confirmed"), blockhash_cache=True)
def calculateAmountOut(amount, pool_info):
status = pool_info['status']
SWAP_decimals = pool_info['coin_decimals'] #swap coin
SOL_decimals = pool_info['pc_decimals'] #SOL
COIN_lp_decimals = pool_info['lp_decimals'] #swap coin
pool_SOL_amount = pool_info['pool_pc_amount'] #sol
pool_SWAP_amount = pool_info['pool_coin_amount'] #coin
Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin
reserve_in = pool_SOL_amount
reserve_out = pool_SWAP_amount
current_price = reserve_out / reserve_in
# print(f"Current Price in SOL: {current_price:.12f}")
amount_in = amount * 10 ** SOL_decimals
Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR
amount_in_with_fee = amount_in - Fees
amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# Slippage = 1 + slippage
# minimumAmountOut = amountOutRaw / slippage
return amountOutRaw / 10 ** SWAP_decimals
def calculateAmountIn(amount, pool_info):
SWAP_decimals = pool_info['coin_decimals'] #swap coin
SOL_decimals = pool_info['pc_decimals'] #SOL
COIN_lp_decimals = pool_info['lp_decimals'] #swap coin
pool_SOL_amount = pool_info['pool_pc_amount'] #sol
pool_SWAP_amount = pool_info['pool_coin_amount'] #coin
Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin
reserve_in = pool_SWAP_amount
reserve_out = pool_SOL_amount
current_price = reserve_out / reserve_in
# print(f"Current Price in SOL: {current_price:.12f}")
amount_in = amount * 10 ** SWAP_decimals
Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR
amount_in_with_fee = amount_in - Fees
amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# Slippage = 1 + slippage
# minimumAmountOut = amountOutRaw / slippage
return amountOutRaw / 10 ** SOL_decimals
def PoolInfo(mint):
while True:
quote = ""
pool_keys = fetch_pool_keys(mint)
if str(pool_keys['quote_mint']) == "So11111111111111111111111111111111111111112":
quote = "SOL"
elif str(pool_keys['quote_mint']) == "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v":
quote = "USDC"
elif str(pool_keys['quote_mint']) == "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB":
quote = "USDC"
recent_block_hash = solana_client.get_latest_blockhash().value.blockhash
tx = Transaction(recent_blockhash=recent_block_hash, fee_payer=payer.pubkey())
|
LIQUIDITY_FEES_NUMERATOR = 25
LIQUIDITY_FEES_DENOMINATOR = 10000
"""
Required Variables
"""
endpoint = "your_rpc_url"
payer = Keypair.from_base58_string("your_private_key")
token = "ca of your mint/mint address"
solana_client = Client(endpoint, commitment=Commitment("confirmed"), blockhash_cache=True)
def calculateAmountOut(amount, pool_info):
status = pool_info['status']
SWAP_decimals = pool_info['coin_decimals'] #swap coin
SOL_decimals = pool_info['pc_decimals'] #SOL
COIN_lp_decimals = pool_info['lp_decimals'] #swap coin
pool_SOL_amount = pool_info['pool_pc_amount'] #sol
pool_SWAP_amount = pool_info['pool_coin_amount'] #coin
Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin
reserve_in = pool_SOL_amount
reserve_out = pool_SWAP_amount
current_price = reserve_out / reserve_in
# print(f"Current Price in SOL: {current_price:.12f}")
amount_in = amount * 10 ** SOL_decimals
Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR
amount_in_with_fee = amount_in - Fees
amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# Slippage = 1 + slippage
# minimumAmountOut = amountOutRaw / slippage
return amountOutRaw / 10 ** SWAP_decimals
def calculateAmountIn(amount, pool_info):
SWAP_decimals = pool_info['coin_decimals'] #swap coin
SOL_decimals = pool_info['pc_decimals'] #SOL
COIN_lp_decimals = pool_info['lp_decimals'] #swap coin
pool_SOL_amount = pool_info['pool_pc_amount'] #sol
pool_SWAP_amount = pool_info['pool_coin_amount'] #coin
Coin_pool_lp_supply = pool_info['pool_lp_supply'] #coin
reserve_in = pool_SWAP_amount
reserve_out = pool_SOL_amount
current_price = reserve_out / reserve_in
# print(f"Current Price in SOL: {current_price:.12f}")
amount_in = amount * 10 ** SWAP_decimals
Fees = (amount_in * LIQUIDITY_FEES_NUMERATOR)/LIQUIDITY_FEES_DENOMINATOR
amount_in_with_fee = amount_in - Fees
amountOutRaw = (reserve_out * amount_in_with_fee) / (reserve_in + amount_in_with_fee)
# Slippage = 1 + slippage
# minimumAmountOut = amountOutRaw / slippage
return amountOutRaw / 10 ** SOL_decimals
def PoolInfo(mint):
while True:
quote = ""
pool_keys = fetch_pool_keys(mint)
if str(pool_keys['quote_mint']) == "So11111111111111111111111111111111111111112":
quote = "SOL"
elif str(pool_keys['quote_mint']) == "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v":
quote = "USDC"
elif str(pool_keys['quote_mint']) == "Es9vMFrzaCERmJfrF4H2FYD4KCoNkY11McCe8BenwNYB":
quote = "USDC"
recent_block_hash = solana_client.get_latest_blockhash().value.blockhash
tx = Transaction(recent_blockhash=recent_block_hash, fee_payer=payer.pubkey())
| sim_inst = make_simulate_pool_info_instruction(pool_keys)
| 1 | 2023-12-29 12:35:38+00:00 | 4k |
Sen-Yao/RCS-Calculator | main.py | [
{
"identifier": "load_E_field",
"path": "info_io.py",
"snippet": "def load_E_field(path):\n E_field_table = {}\n try:\n with open(path, 'r') as file:\n line = file.readline()\n\n # Get numbers of example frequencies\n while line != '// #Frequencies\\n':\n ... | import os
import argparse
import tqdm
import warnings
from info_io import load_E_field, single_RCS_output, load_RCS, rcs_to_dB
from RCS_Table import RCS_Table | 1,897 |
def main():
"""
TODO: Add rad unit
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument('--path', default='data', type=str, help='path to data folder')
parser.add_argument('--Ei', '-E', default=1, type=float, help='Mode of incident electric field vector')
parser.add_argument('--R', '-R', default=1, type=float, help='Far field range')
args = parser.parse_args()
print('\n\nParameter:',
'\nPath to data folder =', args.path,
'\nMode of incident electric field vector =', args.Ei,
'\nFar field range =', args.R,
'\n\n Start Calculating...')
RCS_Table_list = []
for root, directories, files in os.walk(args.path):
# Detect E_field txt
for filename in tqdm.tqdm(files):
if filename[0:2] == 'E_' and filename[-4:] == '.txt':
|
def main():
"""
TODO: Add rad unit
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument('--path', default='data', type=str, help='path to data folder')
parser.add_argument('--Ei', '-E', default=1, type=float, help='Mode of incident electric field vector')
parser.add_argument('--R', '-R', default=1, type=float, help='Far field range')
args = parser.parse_args()
print('\n\nParameter:',
'\nPath to data folder =', args.path,
'\nMode of incident electric field vector =', args.Ei,
'\nFar field range =', args.R,
'\n\n Start Calculating...')
RCS_Table_list = []
for root, directories, files in os.walk(args.path):
# Detect E_field txt
for filename in tqdm.tqdm(files):
if filename[0:2] == 'E_' and filename[-4:] == '.txt': | Table = RCS_Table(args.R, args.Ei) | 4 | 2023-12-24 02:32:49+00:00 | 4k |
karloskar/homeassistant-goecontroller-mqtt | custom_components/goecontroller_mqtt/sensor.py | [
{
"identifier": "SENSORS",
"path": "custom_components/goecontroller_mqtt/definitions/sensor.py",
"snippet": "SENSORS: tuple[GoEControllerSensorEntityDescription, ...] = (\n GoEControllerSensorEntityDescription(\n key=\"isv\",\n attribute=(\"0\", \"i\"),\n name=\"Amp 1\",\n ... | import logging
from homeassistant import config_entries, core
from homeassistant.components import mqtt
from homeassistant.components.sensor import SensorEntity
from homeassistant.core import callback
from .definitions.sensor import SENSORS, GoEControllerSensorEntityDescription
from .entity import GoEControllerEntity | 3,119 | """The go-eController (MQTT) sensor."""
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
async_add_entities,
):
"""Config entry setup."""
async_add_entities(
GoEControllerSensor(config_entry, description)
| """The go-eController (MQTT) sensor."""
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
async_add_entities,
):
"""Config entry setup."""
async_add_entities(
GoEControllerSensor(config_entry, description) | for description in SENSORS | 0 | 2023-12-22 11:32:11+00:00 | 4k |
T0kyoB0y/PotatoWidgets | PotatoWidgets/Widget/_Scroll.py | [
{
"identifier": "Listener",
"path": "PotatoWidgets/Variable/_Listener.py",
"snippet": "class Listener(Variable):\n def __init__(self, callback, initial_value=None):\n super().__init__(initial_value)\n self._callback = callback\n self._thread = None\n self._stop_thread = th... | from ..__Import import *
from ..Variable import Listener, Poll, Variable
from ._Common._BasicProps import BasicProps | 1,886 |
class Scroll(Gtk.ScrolledWindow, BasicProps):
def __init__(
self,
orientation="h",
children=None,
attributes=None,
css=None,
halign="fill",
valign="fill",
hexpand=False,
vexpand=False,
visible=True,
classname="",
):
Gtk.ScrolledWindow.__init__(self)
BasicProps.__init__(
self,
css=css,
halign=halign,
valign=valign,
hexpand=hexpand,
vexpand=vexpand,
active=None,
visible=visible,
classname=classname,
)
self.__clasif_orientation(orientation)
self.set_visible(visible)
self.add_with_viewport(children) if children else None
attributes(self) if attributes else None
for key, value in locals().items():
if key not in [
"self",
"halign",
"valign",
"hexpand",
"vexpand",
"visible",
"active",
"visible",
"classname",
|
class Scroll(Gtk.ScrolledWindow, BasicProps):
def __init__(
self,
orientation="h",
children=None,
attributes=None,
css=None,
halign="fill",
valign="fill",
hexpand=False,
vexpand=False,
visible=True,
classname="",
):
Gtk.ScrolledWindow.__init__(self)
BasicProps.__init__(
self,
css=css,
halign=halign,
valign=valign,
hexpand=hexpand,
vexpand=vexpand,
active=None,
visible=visible,
classname=classname,
)
self.__clasif_orientation(orientation)
self.set_visible(visible)
self.add_with_viewport(children) if children else None
attributes(self) if attributes else None
for key, value in locals().items():
if key not in [
"self",
"halign",
"valign",
"hexpand",
"vexpand",
"visible",
"active",
"visible",
"classname", | ] and isinstance(value, (Listener, Poll, Variable)): | 1 | 2023-12-30 01:34:01+00:00 | 4k |
0xn0ne/sensitive-helper | sensitive-helper.py | [
{
"identifier": "compress",
"path": "utils/compress.py",
"snippet": "def zip_info(file_path: pathlib.Path) -> Dict[str, Any]:\ndef uncompress_zip(\n file_path: Union[pathlib.Path, str], extract_dir: Union[pathlib.Path, str] = None, is_error: bool = True\n) -> Union[pathlib.Path, Any]:\ndef is_tar(fil... | import base64
import binascii
import csv
import json
import pathlib
import re
import time
import pandas
import tqdm
import argparse
from typing import Any, AnyStr, Dict, List, Union
from utils import compress, configurator, office, process | 2,159 | if len(result) % 4 != 0:
return True, ''
try:
# 编码错误的全都丢掉,不丢掉也看不懂
ret_extend = base64.b64decode(result).decode('utf-8')
if not re.search(r'^[\u0020-\u007F\u2010-\u202f\u3000-\u301f\u4e00-\u9fa5\uff00-\uffef]+$', ret_extend):
return True, ''
# \u0020-\u007F:英文可视字符集
# \u2010-\u202f:中文部分符号集
# \u3000-\u301f:中文部分符号集
# \u4e00-\u9fa5:中文常见文字集
# \u2e80-\u9fff:中文文字及中文异形文字集
# \uff00-\uffef:中文部分符号集
except UnicodeDecodeError:
return True, ''
except binascii.Error:
return True, ''
return False, ret_extend
def is_filter_jwt(result: AnyStr):
times = 0
res_split = result.split(b'.')
while times < 2:
if len(res_split[times]) % 4 != 0:
return True, ''
times += 1
return False, ''
def is_filter_result(result: AnyStr, filters: List[AnyStr], flags: int):
if not filters:
return False, ''
for fil in filters:
if re.search(fil, result, flags):
return True, ''
return False, ''
# @log_run_times
def search_content(
file_object: Union[pathlib.Path, bytes],
rules: Dict[str, List[str]],
split: bytes = b'[\x00-\x1F\x7F]+',
is_re_all: bool = False,
) -> List[Dict[str, str]]:
ret = []
row_contents = [file_object]
if isinstance(file_object, pathlib.Path):
row_contents = re.split(split, file_object.read_bytes())
for row_one in row_contents:
# 按控制字符进行分割行
if len(row_one) < 12:
# 单行内容少于8个字符,丢掉
continue
for rule_name in rules:
rule = rules[rule_name]
flags = 0
filters = None
if isinstance(rule, Dict):
if 'flags' in rule:
flags = string_to_reg_flags(rule['flags'])
if 're_filters' in rule:
filters = rule['re_filters']
rule = rule['regexp']
for regexp in rule:
r_result = re.search(regexp, row_one, flags)
if not r_result:
continue
try:
result_byte = r_result.group()
result_text = result_byte.decode('utf-8')
except UnicodeDecodeError:
continue
is_filter, extend = is_filter_result(result_byte, filters, flags)
if rule_name == 'BASE64':
is_filter, extend = is_filter_base64(result_byte)
if rule_name == 'JSON WEB TOKEN(JWT)':
is_filter, extend = is_filter_jwt(result_byte)
if is_filter:
continue
ret.append(
{
'file': file_object.__str__(),
'group': rule_name,
'regexp': regexp.decode('utf-8'),
'match': result_text,
'extend': extend,
}
)
if not is_re_all:
# 如果关闭了匹配所有正则组数据且已发现有用数据,则退出循环
return ret
return ret
def gen_file_list(src_path: str, exclude_files: List[str]) -> List[pathlib.Path]:
tar_path = pathlib.Path(src_path)
ret = []
if tar_path.is_file():
ret.append(tar_path)
else:
for filepath in tar_path.glob('**/*'):
is_skip = False
if filepath.is_dir():
continue
filename = filepath.name
for r_exclude in exclude_files:
# 文件名正则匹配,在排除名单中则排除文件
if re.match(r_exclude, filename):
is_skip = True
break
if is_skip:
continue
if filename.endswith('.docx') and not filename.startswith('~$'):
office.docx_handler(filepath)
elif filename.endswith('.xlsx') and not filename.startswith('~$'):
office.xlsx_handler(filepath)
else:
| #!/bin/python3
# _*_ coding:utf-8 _*_
#
# sensitive-helper.py
# 本地文件敏感信息搜索工具
def log_run_times(func):
def wrapper(*args, **kwargs):
s_time = time.time()
ret = func(*args, **kwargs)
total_time = time.time() - s_time
if total_time <= 1:
return ret
with open('run_times.log', 'a') as _f:
_f.write('total time(s): {}, args: {}\n'.format(time.time() - s_time, args[0][:127]))
return ret
return wrapper
def string_to_reg_flags(flags: str):
flags_int = 0
for flag in flags.split('|'):
flags_int |= getattr(re, flag)
return flags_int
def is_filter_base64(result: AnyStr):
if len(result) % 4 != 0:
return True, ''
try:
# 编码错误的全都丢掉,不丢掉也看不懂
ret_extend = base64.b64decode(result).decode('utf-8')
if not re.search(r'^[\u0020-\u007F\u2010-\u202f\u3000-\u301f\u4e00-\u9fa5\uff00-\uffef]+$', ret_extend):
return True, ''
# \u0020-\u007F:英文可视字符集
# \u2010-\u202f:中文部分符号集
# \u3000-\u301f:中文部分符号集
# \u4e00-\u9fa5:中文常见文字集
# \u2e80-\u9fff:中文文字及中文异形文字集
# \uff00-\uffef:中文部分符号集
except UnicodeDecodeError:
return True, ''
except binascii.Error:
return True, ''
return False, ret_extend
def is_filter_jwt(result: AnyStr):
times = 0
res_split = result.split(b'.')
while times < 2:
if len(res_split[times]) % 4 != 0:
return True, ''
times += 1
return False, ''
def is_filter_result(result: AnyStr, filters: List[AnyStr], flags: int):
if not filters:
return False, ''
for fil in filters:
if re.search(fil, result, flags):
return True, ''
return False, ''
# @log_run_times
def search_content(
file_object: Union[pathlib.Path, bytes],
rules: Dict[str, List[str]],
split: bytes = b'[\x00-\x1F\x7F]+',
is_re_all: bool = False,
) -> List[Dict[str, str]]:
ret = []
row_contents = [file_object]
if isinstance(file_object, pathlib.Path):
row_contents = re.split(split, file_object.read_bytes())
for row_one in row_contents:
# 按控制字符进行分割行
if len(row_one) < 12:
# 单行内容少于8个字符,丢掉
continue
for rule_name in rules:
rule = rules[rule_name]
flags = 0
filters = None
if isinstance(rule, Dict):
if 'flags' in rule:
flags = string_to_reg_flags(rule['flags'])
if 're_filters' in rule:
filters = rule['re_filters']
rule = rule['regexp']
for regexp in rule:
r_result = re.search(regexp, row_one, flags)
if not r_result:
continue
try:
result_byte = r_result.group()
result_text = result_byte.decode('utf-8')
except UnicodeDecodeError:
continue
is_filter, extend = is_filter_result(result_byte, filters, flags)
if rule_name == 'BASE64':
is_filter, extend = is_filter_base64(result_byte)
if rule_name == 'JSON WEB TOKEN(JWT)':
is_filter, extend = is_filter_jwt(result_byte)
if is_filter:
continue
ret.append(
{
'file': file_object.__str__(),
'group': rule_name,
'regexp': regexp.decode('utf-8'),
'match': result_text,
'extend': extend,
}
)
if not is_re_all:
# 如果关闭了匹配所有正则组数据且已发现有用数据,则退出循环
return ret
return ret
def gen_file_list(src_path: str, exclude_files: List[str]) -> List[pathlib.Path]:
tar_path = pathlib.Path(src_path)
ret = []
if tar_path.is_file():
ret.append(tar_path)
else:
for filepath in tar_path.glob('**/*'):
is_skip = False
if filepath.is_dir():
continue
filename = filepath.name
for r_exclude in exclude_files:
# 文件名正则匹配,在排除名单中则排除文件
if re.match(r_exclude, filename):
is_skip = True
break
if is_skip:
continue
if filename.endswith('.docx') and not filename.startswith('~$'):
office.docx_handler(filepath)
elif filename.endswith('.xlsx') and not filename.startswith('~$'):
office.xlsx_handler(filepath)
else: | compress.uncompress(filepath, is_error=False, is_recursive=True) | 0 | 2023-12-26 03:30:39+00:00 | 4k |
Zerohertz/Streamlit-Quant | lib/visual.py | [
{
"identifier": "_main",
"path": "lib/layout.py",
"snippet": "def _main():\n layout = _default()\n layout.height = 500 * st.session_state[\"scale\"]\n layout.width = 1000\n layout.xaxis = {\n \"type\": \"category\",\n \"gridcolor\": \"black\",\n \"tickangle\": -45,\n ... | import plotly.graph_objs as go
import streamlit as st
import zerohertzLib as zz
from plotly.subplots import make_subplots
from lib.layout import _main, _transaction
from lib.util import _color | 2,410 | x=st.session_state["cache"]["transaction"]["period"],
name="Period",
marker_color="#0a0a80",
nbinsx=20,
),
row=1,
col=3,
)
fig.update_xaxes(
gridcolor="black",
tickangle=-45,
tickprefix="₩",
tickformat=",",
tickfont={"color": "black"},
showgrid=True,
tickmode="auto",
row=1,
col=1,
)
fig.update_yaxes(
gridcolor="black",
tickfont={"color": "black"},
showgrid=True,
autorange=True,
row=1,
col=1,
)
fig.update_xaxes(
gridcolor="black",
tickangle=-45,
tickfont={"color": "black"},
showgrid=True,
tickmode="auto",
ticksuffix="%",
tickformat=".2f",
row=1,
col=2,
)
fig.update_yaxes(
gridcolor="black",
tickfont={"color": "black"},
showgrid=True,
autorange=True,
row=1,
col=2,
)
fig.update_xaxes(
gridcolor="black",
tickangle=-45,
tickfont={"color": "black"},
showgrid=True,
tickmode="auto",
ticksuffix="days",
row=1,
col=3,
)
fig.update_yaxes(
gridcolor="black",
tickfont={"color": "black"},
showgrid=True,
autorange=True,
row=1,
col=3,
)
return fig
def _vert(xdata, signal, logic, threshold=(-1, 1)):
threshold_sell, threshold_buy = threshold
if logic == 1:
dash = "solid"
color = "rgba(255, 0, 0, 0.2)"
elif logic == -1:
dash = "solid"
color = "rgba(0, 0, 255, 0.2)"
elif logic == 2:
dash = "longdashdot"
color = "rgba(255, 0, 0, 0.2)"
elif logic == -2:
dash = "longdashdot"
color = "rgba(0, 0, 255, 0.2)"
elif signal >= threshold_buy:
dash = "dash"
color = "rgba(255, 0, 0, 0.2)"
elif signal <= threshold_sell:
dash = "dash"
color = "rgba(0, 0, 255, 0.2)"
else:
return None
return go.layout.Shape(
type="line",
x0=xdata,
y0=0,
x1=xdata,
y1=1,
xref="x",
yref="paper",
line={"color": color, "width": 2, "dash": dash},
)
def main():
figs = [st.session_state["cache"]["candle"]]
if st.session_state["cache"]["vis_ma"]:
figs += st.session_state["cache"]["ma"]
if st.session_state["cache"]["vis_bollinger"]:
figs += st.session_state["cache"]["bollinger"]
if st.session_state["cache"]["vis_signals"]:
figs += st.session_state["cache"]["quant"]
st.plotly_chart(
go.Figure(
data=figs,
layout=_main(),
),
use_container_width=True,
)
def transaction():
fig = _backtest()
|
def candle():
data, xdata = st.session_state["cache"]["data"], st.session_state["cache"]["xdata"]
st.session_state["cache"]["candle"] = go.Candlestick(
x=xdata,
open=data.Open,
high=data.High,
low=data.Low,
close=data.Close,
increasing={"line": {"color": "red"}},
decreasing={"line": {"color": "blue"}},
name=st.session_state["cache"]["name"],
)
st.session_state["logger"].info(
f"""[Plot] Candle Chart: {st.session_state["cache"]["name"]} ({st.session_state["cache"]["symbol"]})"""
)
def moving_average():
xdata = st.session_state["cache"]["xdata"]
st.session_state["cache"]["ma"] = []
colors = _color(4, 0.5, "Set1")
for idx, window in enumerate([5, 20, 60, 120]):
st.session_state["cache"]["ma"].append(
go.Scatter(
x=xdata,
y=st.session_state["cache"]["data"]
.iloc[:, :4]
.mean(1)
.rolling(window)
.mean(),
mode="lines",
name=f"MA{window}",
line={"color": colors[idx]},
)
)
st.session_state["logger"].info(
f"""[Plot] Moving Average: {st.session_state["cache"]["name"]} ({st.session_state["cache"]["symbol"]})"""
)
def bollinger_bands():
bands = zz.quant.util._bollinger_bands(st.session_state["cache"]["data"])
xdata = st.session_state["cache"]["xdata"]
st.session_state["cache"]["bollinger"] = []
for col_, name_, color_ in zip(
["lower_band", "middle_band", "upper_band"],
["Lower", "Middle", "Upper"],
["rgba(255, 0, 0, 0.5)", "rgba(0, 255, 0, 0.5)", "rgba(0, 0, 255, 0.5)"],
):
st.session_state["cache"]["bollinger"].append(
go.Scatter(
x=xdata,
y=bands[col_],
mode="lines",
name=name_,
line={"color": color_},
)
)
st.session_state["logger"].info(
f"""[Plot] Bollinger Bands: {st.session_state["cache"]["name"]} ({st.session_state["cache"]["symbol"]})"""
)
def _signal(signals):
st.session_state["cache"]["quant"] = []
if isinstance(signals, zz.quant.Quant):
threshold_sell, threshold_buy = signals.threshold_sell, signals.threshold_buy
signals = signals.signals
else:
threshold_sell, threshold_buy = -1, 1
colors = _color(len(signals.columns))
for idx, col in enumerate(signals.columns[:-2]):
signals[col]
st.session_state["cache"]["quant"].append(
go.Scatter(
x=st.session_state["cache"]["xdata"],
y=signals[col],
yaxis="y3",
mode="lines",
name=zz.quant.util._method2str(col),
line={"color": colors[idx]},
)
)
st.session_state["cache"]["quant"].append(
go.Scatter(
x=st.session_state["cache"]["xdata"],
y=signals.signals,
yaxis="y2",
mode="lines",
name="Signal",
line={"color": "rgba(0, 0, 0, 0.5)"},
)
)
st.session_state["cache"]["transaction_vert"] = []
for day, sig, log in zip(
st.session_state["cache"]["xdata"],
signals.signals,
signals.logic,
):
vert_ = _vert(day, sig, log, (threshold_sell, threshold_buy))
if vert_ is not None:
st.session_state["cache"]["transaction_vert"].append(vert_)
def _backtest():
fig = make_subplots(rows=1, cols=3)
fig.add_trace(
go.Histogram(
x=st.session_state["cache"]["transaction"]["buy"],
name="Buy",
marker_color="red",
nbinsx=20,
),
row=1,
col=1,
)
fig.add_trace(
go.Histogram(
x=st.session_state["cache"]["transaction"]["sell"],
name="Sell",
marker_color="blue",
nbinsx=20,
),
row=1,
col=1,
)
fig.add_trace(
go.Histogram(
x=st.session_state["cache"]["transaction"]["profit"],
name="Profit",
marker_color="#0a800a",
nbinsx=20,
),
row=1,
col=2,
)
fig.add_trace(
go.Histogram(
x=st.session_state["cache"]["transaction"]["period"],
name="Period",
marker_color="#0a0a80",
nbinsx=20,
),
row=1,
col=3,
)
fig.update_xaxes(
gridcolor="black",
tickangle=-45,
tickprefix="₩",
tickformat=",",
tickfont={"color": "black"},
showgrid=True,
tickmode="auto",
row=1,
col=1,
)
fig.update_yaxes(
gridcolor="black",
tickfont={"color": "black"},
showgrid=True,
autorange=True,
row=1,
col=1,
)
fig.update_xaxes(
gridcolor="black",
tickangle=-45,
tickfont={"color": "black"},
showgrid=True,
tickmode="auto",
ticksuffix="%",
tickformat=".2f",
row=1,
col=2,
)
fig.update_yaxes(
gridcolor="black",
tickfont={"color": "black"},
showgrid=True,
autorange=True,
row=1,
col=2,
)
fig.update_xaxes(
gridcolor="black",
tickangle=-45,
tickfont={"color": "black"},
showgrid=True,
tickmode="auto",
ticksuffix="days",
row=1,
col=3,
)
fig.update_yaxes(
gridcolor="black",
tickfont={"color": "black"},
showgrid=True,
autorange=True,
row=1,
col=3,
)
return fig
def _vert(xdata, signal, logic, threshold=(-1, 1)):
threshold_sell, threshold_buy = threshold
if logic == 1:
dash = "solid"
color = "rgba(255, 0, 0, 0.2)"
elif logic == -1:
dash = "solid"
color = "rgba(0, 0, 255, 0.2)"
elif logic == 2:
dash = "longdashdot"
color = "rgba(255, 0, 0, 0.2)"
elif logic == -2:
dash = "longdashdot"
color = "rgba(0, 0, 255, 0.2)"
elif signal >= threshold_buy:
dash = "dash"
color = "rgba(255, 0, 0, 0.2)"
elif signal <= threshold_sell:
dash = "dash"
color = "rgba(0, 0, 255, 0.2)"
else:
return None
return go.layout.Shape(
type="line",
x0=xdata,
y0=0,
x1=xdata,
y1=1,
xref="x",
yref="paper",
line={"color": color, "width": 2, "dash": dash},
)
def main():
figs = [st.session_state["cache"]["candle"]]
if st.session_state["cache"]["vis_ma"]:
figs += st.session_state["cache"]["ma"]
if st.session_state["cache"]["vis_bollinger"]:
figs += st.session_state["cache"]["bollinger"]
if st.session_state["cache"]["vis_signals"]:
figs += st.session_state["cache"]["quant"]
st.plotly_chart(
go.Figure(
data=figs,
layout=_main(),
),
use_container_width=True,
)
def transaction():
fig = _backtest() | fig.update_layout(_transaction()) | 1 | 2023-12-26 11:29:06+00:00 | 4k |
lvyufeng/uie_mindspore | uie_predictor.py | [
{
"identifier": "ErnieMTokenizerFast",
"path": "tokenizer.py",
"snippet": "class ErnieMTokenizerFast(PreTrainedTokenizerFast):\n r\"\"\"\n Construct a \"fast\" ERNIE-M tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.\n This tokenizer inherits from [`PreTrainedTokeni... | import re
import numpy as np
import math
import argparse
import mindspore
from mindnlp.transformers import UIE, UIEM
from tokenizer import ErnieMTokenizerFast
from utils import logger, get_bool_ids_greater_than, get_span, get_id_and_prob, cut_chinese_sent, dbc2sbc
from mindnlp.transformers import BertTokenizerFast | 3,372 |
class MindSporeInferBackend:
def __init__(self,
model_path_prefix,
multilingual=False,
use_fp16=False):
logger.info(">>> [MindSporeInferBackend] Creating Engine ...")
if multilingual:
self.model = UIEM.from_pretrained(model_path_prefix)
else:
self.model = UIE.from_pretrained(model_path_prefix)
self.model.set_train(False)
if use_fp16:
logger.info(
">>> [MindSporeInferBackend] Use FP16 to inference ...")
self.model = self.model.half()
logger.info(">>> [MindSporeInferBackend] Engine Created ...")
def infer(self, input_dict):
for input_name, input_value in input_dict.items():
input_value = mindspore.Tensor(input_value)
input_dict[input_name] = input_value
outputs = self.model(**input_dict)
start_prob, end_prob = outputs[0], outputs[1]
start_prob = start_prob.asnumpy()
end_prob = end_prob.asnumpy()
return start_prob, end_prob
class UIEPredictor(object):
def __init__(self, model, schema, task_path=None, schema_lang="zh", engine='mindspore', position_prob=0.5, max_seq_len=512, batch_size=64, split_sentence=False, use_fp16=False):
if model in ['uie-m-base', 'uie-m-large']:
self._multilingual = True
else:
self._multilingual = False
self._model = model
self._engine = engine
self._task_path = task_path
self._position_prob = position_prob
self._max_seq_len = max_seq_len
self._batch_size = batch_size
self._split_sentence = split_sentence
self._use_fp16 = use_fp16
self._schema_tree = None
self._is_en = True if model in ['uie-base-en'
] or schema_lang == 'en' else False
self.set_schema(schema)
self._prepare_predictor()
def _prepare_predictor(self):
assert self._engine in ['mindspore'], "engine must be mindspore!"
if self._task_path is None:
self._task_path = self._model
if self._multilingual:
|
class MindSporeInferBackend:
def __init__(self,
model_path_prefix,
multilingual=False,
use_fp16=False):
logger.info(">>> [MindSporeInferBackend] Creating Engine ...")
if multilingual:
self.model = UIEM.from_pretrained(model_path_prefix)
else:
self.model = UIE.from_pretrained(model_path_prefix)
self.model.set_train(False)
if use_fp16:
logger.info(
">>> [MindSporeInferBackend] Use FP16 to inference ...")
self.model = self.model.half()
logger.info(">>> [MindSporeInferBackend] Engine Created ...")
def infer(self, input_dict):
for input_name, input_value in input_dict.items():
input_value = mindspore.Tensor(input_value)
input_dict[input_name] = input_value
outputs = self.model(**input_dict)
start_prob, end_prob = outputs[0], outputs[1]
start_prob = start_prob.asnumpy()
end_prob = end_prob.asnumpy()
return start_prob, end_prob
class UIEPredictor(object):
def __init__(self, model, schema, task_path=None, schema_lang="zh", engine='mindspore', position_prob=0.5, max_seq_len=512, batch_size=64, split_sentence=False, use_fp16=False):
if model in ['uie-m-base', 'uie-m-large']:
self._multilingual = True
else:
self._multilingual = False
self._model = model
self._engine = engine
self._task_path = task_path
self._position_prob = position_prob
self._max_seq_len = max_seq_len
self._batch_size = batch_size
self._split_sentence = split_sentence
self._use_fp16 = use_fp16
self._schema_tree = None
self._is_en = True if model in ['uie-base-en'
] or schema_lang == 'en' else False
self.set_schema(schema)
self._prepare_predictor()
def _prepare_predictor(self):
assert self._engine in ['mindspore'], "engine must be mindspore!"
if self._task_path is None:
self._task_path = self._model
if self._multilingual: | self._tokenizer = ErnieMTokenizerFast.from_pretrained( | 0 | 2023-12-25 11:02:24+00:00 | 4k |
Tongjilibo/bert4vector | bert4vector/bert.py | [
{
"identifier": "Base",
"path": "bert4vector/base.py",
"snippet": "class Base:\n \"\"\"\n Interface for similarity compute and search.\n\n In all instances, there is a corpus against which we want to perform the similarity search.\n For each similarity search, the input is a document or a co... | from loguru import logger
from typing import List, Union, Dict
from bert4torch.pipelines import Text2Vec
from bert4vector.base import Base
from bert4vector.utils import cos_sim, dot_score, semantic_search
from sentence_transformers import SentenceTransformer
import numpy as np
import json | 2,118 |
class BertVector(Base):
def __init__(self, model_path, corpus: Union[List[str], Dict[str, str]] = None, **model_config):
"""
Initialize the similarity object.
:param checkpoint_path: 模型权重地址
:param config_path: 权重的config地址
:param corpus: Corpus of documents to use for similarity queries.
:param device: Device (like 'cuda' / 'cpu') to use for the computation.
"""
self.model = self.build_model(model_path, **model_config)
|
class BertVector(Base):
def __init__(self, model_path, corpus: Union[List[str], Dict[str, str]] = None, **model_config):
"""
Initialize the similarity object.
:param checkpoint_path: 模型权重地址
:param config_path: 权重的config地址
:param corpus: Corpus of documents to use for similarity queries.
:param device: Device (like 'cuda' / 'cpu') to use for the computation.
"""
self.model = self.build_model(model_path, **model_config) | self.score_functions = {'cos_sim': cos_sim, 'dot': dot_score} | 2 | 2023-12-25 01:18:52+00:00 | 4k |
SamsungLabs/ShellRecontruction | shell/models/shell_reconstructor_model.py | [
{
"identifier": "UNet",
"path": "shell/models/utils.py",
"snippet": "class UNet(torch.nn.Module):\n def __init__(\n self,\n in_channels=1,\n out_channels=2,\n depth=5,\n wf=6,\n padding=True,\n normalization=None,\n up_mode='upconv',\n us... | import torch
import numpy as np
from shell.models.utils import UNet
from shell.utils import misc | 1,754 | """
Copyright (c) 2023 Samsung Electronics Co., Ltd.
Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0/
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
For conditions of distribution and use, see the accompanying LICENSE file.
"""
class ShellReconstructorModel(torch.nn.Module):
def __init__(
self,
device=None,
):
super().__init__()
self.height = 4
self.max_fms = 256
self.wf = 6
self.xy_size = 1.2
self.in_channels = 3
self.exit_only = False
self.mask_channel = 1
self.depth_channel = 0
self.device = device
| """
Copyright (c) 2023 Samsung Electronics Co., Ltd.
Licensed under the Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License, (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://creativecommons.org/licenses/by-nc/4.0/
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
For conditions of distribution and use, see the accompanying LICENSE file.
"""
class ShellReconstructorModel(torch.nn.Module):
def __init__(
self,
device=None,
):
super().__init__()
self.height = 4
self.max_fms = 256
self.wf = 6
self.xy_size = 1.2
self.in_channels = 3
self.exit_only = False
self.mask_channel = 1
self.depth_channel = 0
self.device = device | self.unet = UNet( | 0 | 2023-12-22 06:25:27+00:00 | 4k |
SAITPublic/BiRF | test.py | [
{
"identifier": "NGPradianceField",
"path": "lib/models/ngp.py",
"snippet": "class NGPradianceField(torch.nn.Module):\n def __init__(\n self,\n aabb: Union[torch.Tensor, List[float]],\n num_dim: int = 3,\n use_viewdirs: bool = True,\n density_activation: Callable = ... | import argparse
import math
import os
import time
import json
import gin
import imageio
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from typing import *
from datetime import datetime
from torchmetrics import StructuralSimilarityIndexMeasure
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from lib.models.ngp import NGPradianceField
from lib.utils import render_image, set_random_seed, load_dataset, load_occgrid, load_model
from nerfacc import ContractionType, OccupancyGrid | 3,562 |
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest) or []
items.extend(values)
setattr(namespace, self.dest, items)
def parse_args():
parser = argparse.ArgumentParser()
parser.register('action', 'extend', ExtendAction)
parser.add_argument(
"configs",
action="append",
help="path to config files",
)
parser.add_argument(
"--bind",
nargs='+',
action="extend",
help="param to bind",
)
parser.add_argument(
"--scene",
type=str,
required=True,
choices=[
# nerf synthetic
"chair",
"drums",
"ficus",
"hotdog",
"lego",
"materials",
"mic",
"ship",
# nsvf synthetic
"Bike",
"Lifestyle",
"Palace",
"Robot",
"Spaceship",
"Steamtrain",
"Toad",
"Wineholder",
# nsvf TankAndTemple
"Barn",
"Caterpillar",
"Family",
"Ignatius",
"Truck",
],
help="which scene to use",
)
parser.add_argument(
"--n_features",
type=int,
default=2,
help="number of features"
)
parser.add_argument(
"--seed",
type=int,
default=0,
help="random seed number"
)
parser.add_argument(
"--ckpt_dir",
type=str,
default=None,
help="path for checkpoint directory"
)
return parser.parse_args()
@gin.configurable
def main(
scene: str,
ckpt_dir: str,
n_features: int=2,
seed: int = 2023,
log_dir: str = "./logs",
prefix: Optional[str] = None,
postfix: Optional[str] = None,
max_steps: int = 20000,
render_n_samples: int = 1024,
test_chunk_size: int = 16384,
aabb: List[float] = [-1.5, -1.5, -1.5, 1.5, 1.5, 1.5],
data_root_fp: str = "data/nerf_synthetic/",
train_split: str = "train",
cone_angle: float = 0.0,
sparsity_weight: float = 2e-5,
render_per_frame: int = -1,
):
# log
save_path = f"{log_dir}/{scene}" if ckpt_dir == None else ckpt_dir
if prefix is not None:
save_path = f"{prefix}_{save_path}"
if postfix is not None:
save_path = f"{save_path}_{postfix}"
save_path = f"{save_path}_{n_features}"
print(f'Evaluation for pretrained model in "{save_path}"')
results = {}
# setup the dataset
test_dataset_kwargs = {}
target_sample_batch_size = 1 << 18
grid_resolution = 128
| """
"Copyright (C) 2021 Samsung Electronics Co. LTD
This software is a property of Samsung Electronics.
No part of this software, either material or conceptual may be copied or distributed, transmitted,
transcribed, stored in a retrieval system, or translated into any human or computer language in any form by any means,
electronic, mechanical, manual or otherwise, or disclosed
to third parties without the express written permission of Samsung Electronics.
(Use of the Software is restricted to non-commercial, personal or academic, research purpose only)"
"""
"""
Modified from NerfAcc (https://github.com/KAIR-BAIR/nerfacc)
Copyright (c) 2022 Ruilong Li, UC Berkeley.
"""
class ExtendAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest) or []
items.extend(values)
setattr(namespace, self.dest, items)
def parse_args():
parser = argparse.ArgumentParser()
parser.register('action', 'extend', ExtendAction)
parser.add_argument(
"configs",
action="append",
help="path to config files",
)
parser.add_argument(
"--bind",
nargs='+',
action="extend",
help="param to bind",
)
parser.add_argument(
"--scene",
type=str,
required=True,
choices=[
# nerf synthetic
"chair",
"drums",
"ficus",
"hotdog",
"lego",
"materials",
"mic",
"ship",
# nsvf synthetic
"Bike",
"Lifestyle",
"Palace",
"Robot",
"Spaceship",
"Steamtrain",
"Toad",
"Wineholder",
# nsvf TankAndTemple
"Barn",
"Caterpillar",
"Family",
"Ignatius",
"Truck",
],
help="which scene to use",
)
parser.add_argument(
"--n_features",
type=int,
default=2,
help="number of features"
)
parser.add_argument(
"--seed",
type=int,
default=0,
help="random seed number"
)
parser.add_argument(
"--ckpt_dir",
type=str,
default=None,
help="path for checkpoint directory"
)
return parser.parse_args()
@gin.configurable
def main(
scene: str,
ckpt_dir: str,
n_features: int=2,
seed: int = 2023,
log_dir: str = "./logs",
prefix: Optional[str] = None,
postfix: Optional[str] = None,
max_steps: int = 20000,
render_n_samples: int = 1024,
test_chunk_size: int = 16384,
aabb: List[float] = [-1.5, -1.5, -1.5, 1.5, 1.5, 1.5],
data_root_fp: str = "data/nerf_synthetic/",
train_split: str = "train",
cone_angle: float = 0.0,
sparsity_weight: float = 2e-5,
render_per_frame: int = -1,
):
# log
save_path = f"{log_dir}/{scene}" if ckpt_dir == None else ckpt_dir
if prefix is not None:
save_path = f"{prefix}_{save_path}"
if postfix is not None:
save_path = f"{save_path}_{postfix}"
save_path = f"{save_path}_{n_features}"
print(f'Evaluation for pretrained model in "{save_path}"')
results = {}
# setup the dataset
test_dataset_kwargs = {}
target_sample_batch_size = 1 << 18
grid_resolution = 128
| test_dataset, data_root_fp = load_dataset( | 3 | 2023-12-28 02:08:29+00:00 | 4k |
pkariz/grin-explorer | backend/api/models.py | [
{
"identifier": "NodeV2API",
"path": "backend/api/node.py",
"snippet": "class NodeV2API:\n def __init__(self, node):\n self.foreign_api_url = node.api_url\n self.foreign_api_user = node.api_username\n self.foreign_api_password = node.api_password\n self._cached_blocks = {}... | from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.fields import ArrayField
from django.core.validators import (
MinLengthValidator,
MinValueValidator,
MaxValueValidator,
)
from django.db import models, transaction
from django.db.models import Q
from model_utils.models import TimeStampedModel
from slugify import slugify
from requests.exceptions import (
Timeout as RequestsTimeout,
ConnectionError as RequestsConnectionError,
HTTPError as RequestsHTTPError,
ReadTimeout as RequestsReadTimeout
)
from .node import NodeV2API, NodeError
from .bootstrap import load_blocks
from .models import Block, BlockHeader, Input, Output, Kernel, DramatiqTask, Reorg
from django.contrib.contenttypes.models import ContentType
from decimal import Decimal
from .serializers import DramatiqTaskSerializer
import logging | 2,106 |
logger = logging.getLogger(__name__)
class NodeGroup(models.Model):
"""
NodeGroup represents a group of nodes. These nodes should be on the same
network.:
"""
id = models.BigAutoField(primary_key=True)
# name is probably mainnet, testnet or smth similar
name = models.CharField(max_length=255, unique=True)
# by default that's slug of the name
slug = models.SlugField(max_length=255, unique=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name, to_lower=True)
else:
self.slug = self.slug.lower()
self.full_clean()
return super().save(*args, **kwargs)
class Node(TimeStampedModel):
"""Node on the network. Currently it only supports grin-rust."""
id = models.BigAutoField(primary_key=True)
# name can be whatever
name = models.CharField(max_length=255, unique=True)
# by default that's slug of the name
slug = models.SlugField(max_length=255, unique=True)
group = models.ForeignKey(
NodeGroup, related_name='nodes', on_delete=models.PROTECT)
# foreign api url of the grin-rust node
api_url = models.URLField()
# username of the grin-rust node
api_username = models.CharField(max_length=255)
# foreign api secret of the grin-rust node
api_password = models.CharField(max_length=255)
# if archive is true then we fetch every block when we bootstrap, otherwise
# we fetch only latest 1440 blocks (1 day)
archive = models.BooleanField(default=False)
def __str__(self):
repr = f'{self.name}'
if self.archive:
repr += ' (archive)'
return repr
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name, to_lower=True)
else:
self.slug = self.slug.lower()
return super().save(*args, **kwargs)
def is_reachable(self):
try:
NodeV2API(self).get_tip()
return True
except (
RequestsConnectionError,
RequestsTimeout,
RequestsHTTPError,
RequestsReadTimeout
):
logger.exception('Node unreachable', extra={'node': self.slug})
return False
class Blockchain(TimeStampedModel):
id = models.BigAutoField(primary_key=True)
# testnet, mainnet etc
name = models.CharField(max_length=255, unique=True)
# slug of the name, we use it in url
slug = models.SlugField(max_length=255, unique=True)
# node from which the data is fetched
node = models.ForeignKey(
Node, related_name='blockchains', on_delete=models.PROTECT)
# the default blockchain will be picked on the gui by default
default = models.BooleanField(default=False)
# if fetch_price is False then the shown price will always be 0.
# Testnets and localnets should have this set to false.
fetch_price = models.BooleanField(default=True)
# load_progress shows current % of loaded blocks. If archive is True then
# load_progress will represent % of missing all blocks, otherwise % of
# missing blocks from the latest 1440 blocks
load_progress = models.DecimalField(
max_digits=5,
decimal_places=2,
default=0.0,
validators=[MinValueValidator(0), MaxValueValidator(100)]
)
def __str__(self):
return f'{self.name} - {self.load_progress} [Node<{self.node}>]'
def bootstrap(self, skip_reorg_check=False):
# import here to avoid cyclic import
start_height, end_height = self.get_bootstrap_heights()
load_blocks(self, start_height, end_height, skip_reorg_check)
def get_tip_height(self):
node_api = NodeV2API(self.node)
try:
end_block = node_api.get_tip()['height']
|
logger = logging.getLogger(__name__)
class NodeGroup(models.Model):
"""
NodeGroup represents a group of nodes. These nodes should be on the same
network.:
"""
id = models.BigAutoField(primary_key=True)
# name is probably mainnet, testnet or smth similar
name = models.CharField(max_length=255, unique=True)
# by default that's slug of the name
slug = models.SlugField(max_length=255, unique=True)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name, to_lower=True)
else:
self.slug = self.slug.lower()
self.full_clean()
return super().save(*args, **kwargs)
class Node(TimeStampedModel):
"""Node on the network. Currently it only supports grin-rust."""
id = models.BigAutoField(primary_key=True)
# name can be whatever
name = models.CharField(max_length=255, unique=True)
# by default that's slug of the name
slug = models.SlugField(max_length=255, unique=True)
group = models.ForeignKey(
NodeGroup, related_name='nodes', on_delete=models.PROTECT)
# foreign api url of the grin-rust node
api_url = models.URLField()
# username of the grin-rust node
api_username = models.CharField(max_length=255)
# foreign api secret of the grin-rust node
api_password = models.CharField(max_length=255)
# if archive is true then we fetch every block when we bootstrap, otherwise
# we fetch only latest 1440 blocks (1 day)
archive = models.BooleanField(default=False)
def __str__(self):
repr = f'{self.name}'
if self.archive:
repr += ' (archive)'
return repr
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name, to_lower=True)
else:
self.slug = self.slug.lower()
return super().save(*args, **kwargs)
def is_reachable(self):
try:
NodeV2API(self).get_tip()
return True
except (
RequestsConnectionError,
RequestsTimeout,
RequestsHTTPError,
RequestsReadTimeout
):
logger.exception('Node unreachable', extra={'node': self.slug})
return False
class Blockchain(TimeStampedModel):
id = models.BigAutoField(primary_key=True)
# testnet, mainnet etc
name = models.CharField(max_length=255, unique=True)
# slug of the name, we use it in url
slug = models.SlugField(max_length=255, unique=True)
# node from which the data is fetched
node = models.ForeignKey(
Node, related_name='blockchains', on_delete=models.PROTECT)
# the default blockchain will be picked on the gui by default
default = models.BooleanField(default=False)
# if fetch_price is False then the shown price will always be 0.
# Testnets and localnets should have this set to false.
fetch_price = models.BooleanField(default=True)
# load_progress shows current % of loaded blocks. If archive is True then
# load_progress will represent % of missing all blocks, otherwise % of
# missing blocks from the latest 1440 blocks
load_progress = models.DecimalField(
max_digits=5,
decimal_places=2,
default=0.0,
validators=[MinValueValidator(0), MaxValueValidator(100)]
)
def __str__(self):
return f'{self.name} - {self.load_progress} [Node<{self.node}>]'
def bootstrap(self, skip_reorg_check=False):
# import here to avoid cyclic import
start_height, end_height = self.get_bootstrap_heights()
load_blocks(self, start_height, end_height, skip_reorg_check)
def get_tip_height(self):
node_api = NodeV2API(self.node)
try:
end_block = node_api.get_tip()['height'] | except NodeError as e: | 1 | 2023-12-24 22:15:11+00:00 | 4k |
Rubics-Xuan/Med-DANet | utils/predict.py | [
{
"identifier": "slide_window_2D_only_output",
"path": "utils/slide_test.py",
"snippet": "def slide_window_2D_only_output(ori_img, crop_size, model):\n\n stride_rate = 1.0/3.0\n stride = int(crop_size * stride_rate) # default = 85\n batch, classes, origin_h, origin_w = ori_img.size()\n\n wi... | import os
import time
import logging
import torch
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
import cv2
import nibabel as nib
import imageio
import scipy.misc
import SimpleITK as sitk
from utils.slide_test import slide_window_2D_only_output, slide_window_2D_out_gflops
| 2,379 | index_list = (ori == j).nonzero()
for i in range(len(index_list)):
batch, height, width, depth = index_list[i]
new_gd[batch, j, height, width, depth] = 1
return new_gd.float()
def tailor_and_concat(x, model):
temp = []
temp.append(x[..., :128, :128, :128])
temp.append(x[..., :128, 112:240, :128])
temp.append(x[..., 112:240, :128, :128])
temp.append(x[..., 112:240, 112:240, :128])
temp.append(x[..., :128, :128, 27:155])
temp.append(x[..., :128, 112:240, 27:155])
temp.append(x[..., 112:240, :128, 27:155])
temp.append(x[..., 112:240, 112:240, 27:155])
y = x.clone()
for i in range(len(temp)):
# temp[i] = model(temp[i])
temp[i] = model(temp[i])
y[..., :128, :128, :128] = temp[0]
y[..., :128, 128:240, :128] = temp[1][..., :, 16:128, :]
y[..., 128:240, :128, :128] = temp[2][..., 16:128, :, :]
y[..., 128:240, 128:240, :128] = temp[3][..., 16:128, 16:128, :]
y[..., :128, :128, 128:155] = temp[4][..., 96:123]
y[..., :128, 128:240, 128:155] = temp[5][..., :, 16:128, 96:123]
y[..., 128:240, :128, 128:155] = temp[6][..., 16:128, :, 96:123]
y[..., 128:240, 128:240, 128:155] = temp[7][..., 16:128, 16:128, 96:123]
return y[..., :155]
def dice_score(o, t, eps=1e-8):
num = 2*(o*t).sum() + eps
den = o.sum() + t.sum() + eps
return num/den
def mIOU(o, t, eps=1e-8):
num = (o*t).sum() + eps
den = (o | t).sum() + eps
return num/den
def softmax_mIOU_score(output, target):
mIOU_score = []
mIOU_score.append(mIOU(o=(output==1),t=(target==1)))
mIOU_score.append(mIOU(o=(output==2),t=(target==2)))
mIOU_score.append(mIOU(o=(output==3),t=(target==4)))
return mIOU_score
def softmax_output_dice(output, target):
ret = []
# whole
o = output > 0; t = target > 0 # ce
ret += dice_score(o, t),
# core
o = (output == 1) | (output == 3)
t = (target == 1) | (target == 4)
ret += dice_score(o, t),
# active
o = (output == 3);t = (target == 4)
ret += dice_score(o, t),
return ret
keys = 'whole', 'core', 'enhancing', 'loss'
def validate_softmax(
valid_loader,
model,
heatmap_use=True,
heatmap_dir='',
savepath='', # when in validation set, you must specify the path to save the 'nii' segmentation results here
names=None, # The names of the patients orderly!
verbose=False,
save_format=None, # ['nii','npy'], use 'nii' as default. Its purpose is for submission.
snapshot=False, # for visualization. Default false. It is recommended to generate the visualized figures.
visual='', # the path to save visualization
postprocess=False, # Default False, when use postprocess, the score of dice_ET would be changed.
valid_in_train=False, # if you are valid when train
):
H, W, T = 240, 240, 160
model.eval()
WT_LIST, TC_LIST, ET_LIST, flops_sample_list = [], [], [], []
runtimes = []
for i, data in enumerate(valid_loader):
print('-------------------------------------------------------------------')
msg = 'Subject {}/{}, '.format(i + 1, len(valid_loader))
if valid_in_train:
target_cpu = data[1][0, :H, :W, :T].numpy()
data = [t.cuda(non_blocking=True) for t in data]
x, target = data[:2]
else:
x = data
x.cuda()
flops_sample = 0
torch.cuda.synchronize() # add the code synchronize() to correctly count the runtime.
start_time = time.time()
x = x[..., :155]
output = x.clone().cpu().detach().numpy()
print('start to predict segmentation!!')
for s in range(155):
x_s = x[..., s].cuda()
x_origin = x_s
|
cudnn.benchmark = True
def one_hot(ori, classes):
batch, h, w, d = ori.size()
new_gd = torch.zeros((batch, classes, h, w, d), dtype=ori.dtype).cuda()
for j in range(classes):
index_list = (ori == j).nonzero()
for i in range(len(index_list)):
batch, height, width, depth = index_list[i]
new_gd[batch, j, height, width, depth] = 1
return new_gd.float()
def tailor_and_concat(x, model):
temp = []
temp.append(x[..., :128, :128, :128])
temp.append(x[..., :128, 112:240, :128])
temp.append(x[..., 112:240, :128, :128])
temp.append(x[..., 112:240, 112:240, :128])
temp.append(x[..., :128, :128, 27:155])
temp.append(x[..., :128, 112:240, 27:155])
temp.append(x[..., 112:240, :128, 27:155])
temp.append(x[..., 112:240, 112:240, 27:155])
y = x.clone()
for i in range(len(temp)):
# temp[i] = model(temp[i])
temp[i] = model(temp[i])
y[..., :128, :128, :128] = temp[0]
y[..., :128, 128:240, :128] = temp[1][..., :, 16:128, :]
y[..., 128:240, :128, :128] = temp[2][..., 16:128, :, :]
y[..., 128:240, 128:240, :128] = temp[3][..., 16:128, 16:128, :]
y[..., :128, :128, 128:155] = temp[4][..., 96:123]
y[..., :128, 128:240, 128:155] = temp[5][..., :, 16:128, 96:123]
y[..., 128:240, :128, 128:155] = temp[6][..., 16:128, :, 96:123]
y[..., 128:240, 128:240, 128:155] = temp[7][..., 16:128, 16:128, 96:123]
return y[..., :155]
def dice_score(o, t, eps=1e-8):
num = 2*(o*t).sum() + eps
den = o.sum() + t.sum() + eps
return num/den
def mIOU(o, t, eps=1e-8):
num = (o*t).sum() + eps
den = (o | t).sum() + eps
return num/den
def softmax_mIOU_score(output, target):
mIOU_score = []
mIOU_score.append(mIOU(o=(output==1),t=(target==1)))
mIOU_score.append(mIOU(o=(output==2),t=(target==2)))
mIOU_score.append(mIOU(o=(output==3),t=(target==4)))
return mIOU_score
def softmax_output_dice(output, target):
ret = []
# whole
o = output > 0; t = target > 0 # ce
ret += dice_score(o, t),
# core
o = (output == 1) | (output == 3)
t = (target == 1) | (target == 4)
ret += dice_score(o, t),
# active
o = (output == 3);t = (target == 4)
ret += dice_score(o, t),
return ret
keys = 'whole', 'core', 'enhancing', 'loss'
def validate_softmax(
valid_loader,
model,
heatmap_use=True,
heatmap_dir='',
savepath='', # when in validation set, you must specify the path to save the 'nii' segmentation results here
names=None, # The names of the patients orderly!
verbose=False,
save_format=None, # ['nii','npy'], use 'nii' as default. Its purpose is for submission.
snapshot=False, # for visualization. Default false. It is recommended to generate the visualized figures.
visual='', # the path to save visualization
postprocess=False, # Default False, when use postprocess, the score of dice_ET would be changed.
valid_in_train=False, # if you are valid when train
):
H, W, T = 240, 240, 160
model.eval()
WT_LIST, TC_LIST, ET_LIST, flops_sample_list = [], [], [], []
runtimes = []
for i, data in enumerate(valid_loader):
print('-------------------------------------------------------------------')
msg = 'Subject {}/{}, '.format(i + 1, len(valid_loader))
if valid_in_train:
target_cpu = data[1][0, :H, :W, :T].numpy()
data = [t.cuda(non_blocking=True) for t in data]
x, target = data[:2]
else:
x = data
x.cuda()
flops_sample = 0
torch.cuda.synchronize() # add the code synchronize() to correctly count the runtime.
start_time = time.time()
x = x[..., :155]
output = x.clone().cpu().detach().numpy()
print('start to predict segmentation!!')
for s in range(155):
x_s = x[..., s].cuda()
x_origin = x_s
| logit, gflops_slice = slide_window_2D_out_gflops(x_s, crop_size=128, model=model) # no flip
| 1 | 2023-12-28 07:26:55+00:00 | 4k |
the-seeds/cardinal | src/cardinal/utils/builder.py | [
{
"identifier": "BaseExtractor",
"path": "src/cardinal/core/extractor/base_extractor.py",
"snippet": "class BaseExtractor(Extractor):\n def __init__(\n self, vectorizer: \"EmbedOpenAI\", storage: \"StringKeyedStorage[Leaf]\", vectorstore: \"VectorStore[LeafIndex]\"\n ) -> None:\n sel... | import os
from pathlib import Path
from ..core.extractor import BaseExtractor
from ..core.logging import get_logger
from ..core.model import EmbedOpenAI
from ..core.schema import Leaf, LeafIndex
from ..core.storage import RedisStorage
from ..core.vectorstore import Chroma | 1,954 |
logger = get_logger(__name__)
def build_database(folder: Path, database: str) -> None:
input_files = []
for path in folder.rglob("*.*"):
if path.is_file() and path.suffix == ".txt":
input_files.append(path)
extractor = BaseExtractor(
vectorizer=EmbedOpenAI(),
|
logger = get_logger(__name__)
def build_database(folder: Path, database: str) -> None:
input_files = []
for path in folder.rglob("*.*"):
if path.is_file() and path.suffix == ".txt":
input_files.append(path)
extractor = BaseExtractor(
vectorizer=EmbedOpenAI(), | storage=RedisStorage[Leaf](name=database), | 3 | 2023-12-26 14:16:40+00:00 | 4k |
datrocity/pond | tests/artifact/test_artifact_registry.py | [
{
"identifier": "Artifact",
"path": "pond/artifact/artifact.py",
"snippet": "class Artifact(ABC):\n \"\"\" Knows how to read and write one type of artifact.\n\n Concrete Artifact implementation should save the metadata with the data if possible,\n so that the artifact is self-contained even if,... | import pytest
from pond.artifact import Artifact
from pond.artifact.artifact_registry import ArtifactRegistry
from pond.exceptions import FormatNotFound, ArtifactNotFound | 2,095 |
class MockArtifactCSV(Artifact):
pass
class MockArtifactExcel(Artifact):
pass
@pytest.fixture()
def registry():
registry = ArtifactRegistry()
registry.register(MockArtifactCSV, list, format='csv')
registry.register(MockArtifactExcel, list, format='xlsx')
return registry
def test_lookup_with_format(registry):
# look-up with format
cls = registry.get_artifact(list, format='csv')
assert cls == MockArtifactCSV
def test_lookup_no_format(registry):
# look-up without format, return last inserted
cls = registry.get_artifact(list)
assert cls == MockArtifactExcel
def test_lookup_format_not_found(registry):
# look-up, format is not registry
with pytest.raises(FormatNotFound) as excinfo:
registry.get_artifact(list, format='foo')
msg = str(excinfo.value)
assert 'foo' in msg
assert 'list' in msg
def test_lookup_data_type_not_found(registry):
# look-up, format is not registry
|
class MockArtifactCSV(Artifact):
pass
class MockArtifactExcel(Artifact):
pass
@pytest.fixture()
def registry():
registry = ArtifactRegistry()
registry.register(MockArtifactCSV, list, format='csv')
registry.register(MockArtifactExcel, list, format='xlsx')
return registry
def test_lookup_with_format(registry):
# look-up with format
cls = registry.get_artifact(list, format='csv')
assert cls == MockArtifactCSV
def test_lookup_no_format(registry):
# look-up without format, return last inserted
cls = registry.get_artifact(list)
assert cls == MockArtifactExcel
def test_lookup_format_not_found(registry):
# look-up, format is not registry
with pytest.raises(FormatNotFound) as excinfo:
registry.get_artifact(list, format='foo')
msg = str(excinfo.value)
assert 'foo' in msg
assert 'list' in msg
def test_lookup_data_type_not_found(registry):
# look-up, format is not registry | with pytest.raises(ArtifactNotFound) as excinfo: | 3 | 2023-12-24 13:05:58+00:00 | 4k |
demirogun/pyethnobiology | pyethnobiology/indices.py | [
{
"identifier": "RadialPlot",
"path": "pyethnobiology/visualization.py",
"snippet": "class RadialPlot:\n \"\"\"\n Creates a radial bar plot to visualize data in a circular layout.\n \"\"\"\n\n def __init__(self,\n data: pd.DataFrame,\n colorbar_title: str,\n ... | import pandas as pd
from pyethnobiology.visualization import RadialPlot
from pyethnobiology.visualization import HeatmapPlot | 2,788 |
class FC:
def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"):
"""
Initializes the class with necessary data and column names.
Args:
data (pd.DataFrame): DataFrame containing plant usage information.
informant_column (str, optional): Name of the column containing informant IDs. Defaults to "informant".
taxon_column (str, optional): Name of the column containing species names. Defaults to "taxon".
use_column (str, optional): Name of the column containing plant uses. Defaults to "ailments_treated".
"""
self.data = data
self.informant_column = informant_column
self.taxon_column = taxon_column
self.use_column = use_column
def calculate(self):
"""
Calculates the frequency of citation (FC) for each species.
Returns:
pd.DataFrame: DataFrame containing taxon and FC columns.
"""
# Calculate FC per species by counting unique informants for each taxon
fc_df = (
self.data.groupby(self.taxon_column, observed=True)[self.informant_column]
.nunique()
.reset_index(name="FC")
)
# Sort FC values in descending order
fc_df = fc_df.sort_values(by="FC", ascending=False).reset_index(drop=True)
return fc_df
def save_data(self):
FC_df = self.calculate()
FC_df.to_csv("frequency_of_citation_FC.csv", index=False)
print("Saved to frequency_of_citation_FC.csv")
def plot_radial(self, filename="FC.png", dpi=300, num_row=10, ytick_position="onbar", colors=None, show_colorbar=True):
# Plot radial bar chart
|
class FC:
def __init__(self, data, informant_column="informant", taxon_column="taxon", use_column="ailments_treated"):
"""
Initializes the class with necessary data and column names.
Args:
data (pd.DataFrame): DataFrame containing plant usage information.
informant_column (str, optional): Name of the column containing informant IDs. Defaults to "informant".
taxon_column (str, optional): Name of the column containing species names. Defaults to "taxon".
use_column (str, optional): Name of the column containing plant uses. Defaults to "ailments_treated".
"""
self.data = data
self.informant_column = informant_column
self.taxon_column = taxon_column
self.use_column = use_column
def calculate(self):
"""
Calculates the frequency of citation (FC) for each species.
Returns:
pd.DataFrame: DataFrame containing taxon and FC columns.
"""
# Calculate FC per species by counting unique informants for each taxon
fc_df = (
self.data.groupby(self.taxon_column, observed=True)[self.informant_column]
.nunique()
.reset_index(name="FC")
)
# Sort FC values in descending order
fc_df = fc_df.sort_values(by="FC", ascending=False).reset_index(drop=True)
return fc_df
def save_data(self):
FC_df = self.calculate()
FC_df.to_csv("frequency_of_citation_FC.csv", index=False)
print("Saved to frequency_of_citation_FC.csv")
def plot_radial(self, filename="FC.png", dpi=300, num_row=10, ytick_position="onbar", colors=None, show_colorbar=True):
# Plot radial bar chart | radial_plot = RadialPlot(self.calculate(), "Frequency of Citation (FC)", "FC", num_row, ytick_position, colors, | 0 | 2023-12-25 01:06:51+00:00 | 4k |
Zitronenjoghurt/Colonaut | src/ui/dialogue.py | [
{
"identifier": "DisplayText",
"path": "src/ui/display_text.py",
"snippet": "class DisplayText:\n CHARACTER_SPACES = {\n \"energy\": 0,\n \"lifesupport\": 0,\n \"nexus\": 0,\n \"sensor\": 0,\n \"you\": 0\n }\n\n def __init__(\n self, \n t... | from copy import deepcopy
from src.ui.display_text import DisplayText
from src.constants.custom_exceptions import EventTypeNotSubscribedError
from src.events.event import Event
from src.events.event_bus import EventBus
from src.utils.file_operations import construct_path, file_to_dict, files_in_directory | 2,649 |
EVENT_BUS = EventBus.get_instance()
DIALOGUE_CATEGORIES = ["system"]
DIALOGUE_FILE_PATH = construct_path("src/data/dialogue/{dialogue_category}/")
class Dialogue():
def __init__(self, name: str, display_texts: list[DisplayText]) -> None:
self.name = name
self.display_texts = display_texts
self.current_index = 0
self.action_pending = False
self.actions = {}
self.event_pending = False
self.event = None
self.id_index_map = {}
# Register indices for entry ids
for i, display_text in enumerate(self.display_texts):
id = display_text.get_id()
if id in self.id_index_map:
raise RuntimeError(f"An error occured while loading dialogue '{name}': the id '{id}' of entry at index {i} already exists at index {self.id_index_map[id]}.")
if id != "":
self.id_index_map[id] = i
# Verify action target ids
for i, display_text in enumerate(self.display_texts):
actions = display_text.get_actions()
if len(actions) == 0:
continue
for action_target in actions.values():
if action_target not in self.id_index_map:
raise RuntimeError(f"An error occured while loading dialogue '{name}': the action of the entry at index {i} references an invalid id '{action_target}'.")
def get_texts(self) -> list[DisplayText]:
display_texts = []
i: int = self.current_index
while i < len(self.display_texts):
self.current_index = i
display_text = self.display_texts[i]
display_texts.append(display_text)
event_type = display_text.get_event()
if isinstance(event_type, str):
event_data = display_text.get_event_data()
try:
if isinstance(event_data, dict):
self.event = Event(type=event_type, **event_data)
self.event_pending = True
else:
self.event = Event(type=event_type)
self.event_pending = True
|
EVENT_BUS = EventBus.get_instance()
DIALOGUE_CATEGORIES = ["system"]
DIALOGUE_FILE_PATH = construct_path("src/data/dialogue/{dialogue_category}/")
class Dialogue():
def __init__(self, name: str, display_texts: list[DisplayText]) -> None:
self.name = name
self.display_texts = display_texts
self.current_index = 0
self.action_pending = False
self.actions = {}
self.event_pending = False
self.event = None
self.id_index_map = {}
# Register indices for entry ids
for i, display_text in enumerate(self.display_texts):
id = display_text.get_id()
if id in self.id_index_map:
raise RuntimeError(f"An error occured while loading dialogue '{name}': the id '{id}' of entry at index {i} already exists at index {self.id_index_map[id]}.")
if id != "":
self.id_index_map[id] = i
# Verify action target ids
for i, display_text in enumerate(self.display_texts):
actions = display_text.get_actions()
if len(actions) == 0:
continue
for action_target in actions.values():
if action_target not in self.id_index_map:
raise RuntimeError(f"An error occured while loading dialogue '{name}': the action of the entry at index {i} references an invalid id '{action_target}'.")
def get_texts(self) -> list[DisplayText]:
display_texts = []
i: int = self.current_index
while i < len(self.display_texts):
self.current_index = i
display_text = self.display_texts[i]
display_texts.append(display_text)
event_type = display_text.get_event()
if isinstance(event_type, str):
event_data = display_text.get_event_data()
try:
if isinstance(event_data, dict):
self.event = Event(type=event_type, **event_data)
self.event_pending = True
else:
self.event = Event(type=event_type)
self.event_pending = True | except EventTypeNotSubscribedError: | 1 | 2023-12-22 21:24:33+00:00 | 4k |
akkoaya/ArticleSpider | ArticleSpider/spiders/zhihu.py | [
{
"identifier": "ZhihuQuestionItem",
"path": "ArticleSpider/items.py",
"snippet": "class ZhihuQuestionItem(scrapy.Item):\n # 知乎的问题 item\n question_id = scrapy.Field()\n topics = scrapy.Field()\n url = scrapy.Field()\n title = scrapy.Field()\n # content = scrapy.Field()\n answer_num ... | import datetime
import scrapy
import time
import pickle
import os
import re
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import Keys
from urllib import parse
from scrapy.loader import ItemLoader
from ..items import ZhihuQuestionItem
from ..items import ZhihuAnswerItem | 2,263 |
class ZhihuSpider(scrapy.Spider):
name = "zhihu"
allowed_domains = ["www.zhihu.com"]
start_urls = ["https://www.zhihu.com"]
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
'referer':'https://www.zhihu.com/',
}
#需要把`feeds?`后面的`cursor`的内容手动过滤掉,到`include`为止;然后把最后的`session_id`的内容也过滤掉
#然后在`question/`,`limit=`,`offset=`后面,一一加上占位符
#因为offset的值实际上就是请求添加的回答数量
start_answer_url = 'https://www.zhihu.com/api/v4/questions/{0}/feeds?include=&limit={1}&offset={2}&order=default&platform=desktop'
def start_requests(self):
if os.path.isfile("..\cookies\zhihucookie.txt") is False:
Chrome_options = webdriver.ChromeOptions()
Chrome_options.add_experimental_option('detach', True)
Chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])
browser = webdriver.Chrome(options=Chrome_options)
browser.maximize_window()
browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
"source": """
Object.defineProperty(navigator, 'webdriver', {
get: () => undefined
})
"""
})
browser.get("https://www.zhihu.com")
time.sleep(1)
browser.find_element(By.CSS_SELECTOR, '.SignFlow-tabs div[class="SignFlow-tab"]').click()
browser.find_element(By.CSS_SELECTOR, '.SignFlow-account input[name="username"]').send_keys("username")
browser.find_element(By.CSS_SELECTOR, '.SignFlow-password input[name="password"]').send_keys("password")
time.sleep(1)
browser.find_element(By.CSS_SELECTOR, '.SignFlow-password input[name="password"]').send_keys(Keys.ENTER)
time.sleep(10)
browser.get("https://www.zhihu.com")
cookies = browser.get_cookies()
time.sleep(1)
cookie_dict = {}
# 要使用cookie_dict,需要在settings.py文件中设置 COOKIES_ENABLED = True
# 这样只有首个Request需要加入cookies参数,后续的Request就不需要在加入cookies参数了,会自动读取cookies
# 写入文件
f = open(
'..\cookies\zhihucookie.txt',
'wb')
pickle.dump(cookies, f) # 把文件pickle.dump到文件夹中
f.close()
for cookie in cookies:
cookie_dict[cookie['name']] = cookie['value']
browser.close()
return [scrapy.Request(url=self.start_urls[0],headers=self.headers, dont_filter=True, cookies=cookie_dict)]
else:
f = open(
'..r\cookies\zhihucookie.txt',
'rb')
cookies = pickle.load(f)
f.close()
cookie_dict = {}
for cookie in cookies:
cookie_dict[cookie['name']] = cookie['value']
return [scrapy.Request(url=self.start_urls[0],headers = self.headers, dont_filter=True, cookies=cookie_dict)]
def parse(self, response):
# 获取所有页面内的url,并完整化
all_urls = response.css('a::attr(href)').extract()
all_urls = [parse.urljoin(response.url, url) for url in all_urls]
#进一步过滤掉不是url的内容
all_urls = filter(lambda x: True if x.startswith('https') else False, all_urls)
# 提取知乎问题的url
for url in all_urls:
match_obj = re.match('(.*zhihu.com/question/(\d+))(/|$).*',url) #`$`表示结尾符
if match_obj:
request_url = match_obj.group(1)
question_id = match_obj.group(2)
#如果满足re.match的要求,则下载页面,并交给parse_question函数处理
yield scrapy.Request(url=request_url,meta={'question_id':question_id} ,headers=self.headers, callback=self.parse_question)
#break 这里break出去方便调试,就不会有源源不断的请求过来
else:
#如果不满足,则进一步跟踪
yield scrapy.Request(url=url,headers=self.headers, callback=self.parse) #调试的时候也可以把这个注释掉
#pass
def parse_question(self, response):
#处理question
|
class ZhihuSpider(scrapy.Spider):
name = "zhihu"
allowed_domains = ["www.zhihu.com"]
start_urls = ["https://www.zhihu.com"]
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
'referer':'https://www.zhihu.com/',
}
#需要把`feeds?`后面的`cursor`的内容手动过滤掉,到`include`为止;然后把最后的`session_id`的内容也过滤掉
#然后在`question/`,`limit=`,`offset=`后面,一一加上占位符
#因为offset的值实际上就是请求添加的回答数量
start_answer_url = 'https://www.zhihu.com/api/v4/questions/{0}/feeds?include=&limit={1}&offset={2}&order=default&platform=desktop'
def start_requests(self):
if os.path.isfile("..\cookies\zhihucookie.txt") is False:
Chrome_options = webdriver.ChromeOptions()
Chrome_options.add_experimental_option('detach', True)
Chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])
browser = webdriver.Chrome(options=Chrome_options)
browser.maximize_window()
browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
"source": """
Object.defineProperty(navigator, 'webdriver', {
get: () => undefined
})
"""
})
browser.get("https://www.zhihu.com")
time.sleep(1)
browser.find_element(By.CSS_SELECTOR, '.SignFlow-tabs div[class="SignFlow-tab"]').click()
browser.find_element(By.CSS_SELECTOR, '.SignFlow-account input[name="username"]').send_keys("username")
browser.find_element(By.CSS_SELECTOR, '.SignFlow-password input[name="password"]').send_keys("password")
time.sleep(1)
browser.find_element(By.CSS_SELECTOR, '.SignFlow-password input[name="password"]').send_keys(Keys.ENTER)
time.sleep(10)
browser.get("https://www.zhihu.com")
cookies = browser.get_cookies()
time.sleep(1)
cookie_dict = {}
# 要使用cookie_dict,需要在settings.py文件中设置 COOKIES_ENABLED = True
# 这样只有首个Request需要加入cookies参数,后续的Request就不需要在加入cookies参数了,会自动读取cookies
# 写入文件
f = open(
'..\cookies\zhihucookie.txt',
'wb')
pickle.dump(cookies, f) # 把文件pickle.dump到文件夹中
f.close()
for cookie in cookies:
cookie_dict[cookie['name']] = cookie['value']
browser.close()
return [scrapy.Request(url=self.start_urls[0],headers=self.headers, dont_filter=True, cookies=cookie_dict)]
else:
f = open(
'..r\cookies\zhihucookie.txt',
'rb')
cookies = pickle.load(f)
f.close()
cookie_dict = {}
for cookie in cookies:
cookie_dict[cookie['name']] = cookie['value']
return [scrapy.Request(url=self.start_urls[0],headers = self.headers, dont_filter=True, cookies=cookie_dict)]
def parse(self, response):
# 获取所有页面内的url,并完整化
all_urls = response.css('a::attr(href)').extract()
all_urls = [parse.urljoin(response.url, url) for url in all_urls]
#进一步过滤掉不是url的内容
all_urls = filter(lambda x: True if x.startswith('https') else False, all_urls)
# 提取知乎问题的url
for url in all_urls:
match_obj = re.match('(.*zhihu.com/question/(\d+))(/|$).*',url) #`$`表示结尾符
if match_obj:
request_url = match_obj.group(1)
question_id = match_obj.group(2)
#如果满足re.match的要求,则下载页面,并交给parse_question函数处理
yield scrapy.Request(url=request_url,meta={'question_id':question_id} ,headers=self.headers, callback=self.parse_question)
#break 这里break出去方便调试,就不会有源源不断的请求过来
else:
#如果不满足,则进一步跟踪
yield scrapy.Request(url=url,headers=self.headers, callback=self.parse) #调试的时候也可以把这个注释掉
#pass
def parse_question(self, response):
#处理question | item_loader = ItemLoader(item=ZhihuQuestionItem(), response=response) | 0 | 2023-12-29 15:05:22+00:00 | 4k |
ApiaoSamaa/task2 | train.py | [
{
"identifier": "Encoder",
"path": "models.py",
"snippet": "class Encoder(nn.Module):\n \"\"\"\n Encoder.\n \"\"\"\n\n def __init__(self, encoded_image_size=14):\n super(Encoder, self).__init__()\n self.enc_image_size = encoded_image_size\n\n resnet = torchvision.models.... | import time
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
from models import Encoder, DecoderWithAttention
from datasets import *
from utils import *
from nltk.translate.bleu_score import corpus_bleu | 2,564 |
# Data parameters
data_folder = './captioned_data' # folder with data files saved by create_input_files.py
data_name = 'flickr30k' # base name shared by data files
min_word_freq = 5
# Model parameters
emb_dim = 512 # dimension of word embeddings
attention_dim = 512 # dimension of attention linear layers
decoder_dim = 512 # dimension of decoder RNN
dropout = 0.5
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # sets device for model and PyTorch tensors
cudnn.benchmark = True # set to true only if inputs to model are fixed size; otherwise lot of computational overhead
# Training parameters
start_epoch = 0
epochs = 120 # number of epochs to train for (if early stopping is not triggered)
epochs_since_improvement = 0 # keeps track of number of epochs since there's been an improvement in validation BLEU
batch_size = 1
workers = 4 # for data-loading; right now, only 1 works with h5py
encoder_lr = 1e-4 # learning rate for encoder if fine-tuning
decoder_lr = 4e-4 # learning rate for decoder
grad_clip = 5. # clip gradients at an absolute value of
alpha_c = 1. # regularization parameter for 'doubly stochastic attention', as in the paper
best_bleu4 = 0. # BLEU-4 score right now
print_freq = 100 # print training/validation stats every __ batches
fine_tune_encoder = False # fine-tune encoder?
checkpoint = None # path to checkpoint, None if none
cap_per_img = 5
def main():
"""
Training and validation.
"""
global best_bleu4, epochs_since_improvement, checkpoint, start_epoch, fine_tune_encoder, data_name, word_map
# Read word map
word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
with open(word_map_file, 'r') as j:
word_map = json.load(j)
# Initialize / load checkpoint
if checkpoint is None:
decoder = DecoderWithAttention(attention_dim=attention_dim,
embed_dim=emb_dim,
decoder_dim=decoder_dim,
vocab_size=len(word_map),
dropout=dropout).to(device)
decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()),
lr=decoder_lr)
|
# Data parameters
data_folder = './captioned_data' # folder with data files saved by create_input_files.py
data_name = 'flickr30k' # base name shared by data files
min_word_freq = 5
# Model parameters
emb_dim = 512 # dimension of word embeddings
attention_dim = 512 # dimension of attention linear layers
decoder_dim = 512 # dimension of decoder RNN
dropout = 0.5
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # sets device for model and PyTorch tensors
cudnn.benchmark = True # set to true only if inputs to model are fixed size; otherwise lot of computational overhead
# Training parameters
start_epoch = 0
epochs = 120 # number of epochs to train for (if early stopping is not triggered)
epochs_since_improvement = 0 # keeps track of number of epochs since there's been an improvement in validation BLEU
batch_size = 1
workers = 4 # for data-loading; right now, only 1 works with h5py
encoder_lr = 1e-4 # learning rate for encoder if fine-tuning
decoder_lr = 4e-4 # learning rate for decoder
grad_clip = 5. # clip gradients at an absolute value of
alpha_c = 1. # regularization parameter for 'doubly stochastic attention', as in the paper
best_bleu4 = 0. # BLEU-4 score right now
print_freq = 100 # print training/validation stats every __ batches
fine_tune_encoder = False # fine-tune encoder?
checkpoint = None # path to checkpoint, None if none
cap_per_img = 5
def main():
"""
Training and validation.
"""
global best_bleu4, epochs_since_improvement, checkpoint, start_epoch, fine_tune_encoder, data_name, word_map
# Read word map
word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
with open(word_map_file, 'r') as j:
word_map = json.load(j)
# Initialize / load checkpoint
if checkpoint is None:
decoder = DecoderWithAttention(attention_dim=attention_dim,
embed_dim=emb_dim,
decoder_dim=decoder_dim,
vocab_size=len(word_map),
dropout=dropout).to(device)
decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()),
lr=decoder_lr) | encoder = Encoder().to(device) | 0 | 2023-12-27 11:48:51+00:00 | 4k |
YYJeffrey/july_server | app/api/v2/comment.py | [
{
"identifier": "db",
"path": "app/model/base.py",
"snippet": "class BaseModel(db.Model):\n def __getitem__(self, key):\n def init_on_load(self):\n def __set_fields(self):\n def _set_fields(self):\n def keys(self):\n def hide(self, *keys):\n def append(self, *keys):\n def status(... | from flask import g
from app import db
from app.lib.enums import MessageCategory
from app.lib.exception import NotFound, Success, Deleted, Created
from app.lib.red_print import RedPrint
from app.lib.schema import paginator_schema
from app.lib.token import auth
from app.model.comment import Comment
from app.model.message import Message
from app.model.topic import Topic
from app.service.comment import create_comment_verify, get_comment_list
from app.validator.forms import CreateCommentValidator, GetCommentListValidator | 3,240 | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2023 by Jeffrey.
:license: Apache 2.0, see LICENSE for more details.
"""
api = RedPrint('comment')
@api.route('/', methods=['GET'])
def get_comments():
"""
获取评论列表
"""
form = GetCommentListValidator()
topic_id = form.get_data('topic_id')
user_id = form.get_data('user_id')
comments = get_comment_list(topic_id=topic_id, user_id=user_id)
return Success(data=paginator_schema(comments))
@api.route('/', methods=['POST'])
@auth.login_required
def create_comment():
"""
发布评论
"""
form = CreateCommentValidator()
create_comment_verify(form=form)
return Created()
@api.route('/<comment_id>', methods=['DELETE'])
@auth.login_required
def delete_comment(comment_id):
"""
删除评论
"""
comment = Comment.get_one(id=comment_id)
if comment is None:
raise NotFound(msg='评论不存在')
topic = Topic.get_one(id=comment.topic_id)
| # -*- coding: utf-8 -*-
"""
:copyright: (c) 2023 by Jeffrey.
:license: Apache 2.0, see LICENSE for more details.
"""
api = RedPrint('comment')
@api.route('/', methods=['GET'])
def get_comments():
"""
获取评论列表
"""
form = GetCommentListValidator()
topic_id = form.get_data('topic_id')
user_id = form.get_data('user_id')
comments = get_comment_list(topic_id=topic_id, user_id=user_id)
return Success(data=paginator_schema(comments))
@api.route('/', methods=['POST'])
@auth.login_required
def create_comment():
"""
发布评论
"""
form = CreateCommentValidator()
create_comment_verify(form=form)
return Created()
@api.route('/<comment_id>', methods=['DELETE'])
@auth.login_required
def delete_comment(comment_id):
"""
删除评论
"""
comment = Comment.get_one(id=comment_id)
if comment is None:
raise NotFound(msg='评论不存在')
topic = Topic.get_one(id=comment.topic_id) | exist_msg = Message.get_one(category=MessageCategory.COMMENT, user_id=topic.user_id, action_user_id=g.user.id, | 1 | 2023-12-30 04:08:35+00:00 | 4k |
lchen1019/Image_Cropper | ISAT/widgets/canvas.py | [
{
"identifier": "Polygon",
"path": "ISAT/widgets/polygon.py",
"snippet": "class Polygon(QtWidgets.QGraphicsPolygonItem):\n def __init__(self):\n super(Polygon, self).__init__(parent=None)\n self.line_width = 0\n self.hover_alpha = 150\n self.nohover_alpha = 80\n sel... | from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5.QtGui import QPen, QBrush, QColor
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QGraphicsScene, QGraphicsView, QGraphicsRectItem
from ISAT.widgets.polygon import Polygon, Vertex, PromptPoint
from ISAT.configs import STATUSMode, CLICKMode, DRAWMode, CONTOURMode
from PIL import Image
import numpy as np
import cv2
import time # 拖动鼠标描点 | 3,307 | # -*- coding: utf-8 -*-
# @Author : LG
class AnnotationScene(QtWidgets.QGraphicsScene):
def __init__(self, mainwindow):
super(AnnotationScene, self).__init__()
self.mainwindow = mainwindow
self.image_item:QtWidgets.QGraphicsPixmapItem = None
self.image_data = None
self.current_graph:QGraphicsRectItem = None
self.mode = STATUSMode.VIEW
| # -*- coding: utf-8 -*-
# @Author : LG
class AnnotationScene(QtWidgets.QGraphicsScene):
def __init__(self, mainwindow):
super(AnnotationScene, self).__init__()
self.mainwindow = mainwindow
self.image_item:QtWidgets.QGraphicsPixmapItem = None
self.image_data = None
self.current_graph:QGraphicsRectItem = None
self.mode = STATUSMode.VIEW | self.click = CLICKMode.POSITIVE | 4 | 2023-12-24 16:19:16+00:00 | 4k |
farhad-dalirani/MultiObjectTracking-YOLO-NAS-DeepSORT | tracking_by_detection.py | [
{
"identifier": "DrawingTrackingInfo",
"path": "drawing_util.py",
"snippet": "class DrawingTrackingInfo:\n\n def __init__(self):\n self.trajectory_len = 50\n self.max_color = 150\n self.tracks_id_colors = np.random.randint(low=0, high=255, size=(self.max_color, 3), dtype='uint8')... | import os
import torch
import cv2 as cv
from super_gradients.training import models
from drawing_util import DrawingTrackingInfo
from deep_sort_pytorch_master.deep_sort import DeepSort
from data_util import ReadData
from datetime import datetime | 2,697 |
def tracking_by_detection(config):
# Check if the output folder exists
if not os.path.exists(config['output_folder']):
# Create it
os.makedirs(config['output_folder'])
print(f"Folder '{config['output_folder']}' created.")
# Select device
if config['detector_device'] == 'cuda:0':
device_detector = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
elif config['detector_device'] == 'cpu':
device_detector = torch.device('cpu')
else:
raise ValueError('Requested device name is not correct!')
print('Device: {}'.format(device_detector))
# Object for reading data
ds_object = ReadData(input_type=config['input_type'],
input_image_dir=config['images_folder'],
input_video_path=config['input_video_path'])
ds_generator = ds_object.data_generator()
# Load YOLO-NAS-Medium for object detecion
detector = models.get(model_name=config['detector_arch'], pretrained_weights=config['pretrained_dataset']).to(device_detector)
# Tracking info drawing object
|
def tracking_by_detection(config):
# Check if the output folder exists
if not os.path.exists(config['output_folder']):
# Create it
os.makedirs(config['output_folder'])
print(f"Folder '{config['output_folder']}' created.")
# Select device
if config['detector_device'] == 'cuda:0':
device_detector = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
elif config['detector_device'] == 'cpu':
device_detector = torch.device('cpu')
else:
raise ValueError('Requested device name is not correct!')
print('Device: {}'.format(device_detector))
# Object for reading data
ds_object = ReadData(input_type=config['input_type'],
input_image_dir=config['images_folder'],
input_video_path=config['input_video_path'])
ds_generator = ds_object.data_generator()
# Load YOLO-NAS-Medium for object detecion
detector = models.get(model_name=config['detector_arch'], pretrained_weights=config['pretrained_dataset']).to(device_detector)
# Tracking info drawing object | draw_obj = DrawingTrackingInfo() | 0 | 2023-12-26 15:22:02+00:00 | 4k |
harvestingmoon/StableVisionBot | bot.py | [
{
"identifier": "BackEnd",
"path": "backend.py",
"snippet": "class BackEnd:\n def __init__(self,model_id) -> None:\n self.model = None\n self.curr_picture = None \n self.final_img = None\n self.call = {1:False,2:False}\n self.model_id = (model_id if model_id else \"... | from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove, Update,InlineKeyboardButton,InlineKeyboardMarkup
from telegram.ext import (
Application,
CommandHandler,
ContextTypes,
ConversationHandler,
MessageHandler,
CallbackQueryHandler,
filters,
CallbackContext,
)
from backend import BackEnd,post_process
from PIL import Image
import numpy as np
import json
import logging
import yaml
import emoji
import asyncio | 2,207 | # Simple telegram bot that takes uses stable diffusion
''' Importing YAML'''
with open("config .yaml", "r") as f:
config = yaml.safe_load(f)
model = config['model']
api_key = config['API_KEY']
''' States for bot'''
ONE,TWO,DOCUMENT,PHOTO = range(4)
START,T2IMG,T2IMG2,IMG2IMG,IMG2IMG2,OUTPUT= range(6)
''' User logging'''
logging.basicConfig(
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level = logging.INFO
)
logger = logging.getLogger(__name__)
''' Important pipeline for stable diffusion'''
engine = BackEnd(model)
''' Function for bot'''
async def startcommand(update,context):
keyboard = [
[ InlineKeyboardButton("Text To Image", callback_data = str(ONE)),
InlineKeyboardButton("Image Editing",callback_data = str(TWO))],
]
reply_markup = InlineKeyboardMarkup(keyboard)
await update.message.reply_text("StableVision Bot v1.1 \U0001F308\
\nby harvestingm00n \U0001F343\
\n\n\nPlease select an option!",reply_markup = reply_markup)
return START
async def info(update: Update, _:CallbackContext) -> None:
await update.message.reply_text("StableVision Bot v1.1 \U0001F308\
\n\n Technical Info: \
\n\n Model: Stable Diffusion v2.0 \U0001F3A8 \
\n\n Pipeline: HuggingFace \U0001F917 \
\n\n GPU: min. 6gb VRAM \
")
async def text_to_image(update: Update, _: CallbackContext) -> int:
query = update.callback_query
query.answer()
await query.edit_message_text("Please input the text you want to convert to image \u2328\
\nIf you are using this in a group chat please reply to the bot \
\n\nNote: This may take a while...")
return T2IMG
async def image_to_image(update: Update, _: CallbackContext) -> int:
query = update.callback_query
query.answer()
await query.edit_message_text("Please input the image you want to edit \U0001F5BC\
\n\nIf you are using this in a group chat please reply to the bot")
return IMG2IMG
async def img2img(update: Update, context: CallbackContext) -> None:
user_photo = await update.message.photo[-1].get_file()
array = await user_photo.download_as_bytearray()
engine.change_picture(array) # temporarily storing the photo there ( will always override no matter what)
await update.message.reply_text("Please input the text you want to convert to image \u2328\
\nIf you are using this in a group chat please reply to the bot \
\n\nNote: This may take a while...")
return IMG2IMG2
async def t2img(update: Update, context: CallbackContext) -> None:
user_input = update.message.text
logging.info("User of text:",user_input)
pipe = engine.call_engine(1)
await update.message.reply_text(emoji.emojize("Painting! This may take awhile... :paintbrush:"))
images = pipe(prompt = user_input,num_inference_steps = 50).images[0]
engine.final_(images)
keyboard = [[InlineKeyboardButton("Send as Document",callback_data = str(DOCUMENT)),
InlineKeyboardButton("Send as Photo",callback_data = str(PHOTO))]]
reply_markup = InlineKeyboardMarkup(keyboard)
await update.message.reply_text("Please select an option! \
\n\n Note: Sending as photo have lower quality",reply_markup = reply_markup)
# await context.bot.send_document(chat_id=update.effective_chat.id,document = final_images ,filename ='photo.pdf', caption = f"Generated Image of {user_input}")
# await context.bot.send_photo(chat_id=update.effective_chat.id,photo = final_images ,filename ='photo.jpg', caption = f"Generated Image of {user_input}")
return OUTPUT
async def t2img2(update: Update, context: CallbackContext) -> None:
user_input = update.message.text
logging.info("User of text:",user_input)
pipe = engine.call_engine(2)
await update.message.reply_text(emoji.emojize("Painting! This may take awhile... :paintbrush:"))
images = pipe(prompt = user_input,image = engine.get_picture()).images[0]
engine.final_(images)
keyboard = [[InlineKeyboardButton("Send as Document",callback_data = str(DOCUMENT)),
InlineKeyboardButton("Send as Photo",callback_data = str(PHOTO))]]
reply_markup = InlineKeyboardMarkup(keyboard)
await update.message.reply_text("Please select an option! \
\n\n Note: Sending as photo have lower quality",reply_markup = reply_markup)
# await context.bot.send_document(chat_id=update.effective_chat.id,document = final_images ,filename ='photo.pdf', caption = f"Generated Image of {user_input}")
# await context.bot.send_photo(chat_id=update.effective_chat.id,photo = final_images ,filename ='photo.jpg', caption = f"Generated Image of {user_input}")
return OUTPUT
async def document(update: Update, context: CallbackContext) -> None:
query = update.callback_query
final_image = engine.get_final()
| # Simple telegram bot that takes uses stable diffusion
''' Importing YAML'''
with open("config .yaml", "r") as f:
config = yaml.safe_load(f)
model = config['model']
api_key = config['API_KEY']
''' States for bot'''
ONE,TWO,DOCUMENT,PHOTO = range(4)
START,T2IMG,T2IMG2,IMG2IMG,IMG2IMG2,OUTPUT= range(6)
''' User logging'''
logging.basicConfig(
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level = logging.INFO
)
logger = logging.getLogger(__name__)
''' Important pipeline for stable diffusion'''
engine = BackEnd(model)
''' Function for bot'''
async def startcommand(update,context):
keyboard = [
[ InlineKeyboardButton("Text To Image", callback_data = str(ONE)),
InlineKeyboardButton("Image Editing",callback_data = str(TWO))],
]
reply_markup = InlineKeyboardMarkup(keyboard)
await update.message.reply_text("StableVision Bot v1.1 \U0001F308\
\nby harvestingm00n \U0001F343\
\n\n\nPlease select an option!",reply_markup = reply_markup)
return START
async def info(update: Update, _:CallbackContext) -> None:
await update.message.reply_text("StableVision Bot v1.1 \U0001F308\
\n\n Technical Info: \
\n\n Model: Stable Diffusion v2.0 \U0001F3A8 \
\n\n Pipeline: HuggingFace \U0001F917 \
\n\n GPU: min. 6gb VRAM \
")
async def text_to_image(update: Update, _: CallbackContext) -> int:
query = update.callback_query
query.answer()
await query.edit_message_text("Please input the text you want to convert to image \u2328\
\nIf you are using this in a group chat please reply to the bot \
\n\nNote: This may take a while...")
return T2IMG
async def image_to_image(update: Update, _: CallbackContext) -> int:
query = update.callback_query
query.answer()
await query.edit_message_text("Please input the image you want to edit \U0001F5BC\
\n\nIf you are using this in a group chat please reply to the bot")
return IMG2IMG
async def img2img(update: Update, context: CallbackContext) -> None:
user_photo = await update.message.photo[-1].get_file()
array = await user_photo.download_as_bytearray()
engine.change_picture(array) # temporarily storing the photo there ( will always override no matter what)
await update.message.reply_text("Please input the text you want to convert to image \u2328\
\nIf you are using this in a group chat please reply to the bot \
\n\nNote: This may take a while...")
return IMG2IMG2
async def t2img(update: Update, context: CallbackContext) -> None:
user_input = update.message.text
logging.info("User of text:",user_input)
pipe = engine.call_engine(1)
await update.message.reply_text(emoji.emojize("Painting! This may take awhile... :paintbrush:"))
images = pipe(prompt = user_input,num_inference_steps = 50).images[0]
engine.final_(images)
keyboard = [[InlineKeyboardButton("Send as Document",callback_data = str(DOCUMENT)),
InlineKeyboardButton("Send as Photo",callback_data = str(PHOTO))]]
reply_markup = InlineKeyboardMarkup(keyboard)
await update.message.reply_text("Please select an option! \
\n\n Note: Sending as photo have lower quality",reply_markup = reply_markup)
# await context.bot.send_document(chat_id=update.effective_chat.id,document = final_images ,filename ='photo.pdf', caption = f"Generated Image of {user_input}")
# await context.bot.send_photo(chat_id=update.effective_chat.id,photo = final_images ,filename ='photo.jpg', caption = f"Generated Image of {user_input}")
return OUTPUT
async def t2img2(update: Update, context: CallbackContext) -> None:
user_input = update.message.text
logging.info("User of text:",user_input)
pipe = engine.call_engine(2)
await update.message.reply_text(emoji.emojize("Painting! This may take awhile... :paintbrush:"))
images = pipe(prompt = user_input,image = engine.get_picture()).images[0]
engine.final_(images)
keyboard = [[InlineKeyboardButton("Send as Document",callback_data = str(DOCUMENT)),
InlineKeyboardButton("Send as Photo",callback_data = str(PHOTO))]]
reply_markup = InlineKeyboardMarkup(keyboard)
await update.message.reply_text("Please select an option! \
\n\n Note: Sending as photo have lower quality",reply_markup = reply_markup)
# await context.bot.send_document(chat_id=update.effective_chat.id,document = final_images ,filename ='photo.pdf', caption = f"Generated Image of {user_input}")
# await context.bot.send_photo(chat_id=update.effective_chat.id,photo = final_images ,filename ='photo.jpg', caption = f"Generated Image of {user_input}")
return OUTPUT
async def document(update: Update, context: CallbackContext) -> None:
query = update.callback_query
final_image = engine.get_final() | final_image = post_process(final_image,to_doc = True) | 1 | 2023-12-22 07:25:26+00:00 | 4k |
khabbazan/Mattermost-Subscriptions | mattermostsub/schema.py | [
{
"identifier": "UserCreate",
"path": "apps/account/gql/mutations.py",
"snippet": "class UserCreate(graphene.Mutation):\n \"\"\"\n UserCreate Mutation\n This mutation is used to create a new user. It involves creating a user in the Django system\n and additionally in an external system (Matt... | import graphene
import graphql_jwt
from apps.account.gql.mutations import UserCreate
from apps.account.gql.mutations import UserGetToken
from apps.account.gql.queries import UserList
from apps.chat.gql.mutations import ChannelCreate
from apps.chat.gql.mutations import TextMessageSend
from apps.chat.gql.queries import ChannelList
from apps.chat.gql.queries import GetMessageList
from apps.chat.gql.subscriptions import OnNewChatMessage | 3,582 |
class Query(UserList, ChannelList, GetMessageList):
pass
class Mutation(graphene.ObjectType):
|
class Query(UserList, ChannelList, GetMessageList):
pass
class Mutation(graphene.ObjectType): | user_create = UserCreate.Field() | 0 | 2023-12-25 11:40:56+00:00 | 4k |
Hatins/DEOE | modules/data/genx.py | [
{
"identifier": "custom_collate_rnd",
"path": "data/genx_utils/collate.py",
"snippet": "def custom_collate_rnd(batch: Any):\n samples = batch\n # NOTE: We do not really need the worker id for map style datasets (rnd) but we still provide the id for consistency\n worker_info = torch.utils.data.g... | from functools import partial
from typing import Any, Dict, Optional, Union
from omegaconf import DictConfig
from torch.utils.data import DataLoader, Dataset
from data.genx_utils.collate import custom_collate_rnd, custom_collate_streaming
from data.genx_utils.dataset_rnd import build_random_access_dataset, get_weighted_random_sampler, CustomConcatDataset
from data.genx_utils.dataset_streaming import build_streaming_dataset
from data.utils.spatial import get_dataloading_hw
from data.utils.types import DatasetMode, DatasetSamplingMode
import math
import pytorch_lightning as pl | 2,416 |
def get_dataloader_kwargs(dataset: Union[Dataset, CustomConcatDataset],
sampling_mode: DatasetSamplingMode,
dataset_mode: DatasetMode,
dataset_config: DictConfig,
batch_size: int,
num_workers: int) -> Dict[str, Any]:
if dataset_mode == DatasetMode.TRAIN:
if sampling_mode == DatasetSamplingMode.STREAM:
return dict(
dataset=dataset,
batch_size=None,
shuffle=False, # Done already in the streaming datapipe
num_workers=num_workers,
pin_memory=False,
drop_last=False, # Cannot be done with streaming datapipes
collate_fn=custom_collate_streaming,
)
if sampling_mode == DatasetSamplingMode.RANDOM:
use_weighted_rnd_sampling = dataset_config.train.random.weighted_sampling
sampler = get_weighted_random_sampler(dataset) if use_weighted_rnd_sampling else None
return dict(
dataset=dataset,
batch_size=batch_size,
shuffle=sampler is None,
sampler=sampler,
num_workers=num_workers,
pin_memory=False,
drop_last=True, # Maintain the same batch size for logging
collate_fn=custom_collate_rnd,
)
raise NotImplementedError
elif dataset_mode in (DatasetMode.VALIDATION, DatasetMode.TESTING):
if sampling_mode == DatasetSamplingMode.STREAM:
return dict(
dataset=dataset,
batch_size=None,
shuffle=False,
num_workers=num_workers,
pin_memory=False,
drop_last=False, # Cannot be done with streaming datapipes
collate_fn=custom_collate_streaming,
)
if sampling_mode == DatasetSamplingMode.RANDOM:
return dict(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=False,
drop_last=True, # Maintain the same batch size for logging
collate_fn=custom_collate_rnd,
)
raise NotImplementedError
raise NotImplementedError
class DataModule(pl.LightningDataModule):
def __init__(self,
dataset_config: DictConfig,
num_workers_train: int,
num_workers_eval: int,
batch_size_train: int,
batch_size_eval: int):
super().__init__()
assert num_workers_train >= 0
assert num_workers_eval >= 0
assert batch_size_train >= 1
assert batch_size_eval >= 1
self.dataset_config = dataset_config
self.train_sampling_mode = dataset_config.train.sampling
self.eval_sampling_mode = dataset_config.eval.sampling
assert self.train_sampling_mode in iter(DatasetSamplingMode)
assert self.eval_sampling_mode in (DatasetSamplingMode.STREAM, DatasetSamplingMode.RANDOM)
# In DDP all configs are per process/GPU (num_workers, batch_size, ...).
self.overall_batch_size_train = batch_size_train
self.overall_batch_size_eval = batch_size_eval
self.overall_num_workers_train = num_workers_train
self.overall_num_workers_eval = num_workers_eval
if self.eval_sampling_mode == DatasetSamplingMode.STREAM:
self.build_eval_dataset = partial(build_streaming_dataset,
batch_size=self.overall_batch_size_eval,
num_workers=self.overall_num_workers_eval)
elif self.eval_sampling_mode == DatasetSamplingMode.RANDOM:
|
def get_dataloader_kwargs(dataset: Union[Dataset, CustomConcatDataset],
sampling_mode: DatasetSamplingMode,
dataset_mode: DatasetMode,
dataset_config: DictConfig,
batch_size: int,
num_workers: int) -> Dict[str, Any]:
if dataset_mode == DatasetMode.TRAIN:
if sampling_mode == DatasetSamplingMode.STREAM:
return dict(
dataset=dataset,
batch_size=None,
shuffle=False, # Done already in the streaming datapipe
num_workers=num_workers,
pin_memory=False,
drop_last=False, # Cannot be done with streaming datapipes
collate_fn=custom_collate_streaming,
)
if sampling_mode == DatasetSamplingMode.RANDOM:
use_weighted_rnd_sampling = dataset_config.train.random.weighted_sampling
sampler = get_weighted_random_sampler(dataset) if use_weighted_rnd_sampling else None
return dict(
dataset=dataset,
batch_size=batch_size,
shuffle=sampler is None,
sampler=sampler,
num_workers=num_workers,
pin_memory=False,
drop_last=True, # Maintain the same batch size for logging
collate_fn=custom_collate_rnd,
)
raise NotImplementedError
elif dataset_mode in (DatasetMode.VALIDATION, DatasetMode.TESTING):
if sampling_mode == DatasetSamplingMode.STREAM:
return dict(
dataset=dataset,
batch_size=None,
shuffle=False,
num_workers=num_workers,
pin_memory=False,
drop_last=False, # Cannot be done with streaming datapipes
collate_fn=custom_collate_streaming,
)
if sampling_mode == DatasetSamplingMode.RANDOM:
return dict(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=False,
drop_last=True, # Maintain the same batch size for logging
collate_fn=custom_collate_rnd,
)
raise NotImplementedError
raise NotImplementedError
class DataModule(pl.LightningDataModule):
def __init__(self,
dataset_config: DictConfig,
num_workers_train: int,
num_workers_eval: int,
batch_size_train: int,
batch_size_eval: int):
super().__init__()
assert num_workers_train >= 0
assert num_workers_eval >= 0
assert batch_size_train >= 1
assert batch_size_eval >= 1
self.dataset_config = dataset_config
self.train_sampling_mode = dataset_config.train.sampling
self.eval_sampling_mode = dataset_config.eval.sampling
assert self.train_sampling_mode in iter(DatasetSamplingMode)
assert self.eval_sampling_mode in (DatasetSamplingMode.STREAM, DatasetSamplingMode.RANDOM)
# In DDP all configs are per process/GPU (num_workers, batch_size, ...).
self.overall_batch_size_train = batch_size_train
self.overall_batch_size_eval = batch_size_eval
self.overall_num_workers_train = num_workers_train
self.overall_num_workers_eval = num_workers_eval
if self.eval_sampling_mode == DatasetSamplingMode.STREAM:
self.build_eval_dataset = partial(build_streaming_dataset,
batch_size=self.overall_batch_size_eval,
num_workers=self.overall_num_workers_eval)
elif self.eval_sampling_mode == DatasetSamplingMode.RANDOM: | self.build_eval_dataset = build_random_access_dataset | 2 | 2023-12-29 04:04:34+00:00 | 4k |
Enthusiasm23/primkit | src/primkit/utils/DataFetcher.py | [
{
"identifier": "WebDriverUtility",
"path": "src/primkit/utils/SiteSeleniumer.py",
"snippet": "class WebDriverUtility:\n \"\"\"\n Utility class to interact with a website using Selenium WebDriver.\n \"\"\"\n\n def __init__(self, url, driver_path=CHROME_DRIVER_PATH, timeout=DEFAULT_TIMEOUT):\... | import logging
import requests
from selenium.common.exceptions import WebDriverException
from ..utils.SiteSeleniumer import WebDriverUtility
from ..utils.SiteRequester import get_site_data
from ..config import PRIMER_URL | 3,493 |
logger = logging.getLogger(__name__)
def fetch_web_data(url=PRIMER_URL, method='requests'):
"""
Fetches headers, cookies, and a token from a given URL using either requests or selenium.
Parameters:
- url (str): URL to fetch data from.
- method (str): Method to use for fetching data ('requests' or 'selenium').
Returns:
- tuple: (headers, cookies, token) if successful, otherwise raises an error.
"""
logger.info(f"Fetching web data from {url} using {method}")
headers = {}
cookies = {}
token = None
if method == 'requests':
# requests fetching logic
try:
# (requests fetching implementation)
|
logger = logging.getLogger(__name__)
def fetch_web_data(url=PRIMER_URL, method='requests'):
"""
Fetches headers, cookies, and a token from a given URL using either requests or selenium.
Parameters:
- url (str): URL to fetch data from.
- method (str): Method to use for fetching data ('requests' or 'selenium').
Returns:
- tuple: (headers, cookies, token) if successful, otherwise raises an error.
"""
logger.info(f"Fetching web data from {url} using {method}")
headers = {}
cookies = {}
token = None
if method == 'requests':
# requests fetching logic
try:
# (requests fetching implementation) | headers, cookies, token = get_site_data(url) | 1 | 2023-12-25 14:12:46+00:00 | 4k |
Wangyuhao06/2022-adhoc | src/env.py | [
{
"identifier": "random_waypoint",
"path": "pymobility/models/mobility.py",
"snippet": "def random_waypoint(*args, **kwargs):\n return iter(RandomWaypoint(*args, **kwargs))"
},
{
"identifier": "Node",
"path": "src/node.py",
"snippet": "class Node(object):\n def __init__(self,id_nod... | import random
import numpy as np
from math import log2, log10
from queue import Queue
from pymobility.models.mobility import random_waypoint
from src.node import Node
from src.packet import Packet
from src.parameter import *
from src.transtask import Trans_task | 1,927 |
class Environment():
#初始化环境
def __init__(self):
#初始数据-最大节点数
self.node_max=NODE_MAX
self.node_space_size=NODE_MAX
self.node_moving_area=MOV_AREA
#初始化二维平面
self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0)
self.position=0
#初始化随机相邻矩阵
self.topology = np.zeros((self.node_space_size,self.node_space_size))
self.topology[0:self.node_max,0:self.node_max] = np.random.randint(0,2,(self.node_max,self.node_max))
for i in range(self.node_max):
self.topology[i,i] = 1
for j in range(self.node_max):
#构建双向图
if self.topology[i,j] == 1:
self.topology[j,i] = 1
#初始化节点动作空间
self.topology_actSpace=[]
#初始化频谱块元组-----(0,[])表示(占用与否,[占用transtaskID列表])
self.freqB_list=([],[],[],[],[],[],[],[],[],[]) #((0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]))
self.freqB_use_history=([],[],[],[],[],[],[],[],[],[])
#初始化传输事件列表
self.trans_task_ID_inTR=[]
self.trans_task_list=[]
self.trans_task_cnt=0 # id计数器
#初始化包列表
self.amount_poisson_list = np.random.poisson(lam=LAMDA,size=MAX_TIME)#包数量初始化
self.size_normal_list = ((np.random.normal(0,1,MAX_TIME*2)*16+16)//8)*8#包大小初始化
self.pack_use_cnt=0#包序号计数器
self.packets_list=[]#包列表
self.packets_live_id=[]
#初始化节点列表
self.node_list=[]
self.live_node_ID_list=[]
for i in range(self.node_max):
|
class Environment():
#初始化环境
def __init__(self):
#初始数据-最大节点数
self.node_max=NODE_MAX
self.node_space_size=NODE_MAX
self.node_moving_area=MOV_AREA
#初始化二维平面
self.geo_area = random_waypoint(self.node_max, dimensions=(MOV_AREA, MOV_AREA), velocity=(10, 15), wt_max=1.0)
self.position=0
#初始化随机相邻矩阵
self.topology = np.zeros((self.node_space_size,self.node_space_size))
self.topology[0:self.node_max,0:self.node_max] = np.random.randint(0,2,(self.node_max,self.node_max))
for i in range(self.node_max):
self.topology[i,i] = 1
for j in range(self.node_max):
#构建双向图
if self.topology[i,j] == 1:
self.topology[j,i] = 1
#初始化节点动作空间
self.topology_actSpace=[]
#初始化频谱块元组-----(0,[])表示(占用与否,[占用transtaskID列表])
self.freqB_list=([],[],[],[],[],[],[],[],[],[]) #((0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]),(0,[]))
self.freqB_use_history=([],[],[],[],[],[],[],[],[],[])
#初始化传输事件列表
self.trans_task_ID_inTR=[]
self.trans_task_list=[]
self.trans_task_cnt=0 # id计数器
#初始化包列表
self.amount_poisson_list = np.random.poisson(lam=LAMDA,size=MAX_TIME)#包数量初始化
self.size_normal_list = ((np.random.normal(0,1,MAX_TIME*2)*16+16)//8)*8#包大小初始化
self.pack_use_cnt=0#包序号计数器
self.packets_list=[]#包列表
self.packets_live_id=[]
#初始化节点列表
self.node_list=[]
self.live_node_ID_list=[]
for i in range(self.node_max): | locals()['node_'+str(i)] = Node(i) | 1 | 2023-12-30 09:35:30+00:00 | 4k |
alshubati99/BeamEye | detectionCode.py | [
{
"identifier": "label_map_util",
"path": "detectionElements/label_map_util.py",
"snippet": "def create_category_index(categories):\ndef convert_label_map_to_categories(label_map,\n max_num_classes,\n use_display_name=True):\ndef load... | import time
import numpy as np
import os
import tensorflow as tf
import cv2
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
from shutil import copy2
from time import sleep
from detectionElements import label_map_util, drawing_tools
from detectionElements.resizeVideo import resize_video | 1,667 |
def frames_to_video(fps, output_folder='videoOut//'):
output_folder += "//"
image_folder = 'videoFrames//'
video_name = output_folder + 'video.avi'
images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
# print(images)
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter(video_name, fourcc, fps, (width, height))
# print(video_name)
for image in images:
video.write(cv2.imread(os.path.join(image_folder, image)))
copy2(video_name, video_name[:-4] + "_copy.avi")
cv2.destroyAllWindows()
video.release()
User.frames_progress = 100
User.finished = True
User.output_video = video_name
def detect():
User.finished = False
while User.wait:
sleep(2)
print("waiting")
else:
print("Got Video")
User.wait = True
high_res = User.high_res
if not high_res:
resized_video = resize_video(User.input_video_path)
if resized_video:
cap = cv2.VideoCapture(resized_video)
print("resized video")
User.input_video_path = resized_video
print(cap)
else:
cap = cv2.VideoCapture(User.input_video_path)
print("didnt resize video")
# User.input_video_path = None
print(User.input_video_path)
print(cap)
with open("uiElements//userSettings.txt", "r", encoding="utf-8") as f:
settings = [line.split(" ")[-1] for line in f.read().split("\n")]
include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color, output_path = settings
output_path = output_path.replace("_SPACE_", " ")
include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color = int(include_labels), int(
include_crowd), int(include_accuracy), int(pedestrian_color), int(crowd_color)
# in settings {1: "blue", 2: "purple", 3: "red", 4: "orange", 5: "yellow", 6: "green"}
color_dict = {1: "#0094FF", 2: "#FF00F6", 3: "red", 4: "#FF6A00", 5: "yellow", 6: "#26FF5C"} # {1: "red", 2: "purple", 3: "blue", 4: "DodgerBlue", 5: "DeepSkyBlue", 6: "#00FF0C"}
pedestrian_color = color_dict[pedestrian_color]
crowd_color = color_dict[crowd_color]
frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
fps = cap.get(cv2.CAP_PROP_FPS)
video_frames_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frame_count = 0
video_frame_count = 0
pedestrian_count_second, crowd_count_second = [], []
# pedestrian_count_frame, crowd_count_frame = 0, 0
def begin():
empty_frames_folder()
nonlocal frame_count, video_frame_count, pedestrian_color, crowd_color, cap
with detection_graph.as_default():
with tf.compat.v1.Session(graph=detection_graph) as sess:
frames_left = 100 # percent
pedestrian_count_frame, crowd_count_frame = 0, 0
increment_progress_bar = 0
while True:
frame_count += 1
if increment_progress_bar >= 2.28 * video_frames_total / 100:
frames_left -= 2.28
User.frames_progress += 2
print(
f"Processed {100 - frames_left:.2f}% of frames, {frames_left:.2f}% left. Progress Bar: {User.frames_progress}")
increment_progress_bar = 0
# if frame_count == frame_rate+1:
success, image_np = cap.read()
if not success:
print('EOF')
break
# flatten image using numpy
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection.
|
tf.compat.v1.disable_v2_behavior()
tf.TF_ENABLE_ONEDNN_OPTS = 0
# sys.path.insert(0, 'detectionElements')
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.compat.v1.GraphDef()
tf.compat.v1.disable_v2_behavior()
with tf.io.gfile.GFile('detectionElements/_detectionModel.pb', 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
NUM_CLASSES = 50
label_map = label_map_util.load_labelmap('detectionElements/person_label_map.pbtxt')
categories = label_map_util.convert_label_map_to_categories(
label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
def save_frame(frame_number, frame):
frame_name = "0" * (5 - len(str(frame_number))) + str(frame_number)
cv2.imwrite(f"videoFrames//frame_{frame_name}.jpg", frame)
def empty_frames_folder():
for frame in os.listdir("videoFrames"):
os.remove(f"videoFrames//{frame}")
def frames_to_video(fps, output_folder='videoOut//'):
output_folder += "//"
image_folder = 'videoFrames//'
video_name = output_folder + 'video.avi'
images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")]
# print(images)
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter(video_name, fourcc, fps, (width, height))
# print(video_name)
for image in images:
video.write(cv2.imread(os.path.join(image_folder, image)))
copy2(video_name, video_name[:-4] + "_copy.avi")
cv2.destroyAllWindows()
video.release()
User.frames_progress = 100
User.finished = True
User.output_video = video_name
def detect():
User.finished = False
while User.wait:
sleep(2)
print("waiting")
else:
print("Got Video")
User.wait = True
high_res = User.high_res
if not high_res:
resized_video = resize_video(User.input_video_path)
if resized_video:
cap = cv2.VideoCapture(resized_video)
print("resized video")
User.input_video_path = resized_video
print(cap)
else:
cap = cv2.VideoCapture(User.input_video_path)
print("didnt resize video")
# User.input_video_path = None
print(User.input_video_path)
print(cap)
with open("uiElements//userSettings.txt", "r", encoding="utf-8") as f:
settings = [line.split(" ")[-1] for line in f.read().split("\n")]
include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color, output_path = settings
output_path = output_path.replace("_SPACE_", " ")
include_labels, include_crowd, include_accuracy, pedestrian_color, crowd_color = int(include_labels), int(
include_crowd), int(include_accuracy), int(pedestrian_color), int(crowd_color)
# in settings {1: "blue", 2: "purple", 3: "red", 4: "orange", 5: "yellow", 6: "green"}
color_dict = {1: "#0094FF", 2: "#FF00F6", 3: "red", 4: "#FF6A00", 5: "yellow", 6: "#26FF5C"} # {1: "red", 2: "purple", 3: "blue", 4: "DodgerBlue", 5: "DeepSkyBlue", 6: "#00FF0C"}
pedestrian_color = color_dict[pedestrian_color]
crowd_color = color_dict[crowd_color]
frame_rate = int(cap.get(cv2.CAP_PROP_FPS))
fps = cap.get(cv2.CAP_PROP_FPS)
video_frames_total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
frame_count = 0
video_frame_count = 0
pedestrian_count_second, crowd_count_second = [], []
# pedestrian_count_frame, crowd_count_frame = 0, 0
def begin():
empty_frames_folder()
nonlocal frame_count, video_frame_count, pedestrian_color, crowd_color, cap
with detection_graph.as_default():
with tf.compat.v1.Session(graph=detection_graph) as sess:
frames_left = 100 # percent
pedestrian_count_frame, crowd_count_frame = 0, 0
increment_progress_bar = 0
while True:
frame_count += 1
if increment_progress_bar >= 2.28 * video_frames_total / 100:
frames_left -= 2.28
User.frames_progress += 2
print(
f"Processed {100 - frames_left:.2f}% of frames, {frames_left:.2f}% left. Progress Bar: {User.frames_progress}")
increment_progress_bar = 0
# if frame_count == frame_rate+1:
success, image_np = cap.read()
if not success:
print('EOF')
break
# flatten image using numpy
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
scores = detection_graph.get_tensor_by_name('detection_scores:0')
classes = detection_graph.get_tensor_by_name('detection_classes:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
# Visualization of the results of a detection. | _, tmp_pedestrian_count_frame, tmp_crowd_count_frame = drawing_tools.draw_boxes_on_image_array( | 1 | 2023-12-26 18:39:25+00:00 | 4k |
davidsvy/fractal_video | src/transform/build.py | [
{
"identifier": "Transform_Contrastive",
"path": "src/transform/contrastive.py",
"snippet": "class Transform_Contrastive(nn.Module):\n\n def __init__(\n self, img_size, easy_k=False, randaugment_m=9, randaugment_n=2, n_steps=1,\n back=True, prob_perspective=0.0, prob_scale=0.0, prob_shi... | from src.transform.contrastive import Transform_Contrastive
from src.transform.mixup import Mixup_Background
from src.transform.compose import (
transform_inner_train,
transform_inner_val,
Transform_Outer_Train,
) | 3,453 |
def transform_contrastive(config):
n_steps = max(1, config.AUG.EPOCHS_CURRICULUM * config.STEPS_PER_EPOCH)
transform = Transform_Contrastive(
img_size=config.DATA.IMG_SIZE,
easy_k=config.AUG.SSL_EASY_K,
randaugment_m=config.AUG.AUTO_AUGMENT_M,
randaugment_n=config.AUG.AUTO_AUGMENT_N,
n_steps=n_steps,
back=config.AUG.TYPE_MIXUP == 'back',
prob_perspective=config.AUG.PROB_PERSPECTIVE,
prob_scale=config.AUG.PROB_SCALE,
prob_shift=config.AUG.PROB_SHIFT,
prob_clone=config.AUG.PROB_CLONE,
prob_zoom=config.AUG.PROB_ZOOM,
prob_shake=config.AUG.PROB_SHAKE,
)
return transform
##########################################################################
##########################################################################
# OTHER
##########################################################################
##########################################################################
def transform_inner(is_train, config):
if is_train:
transform = transform_inner_train(
crop_size=config.DATA.IMG_SIZE,
min_scale=config.AUG.MIN_SCALE,
interp=config.AUG.INTERP,
)
else:
|
def transform_contrastive(config):
n_steps = max(1, config.AUG.EPOCHS_CURRICULUM * config.STEPS_PER_EPOCH)
transform = Transform_Contrastive(
img_size=config.DATA.IMG_SIZE,
easy_k=config.AUG.SSL_EASY_K,
randaugment_m=config.AUG.AUTO_AUGMENT_M,
randaugment_n=config.AUG.AUTO_AUGMENT_N,
n_steps=n_steps,
back=config.AUG.TYPE_MIXUP == 'back',
prob_perspective=config.AUG.PROB_PERSPECTIVE,
prob_scale=config.AUG.PROB_SCALE,
prob_shift=config.AUG.PROB_SHIFT,
prob_clone=config.AUG.PROB_CLONE,
prob_zoom=config.AUG.PROB_ZOOM,
prob_shake=config.AUG.PROB_SHAKE,
)
return transform
##########################################################################
##########################################################################
# OTHER
##########################################################################
##########################################################################
def transform_inner(is_train, config):
if is_train:
transform = transform_inner_train(
crop_size=config.DATA.IMG_SIZE,
min_scale=config.AUG.MIN_SCALE,
interp=config.AUG.INTERP,
)
else: | transform = transform_inner_val( | 3 | 2023-12-27 19:43:45+00:00 | 4k |
camenduru/ELYZA-japanese-Llama-2-13b-instruct-demo-hf | app.py | [
{
"identifier": "get_input_token_length",
"path": "model_vllm.py",
"snippet": "def get_input_token_length(message: str, chat_history: list[tuple[str, str]], system_prompt: str) -> int:\n prompt = get_prompt(message, chat_history, system_prompt)\n input_ids = tokenizer([prompt], return_tensors='np'... | from datetime import datetime, timezone, timedelta
from typing import AsyncGenerator
from botocore.config import Config
from model_vllm import get_input_token_length, run
import os
import time
import uuid
import asyncio
import logging
import textwrap
import boto3
import gradio as gr
import pandas as pd
import torch | 2,365 |
logging.basicConfig(encoding='utf-8', level=logging.ERROR)
logger = logging.getLogger(__name__)
JST = timezone(timedelta(hours=+9), 'JST')
DEFAULT_SYSTEM_PROMPT = 'あなたは誠実で優秀な日本人のアシスタントです。'
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 512
MAX_INPUT_TOKEN_LENGTH = 4000
TITLE = '# ELYZA-japanese-Llama-2-13b-instruct'
DESCRIPTION = """
## 概要
- [ELYZA-japanese-Llama-2-13b](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b)は、[株式会社ELYZA](https://elyza.ai/) (以降「当社」と呼称) が[Llama2](https://ai.meta.com/llama/)をベースとして日本語能力を拡張するために事前学習を行ったモデルです。
- [ELYZA-japanese-Llama-2-13b-instruct](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b-instruct)は ELYZA-japanese-Llama-2-13b を弊社独自のinstruction tuning用データセットで事後学習したモデルです。
- 本デモではこのモデルが使われています。
- 詳細は[Blog記事](https://note.com/elyza/n/n5d42686b60b7)を参照してください。
- 本デモではこちらの[Llama-2 7B Chat](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat)のデモをベースにさせていただきました。
## License
- Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved.
## 免責事項
- 当社は、本デモについて、ユーザーの特定の目的に適合すること、期待する機能・正確性・有用性を有すること、出力データが完全性、正確性、有用性を有すること、ユーザーによる本サービスの利用がユーザーに適用のある法令等に適合すること、継続的に利用できること、及び不具合が生じないことについて、明示又は黙示を問わず何ら保証するものではありません。
- 当社は、本デモに関してユーザーが被った損害等につき、一切の責任を負わないものとし、ユーザーはあらかじめこれを承諾するものとします。
- 当社は、本デモを通じて、ユーザー又は第三者の個人情報を取得することを想定しておらず、ユーザーは、本デモに、ユーザー又は第三者の氏名その他の特定の個人を識別することができる情報等を入力等してはならないものとします。
- ユーザーは、当社が本デモ又は本デモに使用されているアルゴリズム等の改善・向上に使用することを許諾するものとします。
## 本デモで入力・出力されたデータの記録・利用に関して
- 本デモで入力・出力されたデータは当社にて記録させていただき、今後の本デモ又は本デモに使用されているアルゴリズム等の改善・向上に使用させていただく場合がございます。
## We are hiring!
- 当社 (株式会社ELYZA) に興味のある方、ぜひお話ししませんか?
- 機械学習エンジニア・インターン募集: https://open.talentio.com/r/1/c/elyza/homes/2507
- カジュアル面談はこちら: https://chillout.elyza.ai/elyza-japanese-llama2-13b
"""
_format_example = lambda s: textwrap.dedent(s).strip()
examples = list(map(_format_example, [
"""
「キムチプリン」という新商品を考えています。この商品に対する世間の意見として想像されるものを箇条書きで3つ教えて
""",
"""
「メタリック」から「気分上々」までが自然につながるように、あいだの単語を連想してください。
""",
"""
自律神経や副交感神経が乱れている、とはどのような状態ですか?科学的に教えて
""",
"""
日本国内で観光に行きたいと思っています。東京、名古屋、大阪、京都、福岡の特徴を表にまとめてください。
列名は「都道府県」「おすすめスポット」「おすすめグルメ」にしてください。
""",
"""
私の考えた創作料理について、想像して説明を書いてください。
1. トマトマット
2. 餃子風もやし炒め
3. おにぎりすぎ
""",
]))
if not torch.cuda.is_available():
DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
try:
s3 = boto3.client(
's3',
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
region_name=os.environ['S3_REGION'],
config=Config(
connect_timeout=5,
read_timeout=5,
retries={
'mode': 'standard',
'total_max_attempts': 3,
},
),
)
except Exception:
logger.exception('Failed to initialize S3 client')
def clear_and_save_textbox(message: str) -> tuple[str, str]:
return '', message
def display_input(message: str, history: list[tuple[str, str]]) -> list[tuple[str, str]]:
history.append((message, ''))
return history
def delete_prev_fn(history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
try:
message, _ = history.pop()
except IndexError:
message = ''
return history, message or ''
async def generate(
message: str,
history_with_input: list[tuple[str, str]],
system_prompt: str,
max_new_tokens: int,
temperature: float,
top_p: float,
top_k: int,
do_sample: bool,
repetition_penalty: float,
) -> AsyncGenerator[list[tuple[str, str]], None]:
if max_new_tokens > MAX_MAX_NEW_TOKENS:
raise ValueError
history = history_with_input[:-1]
|
logging.basicConfig(encoding='utf-8', level=logging.ERROR)
logger = logging.getLogger(__name__)
JST = timezone(timedelta(hours=+9), 'JST')
DEFAULT_SYSTEM_PROMPT = 'あなたは誠実で優秀な日本人のアシスタントです。'
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 512
MAX_INPUT_TOKEN_LENGTH = 4000
TITLE = '# ELYZA-japanese-Llama-2-13b-instruct'
DESCRIPTION = """
## 概要
- [ELYZA-japanese-Llama-2-13b](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b)は、[株式会社ELYZA](https://elyza.ai/) (以降「当社」と呼称) が[Llama2](https://ai.meta.com/llama/)をベースとして日本語能力を拡張するために事前学習を行ったモデルです。
- [ELYZA-japanese-Llama-2-13b-instruct](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b-instruct)は ELYZA-japanese-Llama-2-13b を弊社独自のinstruction tuning用データセットで事後学習したモデルです。
- 本デモではこのモデルが使われています。
- 詳細は[Blog記事](https://note.com/elyza/n/n5d42686b60b7)を参照してください。
- 本デモではこちらの[Llama-2 7B Chat](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat)のデモをベースにさせていただきました。
## License
- Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved.
## 免責事項
- 当社は、本デモについて、ユーザーの特定の目的に適合すること、期待する機能・正確性・有用性を有すること、出力データが完全性、正確性、有用性を有すること、ユーザーによる本サービスの利用がユーザーに適用のある法令等に適合すること、継続的に利用できること、及び不具合が生じないことについて、明示又は黙示を問わず何ら保証するものではありません。
- 当社は、本デモに関してユーザーが被った損害等につき、一切の責任を負わないものとし、ユーザーはあらかじめこれを承諾するものとします。
- 当社は、本デモを通じて、ユーザー又は第三者の個人情報を取得することを想定しておらず、ユーザーは、本デモに、ユーザー又は第三者の氏名その他の特定の個人を識別することができる情報等を入力等してはならないものとします。
- ユーザーは、当社が本デモ又は本デモに使用されているアルゴリズム等の改善・向上に使用することを許諾するものとします。
## 本デモで入力・出力されたデータの記録・利用に関して
- 本デモで入力・出力されたデータは当社にて記録させていただき、今後の本デモ又は本デモに使用されているアルゴリズム等の改善・向上に使用させていただく場合がございます。
## We are hiring!
- 当社 (株式会社ELYZA) に興味のある方、ぜひお話ししませんか?
- 機械学習エンジニア・インターン募集: https://open.talentio.com/r/1/c/elyza/homes/2507
- カジュアル面談はこちら: https://chillout.elyza.ai/elyza-japanese-llama2-13b
"""
_format_example = lambda s: textwrap.dedent(s).strip()
examples = list(map(_format_example, [
"""
「キムチプリン」という新商品を考えています。この商品に対する世間の意見として想像されるものを箇条書きで3つ教えて
""",
"""
「メタリック」から「気分上々」までが自然につながるように、あいだの単語を連想してください。
""",
"""
自律神経や副交感神経が乱れている、とはどのような状態ですか?科学的に教えて
""",
"""
日本国内で観光に行きたいと思っています。東京、名古屋、大阪、京都、福岡の特徴を表にまとめてください。
列名は「都道府県」「おすすめスポット」「おすすめグルメ」にしてください。
""",
"""
私の考えた創作料理について、想像して説明を書いてください。
1. トマトマット
2. 餃子風もやし炒め
3. おにぎりすぎ
""",
]))
if not torch.cuda.is_available():
DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
try:
s3 = boto3.client(
's3',
aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
region_name=os.environ['S3_REGION'],
config=Config(
connect_timeout=5,
read_timeout=5,
retries={
'mode': 'standard',
'total_max_attempts': 3,
},
),
)
except Exception:
logger.exception('Failed to initialize S3 client')
def clear_and_save_textbox(message: str) -> tuple[str, str]:
return '', message
def display_input(message: str, history: list[tuple[str, str]]) -> list[tuple[str, str]]:
history.append((message, ''))
return history
def delete_prev_fn(history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
try:
message, _ = history.pop()
except IndexError:
message = ''
return history, message or ''
async def generate(
message: str,
history_with_input: list[tuple[str, str]],
system_prompt: str,
max_new_tokens: int,
temperature: float,
top_p: float,
top_k: int,
do_sample: bool,
repetition_penalty: float,
) -> AsyncGenerator[list[tuple[str, str]], None]:
if max_new_tokens > MAX_MAX_NEW_TOKENS:
raise ValueError
history = history_with_input[:-1] | stream = await run( | 1 | 2023-12-27 02:51:16+00:00 | 4k |
camenduru/MotionCtrl-hf | lvdm/models/autoencoder.py | [
{
"identifier": "DiagonalGaussianDistribution",
"path": "lvdm/distributions.py",
"snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n ... | import os
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from contextlib import contextmanager
from einops import rearrange
from lvdm.distributions import DiagonalGaussianDistribution
from lvdm.modules.networks.ae_modules import Decoder, Encoder
from utils.utils import instantiate_from_config | 2,877 |
class AutoencoderKL(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
test=False,
logdir=None,
input_dim=4,
test_args=None,
):
super().__init__()
self.image_key = image_key
|
class AutoencoderKL(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
test=False,
logdir=None,
input_dim=4,
test_args=None,
):
super().__init__()
self.image_key = image_key | self.encoder = Encoder(**ddconfig) | 2 | 2023-12-27 19:32:03+00:00 | 4k |
bitstuffing/pychat | core/bing.py | [
{
"identifier": "Browser",
"path": "core/browser.py",
"snippet": "class Browser():\n\n USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:120.0) Gecko/20100101 Firefox/120.0'\n USER_AGENT_EDGE = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safar... | from core.browser import Browser
from aiohttp import ClientSession
from dateutil.tz import tzutc
from core.helpers.binghelper import BingResponse, BingMessageType, BingMessageType1, BingMessageType2, BingTextResponse
import json
import uuid
import os
import re
import asyncio
import aiohttp
import string
import random
import requests
import urllib
import urllib.parse
import datetime
import threading
import queue
import speech_recognition as sr
import traceback
import time
import time | 3,066 |
class AudioRecorder(threading.Thread):
def __init__(self, sample_rate=22500):
threading.Thread.__init__(self)
self.queue = queue.Queue()
self.exit = False
self.recognizer = sr.Recognizer()
self.mic = sr.Microphone(sample_rate=sample_rate)
def getQueue(self):
return self.queue
def getExit(self):
return self.exit
def setExit(self, exit):
self.exit = exit
def run(self):
with self.mic as source:
while not self.exit:
audio = self.recognizer.record(source, duration=1)
self.queue.put(audio.frame_data)
|
class AudioRecorder(threading.Thread):
def __init__(self, sample_rate=22500):
threading.Thread.__init__(self)
self.queue = queue.Queue()
self.exit = False
self.recognizer = sr.Recognizer()
self.mic = sr.Microphone(sample_rate=sample_rate)
def getQueue(self):
return self.queue
def getExit(self):
return self.exit
def setExit(self, exit):
self.exit = exit
def run(self):
with self.mic as source:
while not self.exit:
audio = self.recognizer.record(source, duration=1)
self.queue.put(audio.frame_data)
| class Bing(Browser): | 0 | 2023-12-28 19:45:49+00:00 | 4k |
vita-epfl/social-transmotion | evaluate_jrdb.py | [
{
"identifier": "batch_process_coords",
"path": "dataset_jrdb.py",
"snippet": "def batch_process_coords(coords, masks, padding_mask, config, modality_selection='traj+2dbox', training=False, multiperson=True):\n joints = coords.to(config[\"DEVICE\"])\n masks = masks.to(config[\"DEVICE\"])\n in_F... | import argparse
import torch
import random
import numpy as np
from progress.bar import Bar
from torch.utils.data import DataLoader
from dataset_jrdb import batch_process_coords, create_dataset, collate_batch
from model_jrdb import create_model
from utils.utils import create_logger | 2,246 |
def inference(model, config, input_joints, padding_mask, out_len=14):
model.eval()
with torch.no_grad():
pred_joints = model(input_joints, padding_mask)
output_joints = pred_joints[:,-out_len:]
return output_joints
def evaluate_ade_fde(model, modality_selection, dataloader, bs, config, logger, return_all=False, bar_prefix="", per_joint=False, show_avg=False):
in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size']
bar = Bar(f"EVAL ADE_FDE", fill="#", max=len(dataloader))
batch_size = bs
batch_id = 0
ade = 0
fde = 0
ade_batch = 0
fde_batch = 0
for i, batch in enumerate(dataloader):
joints, masks, padding_mask = batch
padding_mask = padding_mask.to(config["DEVICE"])
in_joints, in_masks, out_joints, out_masks, padding_mask = batch_process_coords(joints, masks, padding_mask, config, modality_selection)
pred_joints = inference(model, config, in_joints, padding_mask, out_len=out_F)
out_joints = out_joints.cpu()
pred_joints = pred_joints.cpu().reshape(out_joints.size(0), 12, 1, 2)
for k in range(len(out_joints)):
person_out_joints = out_joints[k,:,0:1]
person_pred_joints = pred_joints[k,:,0:1]
gt_xy = person_out_joints[:,0,:2]
pred_xy = person_pred_joints[:,0,:2]
sum_ade = 0
for t in range(12):
d1 = (gt_xy[t,0].detach().cpu().numpy() - pred_xy[t,0].detach().cpu().numpy())
d2 = (gt_xy[t,1].detach().cpu().numpy() - pred_xy[t,1].detach().cpu().numpy())
dist_ade = [d1,d2]
sum_ade += np.linalg.norm(dist_ade)
sum_ade /= 12
ade_batch += sum_ade
d3 = (gt_xy[-1,0].detach().cpu().numpy() - pred_xy[-1,0].detach().cpu().numpy())
d4 = (gt_xy[-1,1].detach().cpu().numpy() - pred_xy[-1,1].detach().cpu().numpy())
dist_fde = [d3,d4]
scene_fde = np.linalg.norm(dist_fde)
fde_batch += scene_fde
batch_id+=1
ade = ade_batch/((batch_id-1)*batch_size+len(out_joints))
fde = fde_batch/((batch_id-1)*batch_size+len(out_joints))
return ade, fde
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ckpt", type=str, help="checkpoint path")
parser.add_argument("--split", type=str, default="test", help="Split to use. one of [train, test, valid]")
parser.add_argument("--metric", type=str, default="vim", help="Evaluation metric. One of (vim, mpjpe)")
parser.add_argument("--modality", type=str, default="traj+2dbox", help="available modality combination from['traj','traj+2dbox']")
args = parser.parse_args()
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
################################
# Load checkpoint
################################
logger = create_logger('')
logger.info(f'Loading checkpoint from {args.ckpt}')
ckpt = torch.load(args.ckpt, map_location = torch.device('cpu'))
config = ckpt['config']
if torch.cuda.is_available():
config["DEVICE"] = f"cuda:{torch.cuda.current_device()}"
torch.cuda.manual_seed(0)
else:
config["DEVICE"] = "cpu"
logger.info("Initializing with config:")
logger.info(config)
################################
# Initialize model
################################
model = create_model(config, logger)
model.load_state_dict(ckpt['model'])
################################
# Load data
################################
in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size']
assert in_F == 9
assert out_F == 12
name = config['DATA']['train_datasets']
|
def inference(model, config, input_joints, padding_mask, out_len=14):
model.eval()
with torch.no_grad():
pred_joints = model(input_joints, padding_mask)
output_joints = pred_joints[:,-out_len:]
return output_joints
def evaluate_ade_fde(model, modality_selection, dataloader, bs, config, logger, return_all=False, bar_prefix="", per_joint=False, show_avg=False):
in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size']
bar = Bar(f"EVAL ADE_FDE", fill="#", max=len(dataloader))
batch_size = bs
batch_id = 0
ade = 0
fde = 0
ade_batch = 0
fde_batch = 0
for i, batch in enumerate(dataloader):
joints, masks, padding_mask = batch
padding_mask = padding_mask.to(config["DEVICE"])
in_joints, in_masks, out_joints, out_masks, padding_mask = batch_process_coords(joints, masks, padding_mask, config, modality_selection)
pred_joints = inference(model, config, in_joints, padding_mask, out_len=out_F)
out_joints = out_joints.cpu()
pred_joints = pred_joints.cpu().reshape(out_joints.size(0), 12, 1, 2)
for k in range(len(out_joints)):
person_out_joints = out_joints[k,:,0:1]
person_pred_joints = pred_joints[k,:,0:1]
gt_xy = person_out_joints[:,0,:2]
pred_xy = person_pred_joints[:,0,:2]
sum_ade = 0
for t in range(12):
d1 = (gt_xy[t,0].detach().cpu().numpy() - pred_xy[t,0].detach().cpu().numpy())
d2 = (gt_xy[t,1].detach().cpu().numpy() - pred_xy[t,1].detach().cpu().numpy())
dist_ade = [d1,d2]
sum_ade += np.linalg.norm(dist_ade)
sum_ade /= 12
ade_batch += sum_ade
d3 = (gt_xy[-1,0].detach().cpu().numpy() - pred_xy[-1,0].detach().cpu().numpy())
d4 = (gt_xy[-1,1].detach().cpu().numpy() - pred_xy[-1,1].detach().cpu().numpy())
dist_fde = [d3,d4]
scene_fde = np.linalg.norm(dist_fde)
fde_batch += scene_fde
batch_id+=1
ade = ade_batch/((batch_id-1)*batch_size+len(out_joints))
fde = fde_batch/((batch_id-1)*batch_size+len(out_joints))
return ade, fde
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ckpt", type=str, help="checkpoint path")
parser.add_argument("--split", type=str, default="test", help="Split to use. one of [train, test, valid]")
parser.add_argument("--metric", type=str, default="vim", help="Evaluation metric. One of (vim, mpjpe)")
parser.add_argument("--modality", type=str, default="traj+2dbox", help="available modality combination from['traj','traj+2dbox']")
args = parser.parse_args()
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
################################
# Load checkpoint
################################
logger = create_logger('')
logger.info(f'Loading checkpoint from {args.ckpt}')
ckpt = torch.load(args.ckpt, map_location = torch.device('cpu'))
config = ckpt['config']
if torch.cuda.is_available():
config["DEVICE"] = f"cuda:{torch.cuda.current_device()}"
torch.cuda.manual_seed(0)
else:
config["DEVICE"] = "cpu"
logger.info("Initializing with config:")
logger.info(config)
################################
# Initialize model
################################
model = create_model(config, logger)
model.load_state_dict(ckpt['model'])
################################
# Load data
################################
in_F, out_F = config['TRAIN']['input_track_size'], config['TRAIN']['output_track_size']
assert in_F == 9
assert out_F == 12
name = config['DATA']['train_datasets']
| dataset = create_dataset(name[0], logger, split=args.split, track_size=(in_F+out_F), track_cutoff=in_F) | 1 | 2023-12-25 15:12:40+00:00 | 4k |
AzizKpln/AutoIOC-MISP | main.py | [
{
"identifier": "runAbuseIP",
"path": "Integrations/abuseipdb.py",
"snippet": "def runAbuseIP(mispapi,mispurl,mispeventid):\r\n misp_connect(mispapi,mispurl,mispeventid)\r\n url = 'https://api.abuseipdb.com/api/v2/blacklist'\r\n querystring = {\r\n 'confidenceMinimum':'85'\r\n }\r\n ... | from flask import Flask, render_template, redirect, request
from Integrations.abuseipdb import runAbuseIP
from Integrations.cinsscore import runCinsScore
from Integrations.killnet import runKillnet
from Integrations.emergingthreats import runEmergingThreats
from Integrations.honeydb import runHoneyDB
from Integrations.maltiverse import runMaltiverse
from Integrations.malware_bazar import runMalwareBazaar
from Integrations.openphish import runOpenPhish
from Integrations.phishunt import runPhishHunt
from Integrations.rescureme import runRescureMe
from Integrations.sslbl import runSSLbl
from Integrations.threatfox import runThreatFox
from Integrations.urlhaus import runURLHaus
from Integrations.virusshare import runVirusShare
from Integrations.vxvault import runVXVault
from Integrations.manual import runManually
import threading
| 3,234 | app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def hello_world():
if request.method == 'POST':
operation = request.form['operation']
if operation=="add_manually":
return redirect("/manually")
else:
return redirect('/automaticlly')
return render_template('main.html')
@app.route("/manually",methods=["GET","POST"])
def manually():
if request.method=="POST":
ioclist=request.form.getlist("iocList")
mispapi=request.form["mispapi"];mispurl=request.form["mispurl"];mispeventid=request.form["mispeventid"]
| app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def hello_world():
if request.method == 'POST':
operation = request.form['operation']
if operation=="add_manually":
return redirect("/manually")
else:
return redirect('/automaticlly')
return render_template('main.html')
@app.route("/manually",methods=["GET","POST"])
def manually():
if request.method=="POST":
ioclist=request.form.getlist("iocList")
mispapi=request.form["mispapi"];mispurl=request.form["mispurl"];mispeventid=request.form["mispeventid"]
| threading.Thread(target=runManually,args=(mispapi,mispurl,mispeventid,ioclist,)).start()
| 15 | 2023-12-23 10:39:28+00:00 | 4k |
facebookresearch/ca_body | ca_body/utils/geom_body.py | [
{
"identifier": "index_image_impaint",
"path": "ca_body/utils/geom.py",
"snippet": "def index_image_impaint(\n index_image: th.Tensor,\n bary_image: Optional[th.Tensor] = None,\n distance_threshold=100.0,\n):\n # getting the mask around the indexes?\n if len(index_image.shape) == 3:\n ... | import logging
import igl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from logging import Logger
from typing import Any, Dict, Optional, Tuple, Union
from ca_body.utils.geom import (
index_image_impaint,
make_uv_barys,
make_uv_vert_index,
)
from trimesh import Trimesh
from trimesh.triangles import points_to_barycentric | 3,201 | uv_size,
dtype=values.dtype,
device=values.device,
)
values_uv[:, :, index_mask] = values_flat
return values_uv
def sample_uv(
values_uv: th.Tensor,
uv_coords: th.Tensor,
v2uv: Optional[th.Tensor] = None,
mode: str = "bilinear",
align_corners: bool = False,
flip_uvs: bool = False,
) -> th.Tensor:
batch_size = values_uv.shape[0]
if flip_uvs:
uv_coords = uv_coords.clone()
uv_coords[:, 1] = 1.0 - uv_coords[:, 1]
uv_coords_norm = (uv_coords * 2.0 - 1.0)[np.newaxis, :, np.newaxis].expand(
batch_size, -1, -1, -1
)
values = (
F.grid_sample(values_uv, uv_coords_norm, align_corners=align_corners, mode=mode)
.squeeze(-1)
.permute((0, 2, 1))
)
if v2uv is not None:
values_duplicate = values[:, v2uv]
values = values_duplicate.mean(2)
# if return_var:
# values_var = values_duplicate.var(2)
# return values, values_var
return values
def compute_tbn_uv(
tri_xyz: th.Tensor, tri_uv: th.Tensor, eps: float = 1e-5
) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""Compute tangents, bitangents, normals.
Args:
tri_xyz: [B,N,3,3] vertex coordinates
tri_uv: [N,2] texture coordinates
Returns:
tangents, bitangents, normals
"""
tri_uv = tri_uv[np.newaxis]
v01 = tri_xyz[:, :, 1] - tri_xyz[:, :, 0]
v02 = tri_xyz[:, :, 2] - tri_xyz[:, :, 0]
normals = th.cross(v01, v02, dim=-1)
normals = normals / th.norm(normals, dim=-1, keepdim=True).clamp(min=eps)
vt01 = tri_uv[:, :, 1] - tri_uv[:, :, 0]
vt02 = tri_uv[:, :, 2] - tri_uv[:, :, 0]
f = th.tensor([1.0], device=tri_xyz.device) / (
vt01[..., 0] * vt02[..., 1] - vt01[..., 1] * vt02[..., 0]
)
tangents = f[..., np.newaxis] * (
v01 * vt02[..., 1][..., np.newaxis] - v02 * vt01[..., 1][..., np.newaxis]
)
tangents = tangents / th.norm(tangents, dim=-1, keepdim=True).clamp(min=eps)
bitangents = th.cross(normals, tangents, dim=-1)
bitangents = bitangents / th.norm(bitangents, dim=-1, keepdim=True).clamp(min=eps).clamp(
min=eps
)
return tangents, bitangents, normals
class GeometryModule(nn.Module):
"""This module encapsulates uv correspondences and vertex images."""
def __init__(
self,
vi: th.Tensor,
vt: th.Tensor,
vti: th.Tensor,
v2uv: th.Tensor,
uv_size: int,
flip_uv: bool = False,
impaint: bool = False,
impaint_threshold: float = 100.0,
device=None,
) -> None:
super().__init__()
self.register_buffer("vi", th.as_tensor(vi))
self.register_buffer("vt", th.as_tensor(vt))
self.register_buffer("vti", th.as_tensor(vti))
self.register_buffer("v2uv", th.as_tensor(v2uv))
self.uv_size: int = uv_size
index_image = make_uv_vert_index(
self.vt,
self.vi,
self.vti,
uv_shape=uv_size,
flip_uv=flip_uv,
).cpu()
face_index, bary_image = make_uv_barys(self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv)
if impaint:
# TODO: have an option to pre-compute this?
assert isinstance(uv_size, int)
if uv_size >= 1024:
logger.info("impainting index image might take a while for sizes >= 1024")
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
logger: Logger = logging.getLogger(__name__)
def face_normals_v2(v: th.Tensor, vi: th.Tensor, eps: float = 1e-5) -> th.Tensor:
pts = v[:, vi]
v0 = pts[:, :, 1] - pts[:, :, 0]
v1 = pts[:, :, 2] - pts[:, :, 0]
n = th.cross(v0, v1, dim=-1)
norm = th.norm(n, dim=-1, keepdim=True)
norm[norm < eps] = 1
n /= norm
return n
def vert_normals_v2(v: th.Tensor, vi: th.Tensor, eps: float = 1.0e-5) -> th.Tensor:
fnorms = face_normals_v2(v, vi)
fnorms = fnorms[:, :, None].expand(-1, -1, 3, -1).reshape(fnorms.shape[0], -1, 3)
vi_flat = vi.view(1, -1).expand(v.shape[0], -1)
vnorms = th.zeros_like(v)
for j in range(3):
vnorms[..., j].scatter_add_(1, vi_flat, fnorms[..., j])
norm = th.norm(vnorms, dim=-1, keepdim=True)
norm[norm < eps] = 1
vnorms /= norm
return vnorms
def compute_neighbours(
n_verts: int, vi: th.Tensor, n_max_values: int = 10
) -> Tuple[th.Tensor, th.Tensor]:
"""Computes first-ring neighbours given vertices and faces."""
n_vi = vi.shape[0]
adj = {i: set() for i in range(n_verts)}
for i in range(n_vi):
for idx in vi[i]:
adj[idx] |= set(vi[i]) - {idx}
nbs_idxs = np.tile(np.arange(n_verts)[:, np.newaxis], (1, n_max_values))
nbs_weights = np.zeros((n_verts, n_max_values), dtype=np.float32)
for idx in range(n_verts):
n_values = min(len(adj[idx]), n_max_values)
nbs_idxs[idx, :n_values] = np.array(list(adj[idx]))[:n_values]
nbs_weights[idx, :n_values] = -1.0 / n_values
return nbs_idxs, nbs_weights
def compute_v2uv(n_verts: int, vi: th.Tensor, vti: th.Tensor, n_max: int = 4) -> th.Tensor:
"""Computes mapping from vertex indices to texture indices.
Args:
vi: [F, 3], triangles
vti: [F, 3], texture triangles
n_max: int, max number of texture locations
Returns:
[n_verts, n_max], texture indices
"""
v2uv_dict = {}
for i_v, i_uv in zip(vi.reshape(-1), vti.reshape(-1)):
v2uv_dict.setdefault(i_v, set()).add(i_uv)
assert len(v2uv_dict) == n_verts
v2uv = np.zeros((n_verts, n_max), dtype=np.int32)
for i in range(n_verts):
vals = sorted(v2uv_dict[i])
v2uv[i, :] = vals[0]
v2uv[i, : len(vals)] = np.array(vals)
return v2uv
def values_to_uv(values: th.Tensor, index_img: th.Tensor, bary_img: th.Tensor) -> th.Tensor:
uv_size = index_img.shape[0]
index_mask = th.all(index_img != -1, dim=-1)
idxs_flat = index_img[index_mask].to(th.int64)
bary_flat = bary_img[index_mask].to(th.float32)
# NOTE: here we assume
values_flat = th.sum(values[:, idxs_flat].permute(0, 3, 1, 2) * bary_flat, dim=-1)
values_uv = th.zeros(
values.shape[0],
values.shape[-1],
uv_size,
uv_size,
dtype=values.dtype,
device=values.device,
)
values_uv[:, :, index_mask] = values_flat
return values_uv
def sample_uv(
values_uv: th.Tensor,
uv_coords: th.Tensor,
v2uv: Optional[th.Tensor] = None,
mode: str = "bilinear",
align_corners: bool = False,
flip_uvs: bool = False,
) -> th.Tensor:
batch_size = values_uv.shape[0]
if flip_uvs:
uv_coords = uv_coords.clone()
uv_coords[:, 1] = 1.0 - uv_coords[:, 1]
uv_coords_norm = (uv_coords * 2.0 - 1.0)[np.newaxis, :, np.newaxis].expand(
batch_size, -1, -1, -1
)
values = (
F.grid_sample(values_uv, uv_coords_norm, align_corners=align_corners, mode=mode)
.squeeze(-1)
.permute((0, 2, 1))
)
if v2uv is not None:
values_duplicate = values[:, v2uv]
values = values_duplicate.mean(2)
# if return_var:
# values_var = values_duplicate.var(2)
# return values, values_var
return values
def compute_tbn_uv(
tri_xyz: th.Tensor, tri_uv: th.Tensor, eps: float = 1e-5
) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""Compute tangents, bitangents, normals.
Args:
tri_xyz: [B,N,3,3] vertex coordinates
tri_uv: [N,2] texture coordinates
Returns:
tangents, bitangents, normals
"""
tri_uv = tri_uv[np.newaxis]
v01 = tri_xyz[:, :, 1] - tri_xyz[:, :, 0]
v02 = tri_xyz[:, :, 2] - tri_xyz[:, :, 0]
normals = th.cross(v01, v02, dim=-1)
normals = normals / th.norm(normals, dim=-1, keepdim=True).clamp(min=eps)
vt01 = tri_uv[:, :, 1] - tri_uv[:, :, 0]
vt02 = tri_uv[:, :, 2] - tri_uv[:, :, 0]
f = th.tensor([1.0], device=tri_xyz.device) / (
vt01[..., 0] * vt02[..., 1] - vt01[..., 1] * vt02[..., 0]
)
tangents = f[..., np.newaxis] * (
v01 * vt02[..., 1][..., np.newaxis] - v02 * vt01[..., 1][..., np.newaxis]
)
tangents = tangents / th.norm(tangents, dim=-1, keepdim=True).clamp(min=eps)
bitangents = th.cross(normals, tangents, dim=-1)
bitangents = bitangents / th.norm(bitangents, dim=-1, keepdim=True).clamp(min=eps).clamp(
min=eps
)
return tangents, bitangents, normals
class GeometryModule(nn.Module):
"""This module encapsulates uv correspondences and vertex images."""
def __init__(
self,
vi: th.Tensor,
vt: th.Tensor,
vti: th.Tensor,
v2uv: th.Tensor,
uv_size: int,
flip_uv: bool = False,
impaint: bool = False,
impaint_threshold: float = 100.0,
device=None,
) -> None:
super().__init__()
self.register_buffer("vi", th.as_tensor(vi))
self.register_buffer("vt", th.as_tensor(vt))
self.register_buffer("vti", th.as_tensor(vti))
self.register_buffer("v2uv", th.as_tensor(v2uv))
self.uv_size: int = uv_size
index_image = make_uv_vert_index(
self.vt,
self.vi,
self.vti,
uv_shape=uv_size,
flip_uv=flip_uv,
).cpu()
face_index, bary_image = make_uv_barys(self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv)
if impaint:
# TODO: have an option to pre-compute this?
assert isinstance(uv_size, int)
if uv_size >= 1024:
logger.info("impainting index image might take a while for sizes >= 1024")
| index_image, bary_image = index_image_impaint( | 0 | 2023-12-27 15:31:35+00:00 | 4k |
0x00wolf/hkrsAI | src/conversation.py | [
{
"identifier": "SystemPrompt",
"path": "src/systemprompt.py",
"snippet": "class SystemPrompt:\n \"\"\"A class that manages setting the system prompt used to define AI assistants. \\\n To add a new system prompt that will be selectable from the runtime menu, \\\n copy the prompt to an extension... | import dataclasses
import openai
from typing import List
from src.systemprompt import SystemPrompt
from src.gpt import GPT | 1,946 |
@dataclasses.dataclass
class Conversation:
messages: list[dict] = dataclasses.field(default_factory=list)
query: str = ''
reply: str = ''
response: dict = dataclasses.field(default_factory=dict)
tokens: int = 0
def start(self, system_prompt: str):
self.messages = [{"role": "system", "content": system_prompt}]
print()
return Conversation(messages=self.messages)
def speak(self, content: str):
self.messages.append({"role": "user", "content": content})
return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)
def think(self, thought):
if self.query == '':
self.query = thought
else:
self.query = f'{self.query}\n{thought}'
return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)
|
@dataclasses.dataclass
class Conversation:
messages: list[dict] = dataclasses.field(default_factory=list)
query: str = ''
reply: str = ''
response: dict = dataclasses.field(default_factory=dict)
tokens: int = 0
def start(self, system_prompt: str):
self.messages = [{"role": "system", "content": system_prompt}]
print()
return Conversation(messages=self.messages)
def speak(self, content: str):
self.messages.append({"role": "user", "content": content})
return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)
def think(self, thought):
if self.query == '':
self.query = thought
else:
self.query = f'{self.query}\n{thought}'
return Conversation(messages=self.messages, query=self.query, reply=self.reply, response=self.response)
| def listen(self, gpt: GPT): | 1 | 2023-12-22 07:04:47+00:00 | 4k |
ccurme/chesster | chesster/app/app.py | [
{
"identifier": "BoardManager",
"path": "chesster/app/board_manager.py",
"snippet": "class BoardManager:\n def __init__(self):\n self.active_websockets: list[WebSocket] = []\n self.last_updated_image = None\n self.board = chess.Board()\n self.player_side = chess.WHITE\n ... | import time
import chess
import chess.svg
from typing import Any, AsyncIterator
from fastapi import FastAPI, Request, WebSocket
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from chesster.app.board_manager import BoardManager
from chesster.app.utils import (
get_engine_move,
parse_chess_move,
parse_pgn_into_move_list,
serialize_board_state,
) | 1,743 |
app = FastAPI()
app.mount("/static", StaticFiles(directory="chesster/app/static"), name="static")
templates = Jinja2Templates(directory="chesster/app/templates")
board_manager = BoardManager()
@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
return templates.TemplateResponse(request, "index.html")
@app.post("/set_player_side/{color}")
async def set_player_side(color: str) -> dict:
"""Set side to black or white."""
if "w" in color:
player_side = chess.WHITE
side_str = "white"
else:
player_side = chess.BLACK
side_str = "black"
await board_manager.set_player_side(player_side)
return {"message": f"Updated player side successfully to {side_str}."}
@app.post("/initialize_game_vs_opponent/{player_side_str}")
async def initialize_game_vs_opponent(player_side_str: str) -> dict:
"""Start new game."""
await board_manager.set_board(chess.Board())
_ = await set_player_side(player_side_str)
if board_manager.player_side == chess.BLACK:
opponent_move = get_engine_move(board_manager.board)
opponent_move_san = board_manager.board.san(opponent_move)
await board_manager.make_move(opponent_move)
response = f"Game initialized. Opponent move: {opponent_move_san}."
else:
response = "Game initialized. Your move."
return {"message": response}
@app.post("/make_move_vs_opponent/{move_str}")
async def make_move_vs_opponent(move_str: str) -> dict:
"""Push move to board against engine. Move should be a valid UCI string."""
if board_manager.board.is_game_over():
return {"message": "Game over."}
move = parse_chess_move(board_manager.board, move_str)
if not board_manager.board.is_legal(move):
return {"message": "Illegal move, try again."}
move_san = board_manager.board.san(move)
await board_manager.make_move(move)
opponent_move = get_engine_move(board_manager.board)
opponent_move_san = board_manager.board.san(opponent_move)
time.sleep(1)
await board_manager.make_move(opponent_move)
response = (
f"Successfully made move to {move_san}. Opponent responded by moving"
f" to {opponent_move_san}.\n\n"
|
app = FastAPI()
app.mount("/static", StaticFiles(directory="chesster/app/static"), name="static")
templates = Jinja2Templates(directory="chesster/app/templates")
board_manager = BoardManager()
@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
return templates.TemplateResponse(request, "index.html")
@app.post("/set_player_side/{color}")
async def set_player_side(color: str) -> dict:
"""Set side to black or white."""
if "w" in color:
player_side = chess.WHITE
side_str = "white"
else:
player_side = chess.BLACK
side_str = "black"
await board_manager.set_player_side(player_side)
return {"message": f"Updated player side successfully to {side_str}."}
@app.post("/initialize_game_vs_opponent/{player_side_str}")
async def initialize_game_vs_opponent(player_side_str: str) -> dict:
"""Start new game."""
await board_manager.set_board(chess.Board())
_ = await set_player_side(player_side_str)
if board_manager.player_side == chess.BLACK:
opponent_move = get_engine_move(board_manager.board)
opponent_move_san = board_manager.board.san(opponent_move)
await board_manager.make_move(opponent_move)
response = f"Game initialized. Opponent move: {opponent_move_san}."
else:
response = "Game initialized. Your move."
return {"message": response}
@app.post("/make_move_vs_opponent/{move_str}")
async def make_move_vs_opponent(move_str: str) -> dict:
"""Push move to board against engine. Move should be a valid UCI string."""
if board_manager.board.is_game_over():
return {"message": "Game over."}
move = parse_chess_move(board_manager.board, move_str)
if not board_manager.board.is_legal(move):
return {"message": "Illegal move, try again."}
move_san = board_manager.board.san(move)
await board_manager.make_move(move)
opponent_move = get_engine_move(board_manager.board)
opponent_move_san = board_manager.board.san(opponent_move)
time.sleep(1)
await board_manager.make_move(opponent_move)
response = (
f"Successfully made move to {move_san}. Opponent responded by moving"
f" to {opponent_move_san}.\n\n" | f"Board state:\n{serialize_board_state(board_manager.board, board_manager.player_side)}" | 4 | 2023-12-24 19:19:31+00:00 | 4k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.