Dataset fields (name: type, value range):

repo_name: string (length 7 to 71)
file_path: string (length 5 to 118)
context: list
import_statement: string (length 45 to 12.5k)
token_num: int64 (641 to 99.4k)
cropped_code: string (length 44 to 17k)
all_code: string (length 43 to 754k)
next_line: string (length 2 to 330)
gold_snippet_index: int64 (0 to 68)
created_at: string (length 25)
level: string (9 classes)
repo_name: OPPOMKLab/u-LLaVA
file_path: tasks/image_text_pretrain.py
context: [ { "identifier": "registry", "path": "utils/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def ...
import_statement:
    from utils.registry import registry
    from tasks.base_task import BaseTask
    from utils.tools import datetime_print
    from datasets.datasets.concat_dataset import ConcatDataset, ConcatDatasetWithShuffle
token_num: 1,500
cropped_code:
    """
    Partially Adapted from:
    https://github.com/DAMO-NLP-SG/Video-LLaMA/blob/main/video_llama/tasks/image_text_pretrain.py
    """


    @registry.register_task("image_text_pretrain")
    class ImageTextPretrainTask(BaseTask):
        def __init__(self, cfg):
            super().__init__(cfg)

        @staticmethod
        def build_datasets(datasets_config, tokenizer, processor_dict, conv_type='conv_simple'):
            """
            :param datasets_config:
            :param tokenizer:
            :param processor_dict: {'clip_image': CLIPImageProcessor()}
            :param conv_type:
            :return:
            """
            assert len(datasets_config) > 0, "At least one dataset has to be specified."

            if len(datasets_config) == 1:
                name = list(datasets_config.keys())[0]
                dataset_config = datasets_config[name]
                builder = registry.get_builder_class(name)(dataset_config)
                # {"train": dataset, "test": dataset}
                dataset = builder.build(tokenizer, processor_dict, conv_type)
            else:
                shuffle = True
                portion = 1

                dataset_list = []
                for idx, name in enumerate(datasets_config):
                    datetime_print('BUILDING DATASET {0}: {1}'.format(idx + 1, name))
                    dataset_config = datasets_config[name]
                    builder = registry.get_builder_class(name)(dataset_config)
                    current_dataset = builder.build(tokenizer, processor_dict, conv_type)
                    dataset_list.append(current_dataset)

                if shuffle:
                    dataset = ConcatDatasetWithShuffle(dataset_list, portion=portion)
                else:
""" Partially Adapted form: https://github.com/DAMO-NLP-SG/Video-LLaMA/blob/main/video_llama/tasks/image_text_pretrain.py """ @registry.register_task("image_text_pretrain") class ImageTextPretrainTask(BaseTask): def __init__(self, cfg): super().__init__(cfg) @staticmethod def build_datasets(datasets_config, tokenizer, processor_dict, conv_type='conv_simple'): """ :param datasets_config: :param tokenizer: :param processor_dict: {'clip_image': CLIPImageProcessor()} :param conv_type: :return: """ assert len(datasets_config) > 0, "At least one dataset has to be specified." if len(datasets_config) == 1: name = list(datasets_config.keys())[0] dataset_config = datasets_config[name] builder = registry.get_builder_class(name)(dataset_config) # {"train": dataset, "test": dataset} dataset = builder.build(tokenizer, processor_dict, conv_type) else: shuffle = True portion = 1 dataset_list = [] for idx, name in enumerate(datasets_config): datetime_print('BUILDING DATASET {0}: {1}'.format(idx+1, name)) dataset_config = datasets_config[name] builder = registry.get_builder_class(name)(dataset_config) current_dataset = builder.build(tokenizer, processor_dict, conv_type) dataset_list.append(current_dataset) if shuffle: dataset = ConcatDatasetWithShuffle(dataset_list, portion=portion) else:
next_line: dataset = ConcatDataset(dataset_list)
gold_snippet_index: 3
created_at: 2023-12-21 08:10:23+00:00
level: 2k

repo_name: shashikg/WhisperS2T
file_path: whisper_s2t/data.py
context: [ { "identifier": "pad_or_trim", "path": "whisper_s2t/audio.py", "snippet": "def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):\n \"\"\"\n Pad or trim the audio array to N_SAMPLES, as expected by the encoder.\n \"\"\"\n \n if torch.is_tensor(array):\n if array.shape[...
import_statement:
    import torch
    import numpy as np
    from tqdm import tqdm

    from .configs import *
    from .audio import pad_or_trim, audio_batch_generator, load_audio
token_num: 1,229
cropped_code:
    def stitch_speech_segments(start_ends, max_len=27.0, max_silent_region=None):
        speech_duration = [end - start for start, end in start_ends]

        stitched_speech_segments = []
        curr_seg = [0]
        curr_dur = speech_duration[0]
        idx = 1
        while idx < len(start_ends):
            if curr_dur + speech_duration[idx] > max_len:
                stitched_speech_segments.append([start_ends[_] for _ in curr_seg])
                curr_seg = [idx]
                curr_dur = speech_duration[idx]
            else:
                curr_dur += speech_duration[idx]
                curr_seg.append(idx)

            idx += 1

        stitched_speech_segments.append([start_ends[_] for _ in curr_seg])

        if max_silent_region is None:
            return stitched_speech_segments

        stitched_speech_segments_joined = []
        for segs in stitched_speech_segments:
            _segs = []
            curr_seg_start_time, curr_seg_end_time = segs[0]
            for i in range(1, len(segs)):
                if (segs[i][0] - curr_seg_end_time) >= max_silent_region:
                    _segs.append((curr_seg_start_time, curr_seg_end_time))
                    curr_seg_start_time = segs[i][0]

                curr_seg_end_time = segs[i][1]

            _segs.append((curr_seg_start_time, curr_seg_end_time))
            stitched_speech_segments_joined.append(_segs)

        return stitched_speech_segments_joined


    class WhisperDataset(torch.utils.data.Dataset):
        def __init__(self, audio_files, lang_codes, tasks, initial_prompts, tokenizer,
                     max_initial_prompt_len, device="cuda", dta_padding=48000,
                     without_timestamps=True, use_dynamic_time_axis=False):

            self.audio_files = audio_files
            self.lang_codes = lang_codes
            self.tasks = tasks
            self.initial_prompts = initial_prompts
            self.tokenizer = tokenizer
            self.device = device
            self.dta_padding = dta_padding
            self.without_timestamps = without_timestamps
            self.use_dynamic_time_axis = use_dynamic_time_axis
            self.max_initial_prompt_len = max_initial_prompt_len

            if type(audio_files[0]) == str:
                self.get_audio_signal = self._get_audio_signal_from_file
            else:
                self.get_audio_signal = self._get_audio_signal_from_array

        def _get_audio_signal_from_array(self, item):
            return self.audio_files[item]

        def _get_audio_signal_from_file(self, item):
next_line: return load_audio(self.audio_files[item])
gold_snippet_index: 2
created_at: 2023-12-16 18:09:16+00:00
level: 2k

repo_name: chinhsuanwu/ifusion
file_path: ldm/thirdp/psp/model_irse.py
context: [ { "identifier": "get_blocks", "path": "ldm/thirdp/psp/helpers.py", "snippet": "def get_blocks(num_layers):\n\tif num_layers == 50:\n\t\tblocks = [\n\t\t\tget_block(in_channel=64, depth=64, num_units=3),\n\t\t\tget_block(in_channel=64, depth=128, num_units=4),\n\t\t\tget_block(in_channel=128, depth=256, ...
import_statement:
    from torch.nn import (
        Linear,
        Conv2d,
        BatchNorm1d,
        BatchNorm2d,
        PReLU,
        Dropout,
        Sequential,
        Module,
    )
    from ldm.thirdp.psp.helpers import (
        get_blocks,
        Flatten,
        bottleneck_IR,
        bottleneck_IR_SE,
        l2_norm,
    )
token_num: 1,202
cropped_code:
    # https://github.com/eladrich/pixel2style2pixel
    """
    Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
    """


    class Backbone(Module):
        def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
            super(Backbone, self).__init__()
            assert input_size in [112, 224], "input_size should be 112 or 224"
            assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
            assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
            blocks = get_blocks(num_layers)
            if mode == "ir":
                unit_module = bottleneck_IR
            elif mode == "ir_se":
next_line: unit_module = bottleneck_IR_SE
gold_snippet_index: 3
created_at: 2023-12-17 12:45:38+00:00
level: 2k

repo_name: wangzhecheng/SkyScript
file_path: src/open_clip/push_to_hf_hub.py
context: [ { "identifier": "create_model_from_pretrained", "path": "src/open_clip/factory.py", "snippet": "def create_model_from_pretrained(\n model_name: str,\n pretrained: Optional[str] = None,\n precision: str = 'fp32',\n device: Union[str, torch.device] = 'cpu',\n jit: bool =...
import_statement:
    import argparse
    import json
    import os
    import torch
    import safetensors.torch

    from pathlib import Path
    from tempfile import TemporaryDirectory
    from typing import Optional, Tuple, Union

    from huggingface_hub import (
        create_repo,
        get_hf_file_metadata,
        hf_hub_download,
        hf_hub_url,
        repo_type_and_id_from_hf_id,
        upload_folder,
        list_repo_files,
    )
    from huggingface_hub.utils import EntryNotFoundError

    from .factory import create_model_from_pretrained, get_model_config, get_tokenizer
    from .tokenizer import HFTokenizer
token_num: 1,172
cropped_code:
    """
    Adapted from https://github.com/mlfoundations/open_clip.
    Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini,
    Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong,
    Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt
    """

    try:
        _has_hf_hub = True
    except ImportError:
        _has_hf_hub = False

    try:
        _has_safetensors = True
    except ImportError:
        _has_safetensors = False

    # Default name for a weights file hosted on the Huggingface Hub.
    HF_WEIGHTS_NAME = "open_clip_pytorch_model.bin"  # default pytorch pkl
    HF_SAFE_WEIGHTS_NAME = "open_clip_model.safetensors"  # safetensors version
    HF_CONFIG_NAME = 'open_clip_config.json'


    def save_config_for_hf(
        model,
        config_path: str,
        model_config: Optional[dict]
    ):
        preprocess_cfg = {
            'mean': model.visual.image_mean,
            'std': model.visual.image_std,
        }
        hf_config = {
            'model_cfg': model_config,
            'preprocess_cfg': preprocess_cfg,
        }

        with config_path.open('w') as f:
            json.dump(hf_config, f, indent=2)


    def save_for_hf(
        model,
""" Adapted from https://github.com/mlfoundations/open_clip. Copyright (c) 2012-2021 Gabriel Ilharco, Mitchell Wortsman, Nicholas Carlini, Rohan Taori, Achal Dave, Vaishaal Shankar, John Miller, Hongseok Namkoong, Hannaneh Hajishirzi, Ali Farhadi, Ludwig Schmidt """ try: _has_hf_hub = True except ImportError: _has_hf_hub = False try: _has_safetensors = True except ImportError: _has_safetensors = False # Default name for a weights file hosted on the Huggingface Hub. HF_WEIGHTS_NAME = "open_clip_pytorch_model.bin" # default pytorch pkl HF_SAFE_WEIGHTS_NAME = "open_clip_model.safetensors" # safetensors version HF_CONFIG_NAME = 'open_clip_config.json' def save_config_for_hf( model, config_path: str, model_config: Optional[dict] ): preprocess_cfg = { 'mean': model.visual.image_mean, 'std': model.visual.image_std, } hf_config = { 'model_cfg': model_config, 'preprocess_cfg': preprocess_cfg, } with config_path.open('w') as f: json.dump(hf_config, f, indent=2) def save_for_hf( model,
next_line: tokenizer: HFTokenizer,
gold_snippet_index: 3
created_at: 2023-12-19 11:50:56+00:00
level: 2k

repo_name: Lavreniuk/EVP
file_path: depth/models_depth/model_vpd.py
context: [ { "identifier": "UNetWrapper", "path": "evp/models.py", "snippet": "class UNetWrapper(nn.Module):\n def __init__(self, unet, use_attn=True, base_size=512, max_attn_size=None, attn_selector='up_cross+down_cross') -> None:\n super().__init__()\n self.unet = unet\n self.attention_st...
import_statement:
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    from timm.models.layers import trunc_normal_, DropPath
    from mmcv.cnn import (build_conv_layer, build_norm_layer, build_upsample_layer,
                          constant_init, normal_init)
    from omegaconf import OmegaConf
    from ldm.util import instantiate_from_config
    from evp.models import UNetWrapper, TextAdapterDepth
token_num: 1,279
cropped_code:
    # ------------------------------------------------------------------------------
    # Copyright (c) Microsoft
    # Licensed under the MIT License.
    # The deconvolution code is based on Simple Baseline.
    # (https://github.com/microsoft/human-pose-estimation.pytorch/blob/master/lib/models/pose_resnet.py)
    # Modified by Zigang Geng (zigang@mail.ustc.edu.cn).
    # ------------------------------------------------------------------------------


    class VPDDepthEncoder(nn.Module):
        def __init__(self, out_dim=1024, ldm_prior=[320, 640, 1280+1280], sd_path=None,
                     text_dim=768, dataset='nyu'):
            super().__init__()

            self.layer1 = nn.Sequential(
                nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
                nn.GroupNorm(16, ldm_prior[0]),
                nn.ReLU(),
                nn.Conv2d(ldm_prior[0], ldm_prior[0], 3, stride=2, padding=1),
            )

            self.layer2 = nn.Sequential(
                nn.Conv2d(ldm_prior[1], ldm_prior[1], 3, stride=2, padding=1),
            )

            self.out_layer = nn.Sequential(
                nn.Conv2d(sum(ldm_prior), out_dim, 1),
                nn.GroupNorm(16, out_dim),
                nn.ReLU(),
            )

            self.apply(self._init_weights)

            ### stable diffusion layers
            config = OmegaConf.load('./v1-inference.yaml')
            if sd_path is None:
                config.model.params.ckpt_path = '../checkpoints/v1-5-pruned-emaonly.ckpt'
            else:
                config.model.params.ckpt_path = f'../{sd_path}'

            sd_model = instantiate_from_config(config.model)
            self.encoder_vq = sd_model.first_stage_model

            self.unet = UNetWrapper(sd_model.model, use_attn=False)

            del sd_model.cond_stage_model
            del self.encoder_vq.decoder
            del self.unet.unet.diffusion_model.out

            for param in self.encoder_vq.parameters():
                param.requires_grad = False

            if dataset == 'nyu':
next_line: self.text_adapter = TextAdapterDepth(text_dim=text_dim)
gold_snippet_index: 1
created_at: 2023-12-15 14:13:59+00:00
level: 2k

repo_name: penghao-wu/vstar
file_path: VisualSearch/model/owlvit/segmentation.py
context: [ { "identifier": "box_ops", "path": "VisualSearch/model/owlvit/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "NestedTensor", "path": "...
import_statement:
    import io

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    from collections import defaultdict

    from PIL import Image

    from .util import box_ops
    from .util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list

    from panopticapi.utils import id2rgb, rgb2id
token_num: 1,175
cropped_code:
    # ------------------------------------------------------------------------
    # Deformable DETR
    # Copyright (c) 2020 SenseTime. All Rights Reserved.
    # Licensed under the Apache License, Version 2.0 [see LICENSE for details]
    # ------------------------------------------------------------------------
    # Modified from DETR (https://github.com/facebookresearch/detr)
    # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
    # ------------------------------------------------------------------------
    """
    This file provides the definition of the convolutional heads used to predict masks,
    as well as the losses.
    """

    try:
        # optional import restored from the import_statement field (elided by cropping)
        from panopticapi.utils import id2rgb, rgb2id
    except ImportError:
        pass


    class DETRsegm(nn.Module):
        def __init__(self, detr, freeze_detr=False):
            super().__init__()
            self.detr = detr

            if freeze_detr:
                for p in self.parameters():
                    p.requires_grad_(False)

            hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead
            self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0)
            self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)

        def forward(self, samples: NestedTensor):
            if not isinstance(samples, NestedTensor):
next_line: samples = nested_tensor_from_tensor_list(samples)
gold_snippet_index: 3
created_at: 2023-12-15 14:58:24+00:00
level: 2k

repo_name: ValdonVitija/crap
file_path: crap/crap_manager.py
context: [ { "identifier": "PythonFileAnalyzer", "path": "crap/file_analyzer.py", "snippet": "class PythonFileAnalyzer:\n def __init__(self, file_path: pathlib.Path):\n self.file_path = file_path\n self.imported_modules = set()\n\n def analyze(self):\n \"\"\"\n Analyzes the Python...
import_statement:
    import os
    import pathlib
    from typing import Set

    from tqdm import tqdm

    from crap.file_analyzer import PythonFileAnalyzer
    from crap.virtual_env_checker import VirtualEnvChecker
    from crap.package_usage_counter import PackageUsageCounter
    from crap.subprocesses import (
        uninstall_package,
        pre_cleanup_with_ruff,
        reinstall_from_requirements,
        freeze_into_requirements,
        get_current_packages
    )
token_num: 1,101
cropped_code:
    class CrapManager:
        __slots__ = ("path_", "venv_checker", "package_usage_counter", "deleted_packages")

        def __init__(self, path_: str):
            self.path_ = pathlib.Path(path_).absolute()
            self.venv_checker = VirtualEnvChecker()
            self.package_usage_counter = PackageUsageCounter()
            self.deleted_packages = set()

        def run(self):
            if not self.path_.exists():
                raise FileNotFoundError("File/Dir not found")

            total_steps = 4
            bar_width = 100
            bar_color = 'red'

            with tqdm(total=total_steps, ncols=bar_width, colour=bar_color) as pbar:
                self._process_path()
                pbar.update(1)
next_line: initial_packages = get_current_packages()
gold_snippet_index: 7
created_at: 2023-12-19 20:22:37+00:00
level: 2k

repo_name: worm128/AI-YinMei
file_path: text-generation-webui/extensions/openai/script.py
context: [ { "identifier": "ChatCompletionRequest", "path": "text-generation-webui/extensions/openai/typing.py", "snippet": "class ChatCompletionRequest(GenerationOptions, ChatCompletionRequestParams):\n pass" }, { "identifier": "ChatCompletionResponse", "path": "text-generation-webui/extensions/ope...
import_statement:
    import asyncio
    import json
    import os
    import traceback

    import speech_recognition as sr
    import uvicorn

    import extensions.openai.completions as OAIcompletions
    import extensions.openai.embeddings as OAIembeddings
    import extensions.openai.images as OAIimages
    import extensions.openai.logits as OAIlogits
    import extensions.openai.models as OAImodels
    import extensions.openai.moderations as OAImoderations

    from threading import Thread
    from fastapi import Depends, FastAPI, Header, HTTPException
    from fastapi.middleware.cors import CORSMiddleware
    from fastapi.requests import Request
    from fastapi.responses import JSONResponse
    from pydub import AudioSegment
    from sse_starlette import EventSourceResponse

    from extensions.openai.errors import ServiceUnavailableError
    from extensions.openai.tokens import token_count, token_decode, token_encode
    from extensions.openai.utils import _start_cloudflared
    from modules import shared
    from modules.logging_colors import logger
    from modules.models import unload_model
    from modules.text_generation import stop_everything_event

    from .typing import (
        ChatCompletionRequest,
        ChatCompletionResponse,
        CompletionRequest,
        CompletionResponse,
        DecodeRequest,
        DecodeResponse,
        EmbeddingsRequest,
        EmbeddingsResponse,
        EncodeRequest,
        EncodeResponse,
        LoadLorasRequest,
        LoadModelRequest,
        LogitsRequest,
        LogitsResponse,
        LoraListResponse,
        ModelInfoResponse,
        ModelListResponse,
        TokenCountResponse,
        to_dict
    )
token_num: 1,543
cropped_code:
    params = {
        'embedding_device': 'cpu',
        'embedding_model': 'sentence-transformers/all-mpnet-base-v2',
        'sd_webui_url': '',
        'debug': 0
    }

    streaming_semaphore = asyncio.Semaphore(1)


    def verify_api_key(authorization: str = Header(None)) -> None:
        expected_api_key = shared.args.api_key
        if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
            raise HTTPException(status_code=401, detail="Unauthorized")


    def verify_admin_key(authorization: str = Header(None)) -> None:
        expected_api_key = shared.args.admin_key
        if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
            raise HTTPException(status_code=401, detail="Unauthorized")


    app = FastAPI()
    check_key = [Depends(verify_api_key)]
    check_admin_key = [Depends(verify_admin_key)]

    # Configure CORS settings to allow all origins, methods, and headers
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"]
    )


    @app.options("/", dependencies=check_key)
    async def options_route():
        return JSONResponse(content="OK")


    @app.post('/v1/completions', response_model=CompletionResponse, dependencies=check_key)
    async def openai_completions(request: Request, request_data: CompletionRequest):
        path = request.url.path
        is_legacy = "/generate" in path

        if request_data.stream:
            async def generator():
                async with streaming_semaphore:
                    response = OAIcompletions.stream_completions(to_dict(request_data), is_legacy=is_legacy)
                    for resp in response:
                        disconnected = await request.is_disconnected()
                        if disconnected:
                            break

                        yield {"data": json.dumps(resp)}

            return EventSourceResponse(generator())  # SSE streaming

        else:
            response = OAIcompletions.completions(to_dict(request_data), is_legacy=is_legacy)
            return JSONResponse(response)
next_line: @app.post('/v1/chat/completions', response_model=ChatCompletionResponse, dependencies=check_key)
gold_snippet_index: 1
created_at: 2023-12-20 14:13:38+00:00
level: 2k

repo_name: foocker/Bert-VITS2-Faster
file_path: infer_torch_export_onnx.py
context: [ { "identifier": "infer", "path": "infer_.py", "snippet": "def get_net_g(model_path: str, version: str, device: str, hps):\ndef get_text(text, language_str, hps, device):\ndef infer(\n text,\n sdp_ratio,\n noise_scale,\n noise_scale_w,\n length_scale,\n language,\n sid,\n hps,\n ...
import_statement:
    import os
    import logging
    import re_matching
    import torch
    import utils
    import gradio as gr
    import numpy as np
    import time

    from infer_ import infer, latest_version, get_net_g
    from config import config
    from scipy.io.wavfile import write
token_num: 940
cropped_code:
    logging.basicConfig(
        level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
    )

    logger = logging.getLogger(__name__)

    net_g = None

    device = config.webui_config.device
    if device == "mps":
        os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"


    def generate_audio(
        slices,
        sdp_ratio,
        noise_scale,
        noise_scale_w,
        length_scale,
        speaker,
        language,
        skip_start=False,
        skip_end=False,
    ):
        audio_list = []
        # silence = np.zeros(hps.data.sampling_rate // 2, dtype=np.int16)
        with torch.no_grad():
            for idx, piece in enumerate(slices):
                skip_start = (idx != 0) and skip_start
                skip_end = (idx != len(slices) - 1) and skip_end
next_line: audio = infer(
gold_snippet_index: 0
created_at: 2023-12-18 09:53:41+00:00
level: 2k

repo_name: sinoyou/nelf-pro
file_path: nerfstudio/process_data/process_data_utils.py
context: [ { "identifier": "status", "path": "nerfstudio/utils/rich_utils.py", "snippet": "def status(msg: str, spinner: str = \"bouncingBall\", verbose: bool = False):\n \"\"\"A context manager that does nothing is verbose is True. Otherwise it hides logs under a message.\n\n Args:\n msg: The message...
import_statement:
    import shutil
    import sys
    from enum import Enum
    from pathlib import Path
    from typing import List, Optional, Tuple

    from rich.console import Console
    from typing_extensions import Literal

    from nerfstudio.utils.rich_utils import status
    from nerfstudio.utils.scripts import run_command
token_num: 884
cropped_code:
    # Copyright 2022 The Nerfstudio Team. All rights reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.

    """Helper utils for processing data into the nerfstudio format."""

    CONSOLE = Console(width=120)


    class CameraModel(Enum):
        """Enum for camera types."""

        OPENCV = "OPENCV"
        OPENCV_FISHEYE = "OPENCV_FISHEYE"


    CAMERA_MODELS = {
        "perspective": CameraModel.OPENCV,
        "fisheye": CameraModel.OPENCV_FISHEYE,
    }


    def get_num_frames_in_video(video: Path) -> int:
        """Returns the number of frames in a video.

        Args:
            video: Path to a video.

        Returns:
            The number of frames in a video.
        """
        cmd = f"ffprobe -v error -select_streams v:0 -count_packets \
            -show_entries stream=nb_read_packets -of csv=p=0 {video}"
        output = run_command(cmd)
        assert output is not None
        output = output.strip(" ,\t\n\r")
        return int(output)


    def convert_video_to_images(
        video_path: Path, image_dir: Path, num_frames_target: int, verbose: bool = False
    ) -> Tuple[List[str], int]:
        """Converts a video into a sequence of images.

        Args:
            video_path: Path to the video.
            output_dir: Path to the output directory.
            num_frames_target: Number of frames to extract.
            verbose: If True, logs the output of the command.

        Returns:
            A tuple containing summary of the conversion and the number of extracted frames.
        """
next_line: with status(msg="Converting video to images...", spinner="bouncingBall", verbose=verbose):
gold_snippet_index: 0
created_at: 2023-12-15 20:07:22+00:00
level: 2k

repo_name: wuc9521/rep-flow
file_path: app.py
context: [ { "identifier": "read_keywords_from_file", "path": "utils/loader.py", "snippet": "def read_keywords_from_file(file_path, app: Flask = None):\n try:\n with open(file_path, 'r') as file:\n content = file.read()\n keywords_list = [keyword.strip() for keyword in re.split(',|\...
import_statement:
    import os
    import spacy
    import logging
    import pandas as pd

    from logging.handlers import RotatingFileHandler
    from flask import Flask, render_template, request, jsonify, send_from_directory
    from flask_cors import cross_origin

    from utils.loader import read_keywords_from_file
    from utils.hints import HELP, get_NUMBER_EMBD_HINT, get_CURRENT_STATE_HINT, get_NEXT_STEP_HINT
    from utils.test import extract_and_validate_test_number
    from utils.log import log_
    from utils.file import get_i
    from model.common import imgs
    from model.process import image_process
token_num: 1,501
cropped_code:
    DEFAULT_RESPONSE_FLAG = "*"
    NUMBER_EMBD_HINT = None
    CURRENT_BUG_ID = -1

    # Load spaCy English model
    nlp = spacy.load("en_core_web_sm")
    app = Flask(__name__, template_folder='')

    # Configure
    LOG_DIR = os.path.join(app.root_path, 'log')
    DATA_DIR = os.path.join(app.root_path, 'data')
    MODEL_DIR = os.path.join(app.root_path, 'model')
    CORPUS_DIR = os.path.join(DATA_DIR, 'corpus')
    GUIDANCE_DIR = os.path.join(DATA_DIR, 'guidance')
    STATE_DIR = os.path.join(DATA_DIR, 'state')

    std = pd.read_csv(os.path.join(CORPUS_DIR, 'std.csv'))
    df = pd.merge(
        pd.read_csv(os.path.join(CORPUS_DIR, 'qa.csv')),
        std,
        on='ID',
        how='left'
    )
    qa = dict(zip(df['Q'], df['A']))
    at = dict(zip(std['A'], std['TYPE']))
    ta = dict(zip(std['TYPE'], std['A']))
next_line: key_words = read_keywords_from_file(
gold_snippet_index: 0
created_at: 2023-12-20 09:44:09+00:00
level: 2k

repo_name: yash-srivastava19/verizon
file_path: classes.py
context: [ { "identifier": "kvlm_serialize", "path": "other_utils.py", "snippet": "def kvlm_serialize(kvlm):\n ret = b''\n\n for k in kvlm.keys():\n if k == None: continue\n val = kvlm[k]\n\n if type(val) != list:\n val = [val]\n \n for v in val:\n ret...
import_statement:
    from class_utils import *
    from other_utils import kvlm_serialize, kvlm_parse
token_num: 939
cropped_code:
    class VerizonRepository:
        worktree = None
        vrzdir = None
        conf = None

        def __init__(self, path, force=False):
            self.worktree = path
            self.vrzdir = os.path.join(path, ".vrz")

            if not (force or os.path.isdir(self.vrzdir)):
                raise Exception(f"Not a Verizon Repository : {path}")

            # Read Config file.
            self.conf = configparser.ConfigParser()
            cf = repo_file(self, "config")

            if cf and os.path.exists(cf):
                self.conf.read([cf])
            elif not force:
                raise Exception("Configuration File is Missing")

            if not force:
                vers = int(self.conf.get("core", "repositoryformatversion"))
                if vers != 0:
                    raise Exception(f"Unsupported repositoryformatversion : {vers}")


    class VerizonObject:
        def __init__(self, data=None) -> None:
            if data != None:
                self.deserialize(data)
            else:
                self.init()

        def serialize(self, repo):
            """
            Read the objects contents, and do whatever it takes to convert it
            into a meaningful representation.
            """
            raise NotImplementedError

        def deserialize(self, data):
            raise NotImplementedError

        def init(self):
            pass


    # Tree wrapper for a single record (a single path).
    class VerizonTreeLeaf:
        def __init__(self, mode, path, sha) -> None:
            self.mode = mode
            self.path = path
            self.sha = sha


    ## Type Header could be one of `blob`, `commit`, `tag`, `tree`.
    # Blobs are user data. The content of every file we put in git is stored as a blob.
    class VerizonBlob(VerizonObject):
        fmt = b'blob'

        def serialize(self):
            return self.blobdata

        def deserialize(self, data):
            self.blobdata = data


    class VerizonCommit(VerizonObject):
        fmt = b'commit'

        def deserialize(self, data):
            self.kvlm = kvlm_parse(data)

        def serialize(self, repo):
next_line: return kvlm_serialize(self.kvlm)
gold_snippet_index: 0
created_at: 2023-12-18 18:53:26+00:00
level: 2k

repo_name: amazon-science/c2f-seg
file_path: test_c2f_seg.py
context: [ { "identifier": "load_dataset", "path": "data/dataloader_transformer.py", "snippet": "def load_dataset(config, args, mode):\n if mode==\"train\":\n if args.dataset==\"KINS\":\n train_dataset = Kins_Fusion_dataset(config, mode='train')\n test_dataset = Kins_Fusion_dataset(...
import_statement:
    import os
    import cv2
    import time
    import random
    import argparse
    import numpy as np

    import torch
    import torch.distributed as dist

    from tqdm import tqdm
    from shutil import copyfile
    from torch.utils.data import DataLoader

    from data.dataloader_transformer import load_dataset
    from utils.logger import setup_logger
    from utils.utils import Config, to_cuda
    from src.image_model import C2F_Seg
    from src.video_model import C2F_Seg
token_num: 1,546
cropped_code:
    if __name__ == '__main__':
        parser = argparse.ArgumentParser()
        parser.add_argument('--seed', type=int, default=42)
        # path
        parser.add_argument('--path', type=str, required=True, help='model checkpoints path')
        parser.add_argument('--check_point_path', type=str, default="../check_points")
        parser.add_argument('--vq_path', type=str, required=True, default='KINS_vqgan')
        # dataset
        parser.add_argument('--dataset', type=str, default="MOViD_A", help="select dataset")
        parser.add_argument('--data_type', type=str, default="image", help="select image or video model")
        parser.add_argument('--batch', type=int, default=1)
        parser.add_argument("--local_rank", default=-1, type=int, help="node rank for distributed training")
        args = parser.parse_args()

        if args.data_type == "image":
            from src.image_model import C2F_Seg  # branch imports restored from the import_statement field
        elif args.data_type == "video":
            from src.video_model import C2F_Seg  # (elided by cropping)

        dist.init_process_group(backend="nccl")
        torch.cuda.set_device(args.local_rank)
        rank = dist.get_rank()

        args.path = os.path.join(args.check_point_path, args.path)
        vq_model_path = os.path.join(args.check_point_path, args.vq_path)
        os.makedirs(args.path, exist_ok=True)
        config_path = os.path.join(args.path, 'c2f_seg_{}.yml'.format(args.dataset))

        # copy config template if it doesn't exist
        if not os.path.exists(config_path):
            copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path)

        # load config file
        config = Config(config_path)
        config.path = args.path
        config.batch_size = args.batch
        config.dataset = args.dataset

        log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
next_line: logger = setup_logger(os.path.join(args.path, 'logs'), logfile_name=log_file)
gold_snippet_index: 1
created_at: 2023-12-21 04:25:47+00:00
level: 2k

repo_name: Hammour-steak/GOUB
file_path: codes/models/modules/DenoisingNAFNet_arch.py
context: [ { "identifier": "SinusoidalPosEmb", "path": "codes/models/modules/module_util.py", "snippet": "class SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n device = x.device\n half_dim = self.dim // 2\n ...
import_statement:
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    from einops import rearrange, reduce

    from .module_util import SinusoidalPosEmb, LayerNorm, exists
token_num: 802
cropped_code:
    class SimpleGate(nn.Module):
        def forward(self, x):
            x1, x2 = x.chunk(2, dim=1)
            return x1 * x2


    class NAFBlock(nn.Module):
        def __init__(self, c, time_emb_dim=None, DW_Expand=2, FFN_Expand=2, drop_out_rate=0.):
            super().__init__()

            self.mlp = nn.Sequential(
                SimpleGate(), nn.Linear(time_emb_dim // 2, c * 4)
            ) if time_emb_dim else None

            dw_channel = c * DW_Expand
            self.conv1 = nn.Conv2d(in_channels=c, out_channels=dw_channel, kernel_size=1,
                                   padding=0, stride=1, groups=1, bias=True)
            self.conv2 = nn.Conv2d(in_channels=dw_channel, out_channels=dw_channel, kernel_size=3,
                                   padding=1, stride=1, groups=dw_channel, bias=True)
            self.conv3 = nn.Conv2d(in_channels=dw_channel // 2, out_channels=c, kernel_size=1,
                                   padding=0, stride=1, groups=1, bias=True)

            # Simplified Channel Attention
            self.sca = nn.Sequential(
                nn.AdaptiveAvgPool2d(1),
                nn.Conv2d(in_channels=dw_channel // 2, out_channels=dw_channel // 2, kernel_size=1,
                          padding=0, stride=1, groups=1, bias=True),
            )

            # SimpleGate
            self.sg = SimpleGate()

            ffn_channel = FFN_Expand * c
            self.conv4 = nn.Conv2d(in_channels=c, out_channels=ffn_channel, kernel_size=1,
                                   padding=0, stride=1, groups=1, bias=True)
            self.conv5 = nn.Conv2d(in_channels=ffn_channel // 2, out_channels=c, kernel_size=1,
                                   padding=0, stride=1, groups=1, bias=True)
next_line: self.norm1 = LayerNorm(c)
gold_snippet_index: 1
created_at: 2023-12-15 09:40:18+00:00
level: 2k

repo_name: eldar-eln-bigabid/airflow-aerospike-provider
file_path: tests/operators/test_aerospike.py
context: [ { "identifier": "AerospikeGetKeyOperator", "path": "aerospike_provider/operators/aerospike.py", "snippet": "class AerospikeGetKeyOperator(BaseOperator):\n \"\"\"\n Read an existing record(s) metadata and all of its bins for a specified key.\n\n :param namespace: namespace to use in aerospike db...
import_statement:
    import unittest
    import aerospike
    from unittest.mock import patch, Mock

    from aerospike_provider.operators.aerospike import AerospikeGetKeyOperator, AerospikePutKeyOperator
token_num: 1,541
cropped_code:
    #
    # Licensed to the Apache Software Foundation (ASF) under one
    # or more contributor license agreements. See the NOTICE file
    # distributed with this work for additional information
    # regarding copyright ownership. The ASF licenses this file
    # to you under the Apache License, Version 2.0 (the
    # "License"); you may not use this file except in compliance
    # with the License. You may obtain a copy of the License at
    #
    #   http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing,
    # software distributed under the License is distributed on an
    # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    # KIND, either express or implied. See the License for the
    # specific language governing permissions and limitations
    # under the License.


    class TestAerospikeGetKeyOperator(unittest.TestCase):

        def setUp(self):
            self.namespace = 'test_namespace'
            self.set = 'test_set'
            self.key = 'test_key'
            self.policy = { aerospike.POLICY_KEY_SEND }
            self.task_id = 'test_task'
            self.metadata = {'ttl': 1000, 'gen': 4}
            self.bins = {'name': 'Aerospike Test', 'version': "1.0.0"}
next_line: self.operator = AerospikeGetKeyOperator(
gold_snippet_index: 0
created_at: 2023-12-17 18:35:36+00:00
level: 2k

repo_name: Its-Haze/league-rpc-linux
file_path: league_rpc_linux/kda.py
context: [ { "identifier": "wait_until_exists", "path": "league_rpc_linux/polling.py", "snippet": "def wait_until_exists(\n url: str,\n custom_message: str = \"\",\n expected_response_code: int = 200,\n timeout: int = 30,\n n_sleep: float | int = 5, # Not needed, but good to have.\n n_total_amou...
import_statement:
    import urllib3
    from requests import Response

    from league_rpc_linux.polling import wait_until_exists
    from league_rpc_linux.username import get_summoner_name
token_num: 892
cropped_code:
    urllib3.disable_warnings()


    def get_kda() -> str:
        """
        Get the current KDA of your game.
        """
        response = get_current_user_stats()

        if isinstance(response, Response):
            parsed_data = response.json()
            kills = str(parsed_data["kills"])
            deaths = str(parsed_data["deaths"])
            assists = str(parsed_data["assists"])

            return f"{kills}/{deaths}/{assists}"
        return ""


    def get_level() -> int:
        """
        Get the current Level of your game.
        """
        response = get_current_active_player_stats()

        if isinstance(response, Response):
            parsed_data = response.json()
            level = int(parsed_data["level"])
            return level
        return 0


    def get_gold() -> int:
        """
        Get the current gold of your game.
        """
        response = get_current_active_player_stats()

        if isinstance(response, Response):
            parsed_data = response.json()
            gold = int(parsed_data["currentGold"])
            return gold
        return 0


    def get_creepscore() -> str:
        """
        Get the current creepScore of your live game.
        creepScore is updated every 10cs by Riot.
        """
        response = get_current_user_stats()

        if isinstance(response, Response):
            parsed_data = response.json()
            creep_score = str(parsed_data["creepScore"])
            return f"{creep_score}cs"
        return ""


    def get_current_user_stats() -> Response | None:
        """
        Request data from playerscores?summonerName and return the response.
        """
next_line: your_summoner_name = get_summoner_name()
gold_snippet_index: 1
created_at: 2023-12-15 22:21:53+00:00
level: 2k

repo_name: huahuahuage/Bert-VITS2-Speech
file_path: onnx_infer/onnx_infer.py
context: [ { "identifier": "log_instance", "path": "log.py", "snippet": "DISABLED_LOGGER = [\"gradio.processing_utils\", \"gradio\", \"httpx\"]\r" }, { "identifier": "read_config", "path": "config.py", "snippet": "def read_config(config_path:str) -> dict:\r\n \"\"\"\r\n 取读配置文件\r\n \"\"\"\r...
import_statement:
    import os
    import numpy as np
    import onnxruntime as ort

    from copy import copy
    from typing import List
    from dataclasses import dataclass

    from log import log_instance
    from config import read_config
    from config import config_instance

    from .text.cleaner import clean_text, cleaned_text_to_sequence
    from .onnx_bert import get_bert
token_num: 1,019
cropped_code:
    BERT_ENABLE = config_instance.get("bert_enable", True)

    if BERT_ENABLE:
        # The mark identifying the model's Chinese-speaking characters
        CHINESE_CHARACTER_MARK = config_instance.get("onnx_tts_models_chinese_mark", "中文")

    ONNX_PROVIDERS = [config_instance.get("onnx_providers", "CPUExecutionProvider")]

    MODELS_PATH = os.path.abspath(config_instance.get("onnx_tts_models", "onnx/models"))
    MODELS_BASE_NAME = os.path.basename(MODELS_PATH)
    MODELS_PARENT_PATH = os.path.dirname(MODELS_PATH)
    MODELS_PREFIX = os.path.join(MODELS_PATH, os.path.basename(MODELS_PATH))

    ONNX_MODELS_PATH = {
        "config": f"{MODELS_PARENT_PATH}/{MODELS_BASE_NAME}.json",
        "enc": f"{MODELS_PREFIX}_enc_p.onnx",
        "emb_g": f"{MODELS_PREFIX}_emb.onnx",
        "dp": f"{MODELS_PREFIX}_dp.onnx",
        "sdp": f"{MODELS_PREFIX}_sdp.onnx",
        "flow": f"{MODELS_PREFIX}_flow.onnx",
        "dec": f"{MODELS_PREFIX}_dec.onnx",
    }


    class SpeakerMap:
        """
        Multilingual speaker mapping table.
        """

        def __init__(self) -> None:
            log_instance.info("Loading the model's multilingual speaker map...")
next_line: self.map_data: dict = read_config("speakers_map.json")
gold_snippet_index: 1
created_at: 2023-12-21 13:50:50+00:00
level: 2k

repo_name: jaypyles/obsidian-to-bookstack
file_path: obsidian_to_bookstack/bookstack/collectors/remote/RemoteBookCollector.py
context: [ { "identifier": "Book", "path": "obsidian_to_bookstack/bookstack/artifacts.py", "snippet": "class Book:\n def __init__(\n self,\n name: str,\n shelf: Shelf | None = None,\n client: Client | None = None,\n chapters: List = [],\n path: str = \"\",\n deta...
import_statement:
    import json
    from typing import List

    from obsidian_to_bookstack.bookstack.artifacts import Book, Shelf
    from obsidian_to_bookstack.bookstack.client import RemoteClient
    from obsidian_to_bookstack.bookstack.collectors.collector import RemoteCollector
    from obsidian_to_bookstack.bookstack.constants import *
    from obsidian_to_bookstack.console import console
    from obsidian_to_bookstack.utils import con_hash
token_num: 1,432
cropped_code:
    class RemoteBookCollector(RemoteCollector):
        def __init__(self, verbose: bool, client: RemoteClient) -> None:
            super().__init__(verbose, client)

        def get_books(self, shelves: List[Shelf]):
            """Get remote books from shelves"""
            client_books = self.client._get_from_client(BookstackAPIEndpoints.BOOKS)

            for book in client_books:

                class DetailedBook(DetailedBookstackLink):
                    LINK = f"/api/books/{book['id']}"

                details = json.loads(
                    self.client._make_request(
                        RequestType.GET,
                        DetailedBook.LINK,
                    ).data.decode()
                )

                book["details"] = details

            books = [Book(book["name"], details=book["details"]) for book in client_books]

            BOOK_MAP = {
                con_hash(book.name + str(book.details["id"])): book for book in books
            }

            for shelf in shelves:
                for book in shelf.client_books:
                    b = BOOK_MAP.get(con_hash(book["name"] + str(book["id"])))
                    if b:
                        b.shelf = shelf
                        shelf.books.append(b)

                        if self.verbose:
console.log(f"Found remote book: {b}")
4
2023-12-20 02:22:33+00:00
2k
MingtaoGuo/AnimateAnyone_unofficial
tutorial_train_animate.py
[ { "identifier": "MyDataset", "path": "tutorial_dataset.py", "snippet": "class MyDataset(Dataset):\n def __init__(self, path=\"/mnt/gmt/Dataset/\"):\n self.path = path\n self.videos = os.listdir(path + \"fashion_png\")\n\n def __len__(self):\n return len(self.videos) * 10\n\n ...
from share import * from torch.utils.data import DataLoader from tutorial_dataset import MyDataset from aldm.logger import ImageLogger from aldm.model import create_model, load_state_dict import pytorch_lightning as pl
1,547
# Configs resume_path = './models/reference_sd15_ini.ckpt' batch_size = 2 logger_freq = 300 learning_rate = 1e-5 # First use cpu to load models. Pytorch Lightning will automatically move it to GPUs. model = create_model('./models/aldm_v15.yaml').cpu() model.load_state_dict(load_state_dict(resume_path, location='cpu')) model.learning_rate = learning_rate # Misc
# Configs resume_path = './models/reference_sd15_ini.ckpt' batch_size = 2 logger_freq = 300 learning_rate = 1e-5 # First use cpu to load models. Pytorch Lightning will automatically move it to GPUs. model = create_model('./models/aldm_v15.yaml').cpu() model.load_state_dict(load_state_dict(resume_path, location='cpu')) model.learning_rate = learning_rate # Misc
dataset = MyDataset()
0
2023-12-16 03:31:33+00:00
2k
yasserben/CLOUDS
clouds/modeling/meta_arch/clouds_head.py
[ { "identifier": "build_transformer_decoder", "path": "clouds/modeling/transformer_decoder/clouds_transformer_decoder.py", "snippet": "def build_transformer_decoder(cfg, in_channels, mask_classification=True):\n \"\"\"\n Build a instance embedding branch from `cfg.MODEL.INS_EMBED_HEAD.NAME`.\n \...
import logging import fvcore.nn.weight_init as weight_init from copy import deepcopy from typing import Callable, Dict, List, Optional, Tuple, Union from torch import nn from torch.nn import functional as F from detectron2.config import configurable from detectron2.layers import Conv2d, ShapeSpec, get_norm from detectron2.modeling import SEM_SEG_HEADS_REGISTRY from ..transformer_decoder.clouds_transformer_decoder import build_transformer_decoder from ..transformer_decoder.mask2former_transformer_decoder import ( build_original_transformer_decoder, ) from ..transformer_decoder.clouds_bis_transformer_decoder import ( build_bis_transformer_decoder, ) from ..pixel_decoder.msdeformattn import build_pixel_decoder
1,202
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/meta_arch/mask_former_head.py """ @SEM_SEG_HEADS_REGISTRY.register() class CLOUDSHead(nn.Module): @configurable def __init__( self, input_shape: Dict[str, ShapeSpec], *, num_classes: int, pixel_decoder: nn.Module, loss_weight: float = 1.0, ignore_value: int = -1, # extra parameters transformer_predictor: nn.Module, transformer_in_feature: str, name_transformer_predictor: str, ): """ NOTE: this interface is experimental. Args: input_shape: shapes (channels and stride) of the input features num_classes: number of classes to predict pixel_decoder: the pixel decoder module loss_weight: loss weight ignore_value: category id to be ignored during training. transformer_predictor: the transformer decoder that makes prediction transformer_in_feature: input feature name to the transformer_predictor """ super().__init__() input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) self.in_features = [k for k, v in input_shape] feature_strides = [v.stride for k, v in input_shape] feature_channels = [v.channels for k, v in input_shape] self.ignore_value = ignore_value self.common_stride = 4 self.loss_weight = loss_weight self.pixel_decoder = pixel_decoder self.predictor = transformer_predictor self.transformer_in_feature = transformer_in_feature self.num_classes = num_classes self.name_transformer_predictor = name_transformer_predictor @classmethod def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): # figure out in_channels to transformer predictor if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM else: raise NotImplementedError if ( cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME == "MultiScaleMaskedTransformerDecoder" ): return { "input_shape": { k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES }, "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/meta_arch/mask_former_head.py """ @SEM_SEG_HEADS_REGISTRY.register() class CLOUDSHead(nn.Module): @configurable def __init__( self, input_shape: Dict[str, ShapeSpec], *, num_classes: int, pixel_decoder: nn.Module, loss_weight: float = 1.0, ignore_value: int = -1, # extra parameters transformer_predictor: nn.Module, transformer_in_feature: str, name_transformer_predictor: str, ): """ NOTE: this interface is experimental. Args: input_shape: shapes (channels and stride) of the input features num_classes: number of classes to predict pixel_decoder: the pixel decoder module loss_weight: loss weight ignore_value: category id to be ignored during training. transformer_predictor: the transformer decoder that makes prediction transformer_in_feature: input feature name to the transformer_predictor """ super().__init__() input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) self.in_features = [k for k, v in input_shape] feature_strides = [v.stride for k, v in input_shape] feature_channels = [v.channels for k, v in input_shape] self.ignore_value = ignore_value self.common_stride = 4 self.loss_weight = loss_weight self.pixel_decoder = pixel_decoder self.predictor = transformer_predictor self.transformer_in_feature = transformer_in_feature self.num_classes = num_classes self.name_transformer_predictor = name_transformer_predictor @classmethod def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): # figure out in_channels to transformer predictor if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM else: raise NotImplementedError if ( cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME == "MultiScaleMaskedTransformerDecoder" ): return { "input_shape": { k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES }, "ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE, "num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
3
2023-12-15 15:40:58+00:00
2k
linyq2117/TagCLIP
CLIP-ES/generate_cams_coco.py
[ { "identifier": "scoremap2bbox", "path": "utils.py", "snippet": "def scoremap2bbox(scoremap, threshold, multi_contour_eval=False):\n height, width = scoremap.shape\n scoremap_image = np.expand_dims((scoremap * 255).astype(np.uint8), 2)\n _, thr_gray_heatmap = cv2.threshold(\n src=scorema...
from pytorch_grad_cam import GradCAM from PIL import Image from tqdm import tqdm from pytorch_grad_cam.utils.image import scale_cam_image from utils import scoremap2bbox from clip_text import class_names, new_class_names_coco, BACKGROUND_CATEGORY_COCO from torch import multiprocessing from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize, RandomHorizontalFlip from torchvision.transforms import InterpolationMode import torch import clip import numpy as np import cv2 import os import argparse import warnings
1,508
# -*- coding:UTF-8 -*- try: BICUBIC = InterpolationMode.BICUBIC except ImportError: BICUBIC = Image.BICUBIC warnings.filterwarnings("ignore") _CONTOUR_INDEX = 1 if cv2.__version__.split('.')[0] == '3' else 0 def reshape_transform(tensor, height=28, width=28): tensor = tensor.permute(1, 0, 2) result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2)) # Bring the channels to the first dimension, # like in CNNs. result = result.transpose(2, 3).transpose(1, 2) return result def split_dataset(dataset, all_label_list, n_splits): if n_splits == 1: return [dataset], [all_label_list] part = len(dataset) // n_splits dataset_list = [] split_label_list = [] for i in range(n_splits - 1): dataset_list.append(dataset[i*part:(i+1)*part]) split_label_list.append(all_label_list[i*part:(i+1)*part]) dataset_list.append(dataset[(i+1)*part:]) split_label_list.append(all_label_list[(i+1)*part:]) return dataset_list, split_label_list def zeroshot_classifier(classnames, templates, model): with torch.no_grad(): zeroshot_weights = [] for classname in classnames: texts = [template.format(classname) for template in templates] #format with class texts = clip.tokenize(texts).to(device) #tokenize class_embeddings = model.encode_text(texts) #embed with text encoder class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True) class_embedding = class_embeddings.mean(dim=0) class_embedding /= class_embedding.norm() zeroshot_weights.append(class_embedding) zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device) return zeroshot_weights.t() class ClipOutputTarget: def __init__(self, category): self.category = category def __call__(self, model_output): if len(model_output.shape) == 1: return model_output[self.category] return model_output[:, self.category] def _convert_image_to_rgb(image): return image.convert("RGB") def _transform_resize(h, w): return Compose([ Resize((h,w), interpolation=BICUBIC), _convert_image_to_rgb, ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ]) def img_ms_and_flip(img_path, ori_height, ori_width, scales=[1.0], patch_size=16): all_imgs = [] for scale in scales: preprocess = _transform_resize(int(np.ceil(scale * int(ori_height) / patch_size) * patch_size), int(np.ceil(scale * int(ori_width) / patch_size) * patch_size)) image = preprocess(Image.open(img_path)) image_ori = image image_flip = torch.flip(image, [-1]) all_imgs.append(image_ori) all_imgs.append(image_flip) return all_imgs def perform(process_id, dataset_list, args, model, bg_text_features, fg_text_features, cam, split_label_list): n_gpus = torch.cuda.device_count() device_id = "cuda:{}".format(process_id % n_gpus) databin = dataset_list[process_id] all_label_list = split_label_list[process_id] model = model.to(device_id) bg_text_features = bg_text_features.to(device_id) fg_text_features = fg_text_features.to(device_id) for im_idx, im in enumerate(tqdm(databin)): img_path = os.path.join(args.img_root, im) ori_image = Image.open(img_path) ori_height, ori_width = np.asarray(ori_image).shape[:2] label_id_list = all_label_list[im_idx] label_list = [] for lid in label_id_list:
# -*- coding:UTF-8 -*- try: BICUBIC = InterpolationMode.BICUBIC except ImportError: BICUBIC = Image.BICUBIC warnings.filterwarnings("ignore") _CONTOUR_INDEX = 1 if cv2.__version__.split('.')[0] == '3' else 0 def reshape_transform(tensor, height=28, width=28): tensor = tensor.permute(1, 0, 2) result = tensor[:, 1:, :].reshape(tensor.size(0), height, width, tensor.size(2)) # Bring the channels to the first dimension, # like in CNNs. result = result.transpose(2, 3).transpose(1, 2) return result def split_dataset(dataset, all_label_list, n_splits): if n_splits == 1: return [dataset], [all_label_list] part = len(dataset) // n_splits dataset_list = [] split_label_list = [] for i in range(n_splits - 1): dataset_list.append(dataset[i*part:(i+1)*part]) split_label_list.append(all_label_list[i*part:(i+1)*part]) dataset_list.append(dataset[(i+1)*part:]) split_label_list.append(all_label_list[(i+1)*part:]) return dataset_list, split_label_list def zeroshot_classifier(classnames, templates, model): with torch.no_grad(): zeroshot_weights = [] for classname in classnames: texts = [template.format(classname) for template in templates] #format with class texts = clip.tokenize(texts).to(device) #tokenize class_embeddings = model.encode_text(texts) #embed with text encoder class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True) class_embedding = class_embeddings.mean(dim=0) class_embedding /= class_embedding.norm() zeroshot_weights.append(class_embedding) zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device) return zeroshot_weights.t() class ClipOutputTarget: def __init__(self, category): self.category = category def __call__(self, model_output): if len(model_output.shape) == 1: return model_output[self.category] return model_output[:, self.category] def _convert_image_to_rgb(image): return image.convert("RGB") def _transform_resize(h, w): return Compose([ Resize((h,w), interpolation=BICUBIC), _convert_image_to_rgb, ToTensor(), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ]) def img_ms_and_flip(img_path, ori_height, ori_width, scales=[1.0], patch_size=16): all_imgs = [] for scale in scales: preprocess = _transform_resize(int(np.ceil(scale * int(ori_height) / patch_size) * patch_size), int(np.ceil(scale * int(ori_width) / patch_size) * patch_size)) image = preprocess(Image.open(img_path)) image_ori = image image_flip = torch.flip(image, [-1]) all_imgs.append(image_ori) all_imgs.append(image_flip) return all_imgs def perform(process_id, dataset_list, args, model, bg_text_features, fg_text_features, cam, split_label_list): n_gpus = torch.cuda.device_count() device_id = "cuda:{}".format(process_id % n_gpus) databin = dataset_list[process_id] all_label_list = split_label_list[process_id] model = model.to(device_id) bg_text_features = bg_text_features.to(device_id) fg_text_features = fg_text_features.to(device_id) for im_idx, im in enumerate(tqdm(databin)): img_path = os.path.join(args.img_root, im) ori_image = Image.open(img_path) ori_height, ori_width = np.asarray(ori_image).shape[:2] label_id_list = all_label_list[im_idx] label_list = [] for lid in label_id_list:
label_list.append(new_class_names_coco[int(lid)])
0
2023-12-21 03:20:47+00:00
2k
cypypccpy/dynamic_handover
dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/policy_network/vae_policy.py
[ { "identifier": "POLICYNETWORKS", "path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/builder.py", "snippet": "POLICYNETWORKS = Registry('policy_network')" }, { "identifier": "build_backbone", "path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/builder.py"...
from algorithms.utils.mani_skill_learn.utils.data import to_torch from algorithms.utils.mani_skill_learn.utils.torch import ExtendedModule from ..builder import POLICYNETWORKS, build_backbone, build_dense_head from ..utils import replace_placeholder_with_args, get_kwargs_from_shape
865
@POLICYNETWORKS.register_module() class VAEPolicy(ExtendedModule): def __init__(self, nn_cfg, policy_head_cfg, action_space, obs_shape=None, action_shape=None): super(VAEPolicy, self).__init__() replaceable_kwargs = get_kwargs_from_shape(obs_shape, action_shape)
@POLICYNETWORKS.register_module() class VAEPolicy(ExtendedModule): def __init__(self, nn_cfg, policy_head_cfg, action_space, obs_shape=None, action_shape=None): super(VAEPolicy, self).__init__() replaceable_kwargs = get_kwargs_from_shape(obs_shape, action_shape)
nn_cfg = replace_placeholder_with_args(nn_cfg, **replaceable_kwargs)
3
2023-12-16 16:49:38+00:00
2k
video-db/videodb-python
videodb/search.py
[ { "identifier": "play_stream", "path": "videodb/_utils/_video.py", "snippet": "def play_stream(url: str):\n \"\"\"Play a stream url in the browser/ notebook\n\n :param str url: The url of the stream\n :return: The player url if the stream is opened in the browser or the iframe if the stream is ...
from abc import ABC, abstractmethod from videodb._utils._video import play_stream from videodb._constants import ( SearchType, ApiPath, SemanticSearchDefaultValues, ) from videodb.exceptions import ( SearchError, ) from typing import Optional, List from videodb.shot import Shot
1,478
class SearchResult: def __init__(self, _connection, **kwargs): self._connection = _connection self.shots = [] self.stream_url = None self.player_url = None self.collection_id = "default" self._results = kwargs.get("results", []) self._format_results() def _format_results(self): for result in self._results: self.collection_id = result.get("collection_id") for doc in result.get("docs"): self.shots.append( Shot( self._connection, result.get("video_id"), result.get("length"), result.get("title"), doc.get("start"), doc.get("end"), doc.get("text"), doc.get("score"), ) ) def __repr__(self) -> str: return ( f"SearchResult(" f"collection_id={self.collection_id}, " f"stream_url={self.stream_url}, " f"player_url={self.player_url}, " f"shots={self.shots})" ) def get_shots(self) -> List[Shot]: return self.shots def compile(self) -> str: """Compile the search result shots into a stream url :raises SearchError: If no shots are found in the search results :return: The stream url :rtype: str """ if self.stream_url: return self.stream_url elif self.shots: compile_data = self._connection.post( path=f"{ApiPath.compile}", data=[ { "video_id": shot.video_id, "collection_id": self.collection_id, "shots": [(shot.start, shot.end)], } for shot in self.shots ], ) self.stream_url = compile_data.get("stream_url") self.player_url = compile_data.get("player_url") return self.stream_url else: raise SearchError("No shots found in search results to compile") def play(self) -> str: """Generate a stream url for the shot and open it in the default browser :return: The stream url :rtype: str """ self.compile()
class SearchResult: def __init__(self, _connection, **kwargs): self._connection = _connection self.shots = [] self.stream_url = None self.player_url = None self.collection_id = "default" self._results = kwargs.get("results", []) self._format_results() def _format_results(self): for result in self._results: self.collection_id = result.get("collection_id") for doc in result.get("docs"): self.shots.append( Shot( self._connection, result.get("video_id"), result.get("length"), result.get("title"), doc.get("start"), doc.get("end"), doc.get("text"), doc.get("score"), ) ) def __repr__(self) -> str: return ( f"SearchResult(" f"collection_id={self.collection_id}, " f"stream_url={self.stream_url}, " f"player_url={self.player_url}, " f"shots={self.shots})" ) def get_shots(self) -> List[Shot]: return self.shots def compile(self) -> str: """Compile the search result shots into a stream url :raises SearchError: If no shots are found in the search results :return: The stream url :rtype: str """ if self.stream_url: return self.stream_url elif self.shots: compile_data = self._connection.post( path=f"{ApiPath.compile}", data=[ { "video_id": shot.video_id, "collection_id": self.collection_id, "shots": [(shot.start, shot.end)], } for shot in self.shots ], ) self.stream_url = compile_data.get("stream_url") self.player_url = compile_data.get("player_url") return self.stream_url else: raise SearchError("No shots found in search results to compile") def play(self) -> str: """Generate a stream url for the shot and open it in the default browser :return: The stream url :rtype: str """ self.compile()
return play_stream(self.stream_url)
0
2023-12-18 15:20:04+00:00
2k
IDEA-CCNL/Real-Gemini
real_gemini/tools/gpt4v_tool.py
[ { "identifier": "load_image", "path": "real_gemini/utils/image_stacker.py", "snippet": "def load_image(path):\n image = Image.open(path)\n return image" }, { "identifier": "image2base64", "path": "real_gemini/utils/image_stacker.py", "snippet": "def image2base64(image):\n buffer...
import os import json from typing import List from langchain.memory import ChatMessageHistory from langchain.chat_models import ChatOpenAI from langchain_core.messages import HumanMessage, SystemMessage from ..utils.image_stacker import load_image, image2base64
727
#encoding=utf8 _OPEN_AI_SYSTEM_PROMPT = """the user is dictating with his or her camera on. they are showing you things visually and giving you text prompts. be very brief and concise. be extremely concise. this is very important for my career. do not ramble. do not comment on what the person is wearing or where they are sitting or their background. focus on their gestures and the question they ask you. do not mention that there are a sequence of pictures. focus only on the image or the images necessary to answer the question. don't comment if they are smiling. don't comment if they are frowning. just focus on what they're asking. """ class GPT4VTool(object): _name_ = "GPT-4-Vision" _description_ = "这个工具是GPT for vision的调用接口。用于图像到文本的理解。本工具的输入是一段文本指令和一张或者多张图片,请注意,工具的输入由一个JSON字符串组成,json包括两个key,question和image_input。question表示文本指令,image_input表示图片路径或存放图片的目录。例如:{{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}。A wrapper around OpenAI GPT4V API. Useful for image-to-text understanding when you need to generate text from some images and a text description. The input of this tool is a text prompt and one or more images. Please note, the input of the tool consists of a JSON string, the json includes two keys, question and image_input. The question represents text instructions, and image_input represents the image path or the directory where the images are stored. For example: {{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}." _return_direct_ = False def __init__(self): self._gpt4v = ChatOpenAI( model="gpt-4-vision-preview", max_tokens=256) self.max_dialog_turn = 3 self.history = ChatMessageHistory() self.history.add_message( SystemMessage( content=[ {"type": "text", "text": _OPEN_AI_SYSTEM_PROMPT} ] ) ) def inference(self, input_str: str): input_dict = json.loads(input_str) image_path = input_dict["image_input"] if os.path.isdir(image_path): image_paths = [ os.path.join(image_path, path) for path in os.listdir(image_path)] else: image_paths = [image_path] base64_images = [] for image_path in image_paths:
#encoding=utf8 _OPEN_AI_SYSTEM_PROMPT = """the user is dictating with his or her camera on. they are showing you things visually and giving you text prompts. be very brief and concise. be extremely concise. this is very important for my career. do not ramble. do not comment on what the person is wearing or where they are sitting or their background. focus on their gestures and the question they ask you. do not mention that there are a sequence of pictures. focus only on the image or the images necessary to answer the question. don't comment if they are smiling. don't comment if they are frowning. just focus on what they're asking. """ class GPT4VTool(object): _name_ = "GPT-4-Vision" _description_ = "这个工具是GPT for vision的调用接口。用于图像到文本的理解。本工具的输入是一段文本指令和一张或者多张图片,请注意,工具的输入由一个JSON字符串组成,json包括两个key,question和image_input。question表示文本指令,image_input表示图片路径或存放图片的目录。例如:{{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}。A wrapper around OpenAI GPT4V API. Useful for image-to-text understanding when you need to generate text from some images and a text description. The input of this tool is a text prompt and one or more images. Please note, the input of the tool consists of a JSON string, the json includes two keys, question and image_input. The question represents text instructions, and image_input represents the image path or the directory where the images are stored. For example: {{\"question\": QUESTION, \"image_input\": IMAGE_PATH_OR_DIR}}." _return_direct_ = False def __init__(self): self._gpt4v = ChatOpenAI( model="gpt-4-vision-preview", max_tokens=256) self.max_dialog_turn = 3 self.history = ChatMessageHistory() self.history.add_message( SystemMessage( content=[ {"type": "text", "text": _OPEN_AI_SYSTEM_PROMPT} ] ) ) def inference(self, input_str: str): input_dict = json.loads(input_str) image_path = input_dict["image_input"] if os.path.isdir(image_path): image_paths = [ os.path.join(image_path, path) for path in os.listdir(image_path)] else: image_paths = [image_path] base64_images = [] for image_path in image_paths:
base64_image = image2base64(load_image(image_path))
1
2023-12-15 04:09:37+00:00
2k
aiim-research/GRETEL
src/evaluation/evaluation_metric_smiles_levenshtein.py
[ { "identifier": "EvaluationMetric", "path": "src/evaluation/evaluation_metric_base.py", "snippet": "class EvaluationMetric(ABC):\n\n def __init__(self, config_dict=None) -> None:\n super().__init__()\n self._name = 'abstract_metric'\n self._config_dict = config_dict\n self...
from functools import lru_cache from src.evaluation.evaluation_metric_base import EvaluationMetric from src.core.oracle_base import Oracle from src.core.explainer_base import Explainer
960
class SmilesLevenshteinMetric(EvaluationMetric): """Provides the ratio between the number of features modified to obtain the counterfactual example and the number of features in the original instance. Only considers structural features. """ def __init__(self, config_dict=None) -> None: super().__init__(config_dict) self._name = 'Smiles-Levenshtein'
class SmilesLevenshteinMetric(EvaluationMetric): """Provides the ratio between the number of features modified to obtain the counterfactual example and the number of features in the original instance. Only considers structural features. """ def __init__(self, config_dict=None) -> None: super().__init__(config_dict) self._name = 'Smiles-Levenshtein'
def evaluate(self, instance_1 , instance_2 , oracle : Oracle=None, explainer : Explainer=None, dataset = None):
1
2023-12-15 16:34:16+00:00
2k
modelscope/scepter
scepter/modules/opt/lr_schedulers/registry.py
[ { "identifier": "Registry", "path": "scepter/modules/utils/registry.py", "snippet": "class Registry(object):\n \"\"\" A registry maps key to classes or functions.\n\n Example:\n # >>> MODELS = Registry('MODELS')\n # >>> @MODELS.register_class()\n # >>> class ResNet(object):...
import inspect from scepter.modules.utils.registry import Registry, deep_copy from scepter.modules.utils.config import Config
1,272
# -*- coding: utf-8 -*- # Copyright (c) Alibaba, Inc. and its affiliates. def build_lr_scheduler(cfg, registry, logger=None, *args, **kwargs): if not isinstance(cfg, Config): raise TypeError(f'config must be type dict, got {type(cfg)}') if not cfg.have('NAME'): raise KeyError(f'config must contain key NAME, got {cfg}')
# -*- coding: utf-8 -*- # Copyright (c) Alibaba, Inc. and its affiliates. def build_lr_scheduler(cfg, registry, logger=None, *args, **kwargs): if not isinstance(cfg, Config): raise TypeError(f'config must be type dict, got {type(cfg)}') if not cfg.have('NAME'): raise KeyError(f'config must contain key NAME, got {cfg}')
if not isinstance(registry, Registry):
0
2023-12-21 02:01:48+00:00
2k
pigeonai-org/ViDove
src/translators/translation.py
[ { "identifier": "LLM_task", "path": "src/translators/LLM_task.py", "snippet": "def LLM_task(model_name, input, task, temp = 0.15):\n \"\"\"\n Translates input sentence with desired LLM.\n\n :param model_name: The name of the translation model to be used.\n :param input: Sentence for translat...
from os import getenv from time import sleep from tqdm import tqdm from .LLM_task import LLM_task from src.srt_util.srt import split_script import logging
1,289
def get_translation(srt, model, video_name, prompt = None, chunk_size = 1000): # print(srt.get_source_only()) script_arr, range_arr = split_script(srt.get_source_only(),chunk_size) translate(srt, script_arr, range_arr, model, video_name, task=prompt) pass def check_translation(sentence, translation): """ Check for merged-sentence issues in the OpenAI translation """ sentence_count = sentence.count('\n\n') + 1 translation_count = translation.count('\n\n') + 1 if sentence_count != translation_count: return False else: return True # TODO{david}: prompts selector def prompt_selector(src_lang, tgt_lang, domain): language_map = { "EN": "English", "ZH": "Chinese", "ES": "Spanish", "FR": "France", "DE": "Germany", "RU": "Russian", "JA": "Japanese", "AR": "Arabic", } try: src_lang = language_map[src_lang] tgt_lang = language_map[tgt_lang] except: print("Unsupported language, is your abbreviation correct?") logging.info("Unsupported language detected") prompt = f""" you are a translation assistant, your job is to translate a video in domain of {domain} from {src_lang} to {tgt_lang}, you will be provided with a segment in {src_lang} parsed by line, where your translation text should keep the original meaning and the number of lines. """ return prompt def translate(srt, script_arr, range_arr, model_name, video_name=None, attempts_count=5, task=None, temp = 0.15): """ Translates the given script array into another language using ChatGPT and writes the result to the SRT file. This function takes a script array, a range array, a model name, a video name, and a video link as input. It iterates through the sentences and ranges in the script and range arrays. If the translation check fails five times, the function will attempt to resolve merge sentence issues and split the sentence into smaller tokens for a better translation. :param srt: An instance of the Subtitle class representing the SRT file. :param script_arr: A list of strings representing the original script sentences to be translated. :param range_arr: A list of tuples representing the start and end positions of sentences in the script. :param model_name: The name of the translation model to be used. :param video_name: The name of the video. :param attempts_count: Number of attempts allowed for unmatched sentences. :param task: Prompt. :param temp: Model temperature. """ if input is None: raise Exception("Warning! No input has been passed to the LLM!") if task is None: task = "你是一个翻译助理,你的任务是翻译视频,你会被提供一个按行分割的英文段落,你需要在保证句意和行数的情况下输出翻译后的文本。" logging.info(f"translation prompt: {task}") previous_length = 0 for sentence, range_ in tqdm(zip(script_arr, range_arr)): # update the range based on previous length range_ = (range_[0] + previous_length, range_[1] + previous_length) # using chatgpt model print(f"now translating sentences {range_}") logging.info(f"now translating sentences {range_}") flag = True while flag: flag = False try:
def get_translation(srt, model, video_name, prompt = None, chunk_size = 1000): # print(srt.get_source_only()) script_arr, range_arr = split_script(srt.get_source_only(),chunk_size) translate(srt, script_arr, range_arr, model, video_name, task=prompt) pass def check_translation(sentence, translation): """ Check for merged-sentence issues in the OpenAI translation """ sentence_count = sentence.count('\n\n') + 1 translation_count = translation.count('\n\n') + 1 if sentence_count != translation_count: return False else: return True # TODO{david}: prompts selector def prompt_selector(src_lang, tgt_lang, domain): language_map = { "EN": "English", "ZH": "Chinese", "ES": "Spanish", "FR": "France", "DE": "Germany", "RU": "Russian", "JA": "Japanese", "AR": "Arabic", } try: src_lang = language_map[src_lang] tgt_lang = language_map[tgt_lang] except: print("Unsupported language, is your abbreviation correct?") logging.info("Unsupported language detected") prompt = f""" you are a translation assistant, your job is to translate a video in domain of {domain} from {src_lang} to {tgt_lang}, you will be provided with a segment in {src_lang} parsed by line, where your translation text should keep the original meaning and the number of lines. """ return prompt def translate(srt, script_arr, range_arr, model_name, video_name=None, attempts_count=5, task=None, temp = 0.15): """ Translates the given script array into another language using ChatGPT and writes the result to the SRT file. This function takes a script array, a range array, a model name, a video name, and a video link as input. It iterates through the sentences and ranges in the script and range arrays. If the translation check fails five times, the function will attempt to resolve merge sentence issues and split the sentence into smaller tokens for a better translation. :param srt: An instance of the Subtitle class representing the SRT file. :param script_arr: A list of strings representing the original script sentences to be translated. :param range_arr: A list of tuples representing the start and end positions of sentences in the script. :param model_name: The name of the translation model to be used. :param video_name: The name of the video. :param attempts_count: Number of attempts allowed for unmatched sentences. :param task: Prompt. :param temp: Model temperature. """ if input is None: raise Exception("Warning! No input has been passed to the LLM!") if task is None: task = "你是一个翻译助理,你的任务是翻译视频,你会被提供一个按行分割的英文段落,你需要在保证句意和行数的情况下输出翻译后的文本。" logging.info(f"translation prompt: {task}") previous_length = 0 for sentence, range_ in tqdm(zip(script_arr, range_arr)): # update the range based on previous length range_ = (range_[0] + previous_length, range_[1] + previous_length) # using chatgpt model print(f"now translating sentences {range_}") logging.info(f"now translating sentences {range_}") flag = True while flag: flag = False try:
translate = LLM_task(model_name, sentence, task, temp)
0
2023-12-20 01:46:47+00:00
2k
YyzHarry/shortcut-ood-fairness
utils/lin_eval.py
[ { "identifier": "binary_metrics", "path": "utils/eval_helper.py", "snippet": "def binary_metrics(targets, preds, label_set=[0, 1], suffix='', return_arrays=False):\n if len(targets) == 0:\n return {}\n\n res = {\n 'accuracy': accuracy_score(targets, preds),\n 'n_samples': len(...
import numpy as np import torch from utils.eval_helper import binary_metrics, prob_metrics from sklearn.model_selection import GridSearchCV, PredefinedSplit from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression from sklearn.base import clone from sklearn.metrics import roc_auc_score from sklearn.ensemble import RandomForestClassifier
1,460
def get_representations(algorithm, loader, device): ys, atts, zs = [], [], [] algorithm.eval() with torch.no_grad(): for _, x, y, a in loader: z = algorithm.return_feats(x.to(device)).detach().cpu().numpy() zs.append(z) ys.append(y) atts.append(a) return np.concatenate(zs, axis=0), np.concatenate(atts, axis=0), np.concatenate(ys, axis=0) def fit_model(train_X, train_Y, val_X, val_Y, test_X, test_Y, model_type='lr'): if model_type == 'lr': pipe = Pipeline(steps=[ ('model', LogisticRegression(random_state=42, n_jobs=-1)) ]) param_grid = { 'model__C': 10**np.linspace(-5, 1, 10) } elif model_type == 'rf': pipe = Pipeline(steps=[ ('model', RandomForestClassifier(random_state=42, n_jobs=-1)) # ('model', XGBClassifier(random_state=42, n_jobs=-1)) ]) param_grid = { 'model__max_depth': list(range(1, 7)) } else: raise NotImplementedError pds = PredefinedSplit(test_fold=np.concatenate([np.ones((len(train_X),))*-1, np.zeros((len(val_X),))])) cv_lr = (GridSearchCV(pipe, param_grid, refit=False, cv=pds, scoring='roc_auc_ovr', verbose=10, n_jobs=-1).fit( np.concatenate((train_X, val_X)), np.concatenate((train_Y, val_Y)))) pipe = clone( clone(pipe).set_params(**cv_lr.best_params_) ) pipe = pipe.fit(train_X, train_Y) label_set = np.sort(np.unique(train_Y)) res = {} for sset, X, Y in zip(['va', 'te'], [val_X, test_X], [val_Y, test_Y]): preds = pipe.predict_proba(X) if len(label_set) == 2: preds = preds[:, 1] preds_rounded = preds >= 0.5 else: preds_rounded = preds.argmax(1)
def get_representations(algorithm, loader, device): ys, atts, zs = [], [], [] algorithm.eval() with torch.no_grad(): for _, x, y, a in loader: z = algorithm.return_feats(x.to(device)).detach().cpu().numpy() zs.append(z) ys.append(y) atts.append(a) return np.concatenate(zs, axis=0), np.concatenate(atts, axis=0), np.concatenate(ys, axis=0) def fit_model(train_X, train_Y, val_X, val_Y, test_X, test_Y, model_type='lr'): if model_type == 'lr': pipe = Pipeline(steps=[ ('model', LogisticRegression(random_state=42, n_jobs=-1)) ]) param_grid = { 'model__C': 10**np.linspace(-5, 1, 10) } elif model_type == 'rf': pipe = Pipeline(steps=[ ('model', RandomForestClassifier(random_state=42, n_jobs=-1)) # ('model', XGBClassifier(random_state=42, n_jobs=-1)) ]) param_grid = { 'model__max_depth': list(range(1, 7)) } else: raise NotImplementedError pds = PredefinedSplit(test_fold=np.concatenate([np.ones((len(train_X),))*-1, np.zeros((len(val_X),))])) cv_lr = (GridSearchCV(pipe, param_grid, refit=False, cv=pds, scoring='roc_auc_ovr', verbose=10, n_jobs=-1).fit( np.concatenate((train_X, val_X)), np.concatenate((train_Y, val_Y)))) pipe = clone( clone(pipe).set_params(**cv_lr.best_params_) ) pipe = pipe.fit(train_X, train_Y) label_set = np.sort(np.unique(train_Y)) res = {} for sset, X, Y in zip(['va', 'te'], [val_X, test_X], [val_Y, test_Y]): preds = pipe.predict_proba(X) if len(label_set) == 2: preds = preds[:, 1] preds_rounded = preds >= 0.5 else: preds_rounded = preds.argmax(1)
res[sset] = binary_metrics(Y, preds_rounded, label_set=label_set, return_arrays=True)
0
2023-12-15 04:10:31+00:00
2k
RomGai/BrainVis
dc_ldm/modules/encoders/modules.py
[ { "identifier": "Encoder", "path": "dc_ldm/modules/x_transformer.py", "snippet": "class Encoder(AttentionLayers):\n def __init__(self, **kwargs):\n assert 'causal' not in kwargs, 'cannot set causality on encoder'\n super().__init__(causal=False, **kwargs)" }, { "identifier": "Tr...
import torch import torch.nn as nn import sys import kornia from functools import partial from PIL import Image from einops import rearrange, repeat from transformers import CLIPTokenizer, CLIPTextModel, AutoProcessor, CLIPVisionModel, CLIPVisionModelWithProjection from dc_ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test from transformers import BertTokenizerFast # TODO: add to requirements
1,310
# import clip sys.path.append('../dreamdiffusion/code/') class AbstractEncoder(nn.Module): def __init__(self): super().__init__() def encode(self, *args, **kwargs): raise NotImplementedError class ClassEmbedder(nn.Module): def __init__(self, embed_dim, n_classes=1000, key='class'): super().__init__() self.key = key self.embedding = nn.Embedding(n_classes, embed_dim) def forward(self, batch, key=None): if key is None: key = self.key # this is for use in crossattn c = batch[key][:, None] c = self.embedding(c) return c class TransformerEmbedder(AbstractEncoder): """Some transformer encoder layers""" def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): super().__init__() self.device = device
# import clip sys.path.append('../dreamdiffusion/code/') class AbstractEncoder(nn.Module): def __init__(self): super().__init__() def encode(self, *args, **kwargs): raise NotImplementedError class ClassEmbedder(nn.Module): def __init__(self, embed_dim, n_classes=1000, key='class'): super().__init__() self.key = key self.embedding = nn.Embedding(n_classes, embed_dim) def forward(self, batch, key=None): if key is None: key = self.key # this is for use in crossattn c = batch[key][:, None] c = self.embedding(c) return c class TransformerEmbedder(AbstractEncoder): """Some transformer encoder layers""" def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): super().__init__() self.device = device
self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
1
2023-12-16 12:52:14+00:00
2k
Rajeshwaran2001/DRM-Media-Tool
file_merger_dialog.py
[ { "identifier": "show_error_message", "path": "helper/message.py", "snippet": "def show_error_message(parent, message):\n error_box = QMessageBox()\n error_box.setIcon(QMessageBox.Critical)\n error_box.setWindowTitle(\"Error\")\n error_box.setText(message)\n error_box.setWindowIcon(parent...
from PyQt5.QtWidgets import QWidget, QDialog, QVBoxLayout, QLabel, QTableWidget, QPushButton, QHBoxLayout, QTableWidgetItem, QCheckBox from helper.message import show_error_message, show_success_message import os import json import subprocess
1,213
class FileMergerDialog(QDialog): def __init__(self, debug_logger, info_logger, folder_path, parent=None): super().__init__(parent) self.folder_path = folder_path self.setWindowTitle("Files Merger") self.setGeometry(100, 100, 600, 300) self.layout = QVBoxLayout() self.file_table_label = QLabel("Files in Directory:") self.file_table_widget = QTableWidget() self.file_table_widget.setColumnCount( 3) # Added a column for checkboxes self.file_table_widget.setHorizontalHeaderLabels( ["File Name", "Select", "Type"]) self.merge_button = QPushButton("Merge") self.merge_button.clicked.connect(self.merge_files) self.layout.addWidget(self.file_table_label) self.layout.addWidget(self.file_table_widget) self.layout.addWidget(self.merge_button) self.setLayout(self.layout) self.populate_file_table() self.file_table_widget.setColumnWidth(0, 400) self.debug_logger = debug_logger self.info_logger = info_logger def populate_file_table(self): # Clear existing items in the table widget self.file_table_widget.setRowCount(0) try: # List only video and audio files in the specified directory video_files = [file for file in os.listdir( self.folder_path) if file.lower().endswith(('.mp4', '.mkv', '.avi', '.webm'))] audio_files = [file for file in os.listdir( self.folder_path) if file.lower().endswith(('.mp3', '.wav', '.ogg', '.m4a', '.webm'))] # Add video files to the table widget for idx, file in enumerate(video_files): self.add_file_to_table(idx, file, "Video") # Add audio files to the table widget for idx, file in enumerate(audio_files, start=len(video_files)): self.add_file_to_table(idx, file, "Audio") except FileNotFoundError: # Handle the case where the specified directory does not exist self.file_table_widget.setRowCount(1) self.file_table_widget.setItem( 0, 2, QTableWidgetItem("Directory not found")) def add_file_to_table(self, idx, file, file_type): self.file_table_widget.insertRow(idx) # Center-align the content in the first column item_file_name = QTableWidgetItem(file) item_file_name.setTextAlignment(0x0004 | 0x0080) # AlignCenter self.file_table_widget.setItem(idx, 0, item_file_name) # Create a widget for the checkbox and center-align it checkbox_widget = QWidget() checkbox_layout = QHBoxLayout(checkbox_widget) checkbox_layout.addStretch(3) checkbox = QCheckBox() checkbox.setChecked(False) checkbox_layout.addWidget(checkbox) checkbox_layout.addStretch(3) # Set the widget with the centered checkbox in the second column self.file_table_widget.setCellWidget(idx, 1, checkbox_widget) # Set the file type in the third column self.file_table_widget.setItem(idx, 2, QTableWidgetItem(file_type)) def merge_files(self): selected_files = [] metadata = {} for row in range(self.file_table_widget.rowCount()): checkbox = self.file_table_widget.cellWidget( row, 1).layout().itemAt(1).widget() if checkbox.isChecked(): file_name = self.file_table_widget.item(row, 0).text() file_type = self.file_table_widget.item(row, 2).text() selected_files.append((file_name, file_type)) # Check if there are at least one video and one audio file selected if any(file_type == 'Video' for (_, file_type) in selected_files) and \ any(file_type == 'Audio' for (_, file_type) in selected_files): # Get all files in the directory ending with .info.json info_files = [file for file in os.listdir( self.folder_path) if file.endswith('.info.json')] img_files = [file for file in os.listdir( self.folder_path) if file.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))] language_mapping = { 'en': 'eng', 'eng': 'eng', 'english': 'eng', 'ta': 'tam', 'tamil': 'tam', 'tam': 'tam' } # Define language codes language_codes = list(language_mapping.keys()) suffixes = tuple(f'.{code}.vtt' for code in language_codes) subtitle_files = [file for file in os.listdir( self.folder_path) if file.endswith(suffixes)] thumbnail_file = None # Initialize with a default value # print(subtitle_files) if not info_files:
class FileMergerDialog(QDialog): def __init__(self, debug_logger, info_logger, folder_path, parent=None): super().__init__(parent) self.folder_path = folder_path self.setWindowTitle("Files Merger") self.setGeometry(100, 100, 600, 300) self.layout = QVBoxLayout() self.file_table_label = QLabel("Files in Directory:") self.file_table_widget = QTableWidget() self.file_table_widget.setColumnCount( 3) # Added a column for checkboxes self.file_table_widget.setHorizontalHeaderLabels( ["File Name", "Select", "Type"]) self.merge_button = QPushButton("Merge") self.merge_button.clicked.connect(self.merge_files) self.layout.addWidget(self.file_table_label) self.layout.addWidget(self.file_table_widget) self.layout.addWidget(self.merge_button) self.setLayout(self.layout) self.populate_file_table() self.file_table_widget.setColumnWidth(0, 400) self.debug_logger = debug_logger self.info_logger = info_logger def populate_file_table(self): # Clear existing items in the table widget self.file_table_widget.setRowCount(0) try: # List only video and audio files in the specified directory video_files = [file for file in os.listdir( self.folder_path) if file.lower().endswith(('.mp4', '.mkv', '.avi', '.webm'))] audio_files = [file for file in os.listdir( self.folder_path) if file.lower().endswith(('.mp3', '.wav', '.ogg', '.m4a', '.webm'))] # Add video files to the table widget for idx, file in enumerate(video_files): self.add_file_to_table(idx, file, "Video") # Add audio files to the table widget for idx, file in enumerate(audio_files, start=len(video_files)): self.add_file_to_table(idx, file, "Audio") except FileNotFoundError: # Handle the case where the specified directory does not exist self.file_table_widget.setRowCount(1) self.file_table_widget.setItem( 0, 2, QTableWidgetItem("Directory not found")) def add_file_to_table(self, idx, file, file_type): self.file_table_widget.insertRow(idx) # Center-align the content in the first column item_file_name = QTableWidgetItem(file) item_file_name.setTextAlignment(0x0004 | 0x0080) # AlignCenter self.file_table_widget.setItem(idx, 0, item_file_name) # Create a widget for the checkbox and center-align it checkbox_widget = QWidget() checkbox_layout = QHBoxLayout(checkbox_widget) checkbox_layout.addStretch(3) checkbox = QCheckBox() checkbox.setChecked(False) checkbox_layout.addWidget(checkbox) checkbox_layout.addStretch(3) # Set the widget with the centered checkbox in the second column self.file_table_widget.setCellWidget(idx, 1, checkbox_widget) # Set the file type in the third column self.file_table_widget.setItem(idx, 2, QTableWidgetItem(file_type)) def merge_files(self): selected_files = [] metadata = {} for row in range(self.file_table_widget.rowCount()): checkbox = self.file_table_widget.cellWidget( row, 1).layout().itemAt(1).widget() if checkbox.isChecked(): file_name = self.file_table_widget.item(row, 0).text() file_type = self.file_table_widget.item(row, 2).text() selected_files.append((file_name, file_type)) # Check if there are at least one video and one audio file selected if any(file_type == 'Video' for (_, file_type) in selected_files) and \ any(file_type == 'Audio' for (_, file_type) in selected_files): # Get all files in the directory ending with .info.json info_files = [file for file in os.listdir( self.folder_path) if file.endswith('.info.json')] img_files = [file for file in os.listdir( self.folder_path) if file.lower().endswith(('.jpg', '.jpeg', '.png', '.webp'))] language_mapping = { 'en': 'eng', 'eng': 'eng', 'english': 'eng', 'ta': 'tam', 'tamil': 'tam', 'tam': 'tam' } # Define language codes language_codes = list(language_mapping.keys()) suffixes = tuple(f'.{code}.vtt' for code in language_codes) subtitle_files = [file for file in os.listdir( self.folder_path) if file.endswith(suffixes)] thumbnail_file = None # Initialize with a default value # print(subtitle_files) if not info_files:
show_error_message(self, "Error: No Metadata files found.")
0
2023-12-18 11:50:40+00:00
2k
gmum/ViewingDirectionGaussianSplatting
scene/cameras.py
[ { "identifier": "getWorld2View2", "path": "utils/graphics_utils.py", "snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:...
import torch import numpy as np from torch import nn from utils.graphics_utils import getWorld2View2, getProjectionMatrix
915
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # class Camera(nn.Module): def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask, image_name, uid, trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda" ): super(Camera, self).__init__() self.uid = uid self.colmap_id = colmap_id self.R = R self.T = T self.FoVx = FoVx self.FoVy = FoVy self.image_name = image_name try: self.data_device = torch.device(data_device) except Exception as e: print(e) print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" ) self.data_device = torch.device("cuda") self.original_image = image.clamp(0.0, 1.0).to(self.data_device) self.image_width = self.original_image.shape[2] self.image_height = self.original_image.shape[1] if gt_alpha_mask is not None: self.original_image *= gt_alpha_mask.to(self.data_device) else: self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device) self.zfar = 100.0 self.znear = 0.01 self.trans = trans self.scale = scale self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # class Camera(nn.Module): def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask, image_name, uid, trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda" ): super(Camera, self).__init__() self.uid = uid self.colmap_id = colmap_id self.R = R self.T = T self.FoVx = FoVx self.FoVy = FoVy self.image_name = image_name try: self.data_device = torch.device(data_device) except Exception as e: print(e) print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" ) self.data_device = torch.device("cuda") self.original_image = image.clamp(0.0, 1.0).to(self.data_device) self.image_width = self.original_image.shape[2] self.image_height = self.original_image.shape[1] if gt_alpha_mask is not None: self.original_image *= gt_alpha_mask.to(self.data_device) else: self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device) self.zfar = 100.0 self.znear = 0.01 self.trans = trans self.scale = scale self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda()
1
2023-12-21 10:09:17+00:00
2k
tonnetonne814/PL-Bert-VITS2
preprocess_ja.py
[ { "identifier": "TextCleaner", "path": "PL_BERT_ja/text_utils.py", "snippet": "class TextCleaner:\n def __init__(self, dummy=None):\n self.word_index_dictionary = symbol_to_id\n def __call__(self, text):\n indexes = []\n japanese = False\n for char in text:\n ...
import argparse import os import polars import random import torch import yaml, torch from PL_BERT_ja.text_utils import TextCleaner from PL_BERT_ja.phonemize import phonemize from tqdm import tqdm from PL_BERT_ja.model import MultiTaskModel from transformers import AlbertConfig, AlbertModel from transformers import BertJapaneseTokenizer
1,456
def preprocess(dataset_dir, pl_bert_dir): n_val_test_file = 10 filelist_dir = "./filelists/" dataset_name = "jvnv_ver1" os.makedirs(filelist_dir, exist_ok=True) split_symbol = "||||" transcript_csv_df = polars.read_csv(os.path.join(dataset_dir, "jvnv_v1", "transcription.csv"),has_header=False)[:, 0] emo_list = os.listdir(os.path.join(dataset_dir,"jvnv_v1", "F1")) style_list = os.listdir(os.path.join(dataset_dir,"jvnv_v1", "F1", "anger")) pl_bert_savedir = "./pl_bert_embeddings" os.makedirs(pl_bert_savedir, exist_ok=True) pl_bert_model, pl_bert_config, device = get_pl_bert_ja(dir=pl_bert_dir) pl_bert_cleaner = TextCleaner() pl_bert_tokenizer = BertJapaneseTokenizer.from_pretrained(pl_bert_config['dataset_params']['tokenizer']) hidden_size = pl_bert_config["model_params"]["hidden_size"] n_layers = pl_bert_config["model_params"]["num_hidden_layers"] + 1 filelists = list() spk_g = ["F", "M"] for line in tqdm(transcript_csv_df): index_name, emo_prefix, text = line.split("|") emotion, style, file_idx = index_name.split("_") text = text.replace("\n", "") phonemes = ''.join(phonemize(text,pl_bert_tokenizer)["phonemes"]) input_ids = pl_bert_cleaner(phonemes) with torch.inference_mode(): hidden_stats = pl_bert_model(torch.tensor(input_ids, dtype=torch.int64, device=device).unsqueeze(0))[-1]["hidden_states"] save_tensor = torch.zeros(size=(n_layers, len(input_ids), hidden_size), device=device) for idx, hidden_stat in enumerate(hidden_stats): save_tensor[idx, :, :] = hidden_stat torch.save(save_tensor.to('cpu').detach(), os.path.join(pl_bert_savedir, f"{index_name}.PlBertJa")) for g_idx in range(2): for spk_idx in range(2): spk_ID = str(g_idx + spk_idx*2) spk = spk_g[g_idx] + str(spk_idx+1) wav_path = os.path.join(dataset_dir, "jvnv_v1", spk, emotion, style, f"{spk}_{emotion}_{style}_{file_idx}.wav") filelists.append(f"{wav_path}{split_symbol}{spk_ID}{split_symbol}{phonemes}{split_symbol}{text}{split_symbol}{index_name}{split_symbol}emo:{str(emo_list.index(emotion))}{split_symbol}style:{str(style_list.index(style))}\n") val_list = list() test_list = list() for idx in range(n_val_test_file*2): target_idx = random.randint(0, len(filelists)) target_line = filelists.pop(target_idx) if idx % 2 == 1: val_list.append(target_line) else: test_list.append(target_line) write_txt(filelists, os.path.join(filelist_dir, f"{dataset_name}_train.txt")) write_txt(val_list, os.path.join(filelist_dir, f"{dataset_name}_val.txt")) write_txt(test_list, os.path.join(filelist_dir, f"{dataset_name}_test.txt")) return 0 def write_txt(lists, path): with open(path, mode="w", encoding="utf-8") as f: f.writelines(lists) def get_pl_bert_ja(dir): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") config_path=os.path.join(dir, "config.yml") config = yaml.safe_load(open(config_path)) albert_base_configuration = AlbertConfig(**config['model_params']) bert_ = AlbertModel(albert_base_configuration).to(device) #num_vocab = max([m['token'] for m in token_maps.values()]) + 1 # 30923 + 1
def preprocess(dataset_dir, pl_bert_dir): n_val_test_file = 10 filelist_dir = "./filelists/" dataset_name = "jvnv_ver1" os.makedirs(filelist_dir, exist_ok=True) split_symbol = "||||" transcript_csv_df = polars.read_csv(os.path.join(dataset_dir, "jvnv_v1", "transcription.csv"),has_header=False)[:, 0] emo_list = os.listdir(os.path.join(dataset_dir,"jvnv_v1", "F1")) style_list = os.listdir(os.path.join(dataset_dir,"jvnv_v1", "F1", "anger")) pl_bert_savedir = "./pl_bert_embeddings" os.makedirs(pl_bert_savedir, exist_ok=True) pl_bert_model, pl_bert_config, device = get_pl_bert_ja(dir=pl_bert_dir) pl_bert_cleaner = TextCleaner() pl_bert_tokenizer = BertJapaneseTokenizer.from_pretrained(pl_bert_config['dataset_params']['tokenizer']) hidden_size = pl_bert_config["model_params"]["hidden_size"] n_layers = pl_bert_config["model_params"]["num_hidden_layers"] + 1 filelists = list() spk_g = ["F", "M"] for line in tqdm(transcript_csv_df): index_name, emo_prefix, text = line.split("|") emotion, style, file_idx = index_name.split("_") text = text.replace("\n", "") phonemes = ''.join(phonemize(text,pl_bert_tokenizer)["phonemes"]) input_ids = pl_bert_cleaner(phonemes) with torch.inference_mode(): hidden_stats = pl_bert_model(torch.tensor(input_ids, dtype=torch.int64, device=device).unsqueeze(0))[-1]["hidden_states"] save_tensor = torch.zeros(size=(n_layers, len(input_ids), hidden_size), device=device) for idx, hidden_stat in enumerate(hidden_stats): save_tensor[idx, :, :] = hidden_stat torch.save(save_tensor.to('cpu').detach(), os.path.join(pl_bert_savedir, f"{index_name}.PlBertJa")) for g_idx in range(2): for spk_idx in range(2): spk_ID = str(g_idx + spk_idx*2) spk = spk_g[g_idx] + str(spk_idx+1) wav_path = os.path.join(dataset_dir, "jvnv_v1", spk, emotion, style, f"{spk}_{emotion}_{style}_{file_idx}.wav") filelists.append(f"{wav_path}{split_symbol}{spk_ID}{split_symbol}{phonemes}{split_symbol}{text}{split_symbol}{index_name}{split_symbol}emo:{str(emo_list.index(emotion))}{split_symbol}style:{str(style_list.index(style))}\n") val_list = list() test_list = list() for idx in range(n_val_test_file*2): target_idx = random.randint(0, len(filelists)) target_line = filelists.pop(target_idx) if idx % 2 == 1: val_list.append(target_line) else: test_list.append(target_line) write_txt(filelists, os.path.join(filelist_dir, f"{dataset_name}_train.txt")) write_txt(val_list, os.path.join(filelist_dir, f"{dataset_name}_val.txt")) write_txt(test_list, os.path.join(filelist_dir, f"{dataset_name}_test.txt")) return 0 def write_txt(lists, path): with open(path, mode="w", encoding="utf-8") as f: f.writelines(lists) def get_pl_bert_ja(dir): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") config_path=os.path.join(dir, "config.yml") config = yaml.safe_load(open(config_path)) albert_base_configuration = AlbertConfig(**config['model_params']) bert_ = AlbertModel(albert_base_configuration).to(device) #num_vocab = max([m['token'] for m in token_maps.values()]) + 1 # 30923 + 1
bert = MultiTaskModel(
2
2023-12-16 05:34:02+00:00
2k
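For context on the record above: preprocess carves validation and test sets out of filelists by popping randomly chosen lines, alternating between the two. A minimal, self-contained sketch of that split pattern (note the upper bound: random.randint is inclusive on both ends, so valid list indices run to len(items) - 1) is:

import random

def split_off(items, n_each):
    # Pop 2 * n_each random lines; alternate them into val and test.
    val, test = [], []
    for i in range(n_each * 2):
        line = items.pop(random.randint(0, len(items) - 1))
        (val if i % 2 == 1 else test).append(line)
    return items, val, test  # remaining items form the train split

train, val, test = split_off([f"line {i}" for i in range(100)], 10)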
Ruiyuan-Zhang/CCS
multi_part_assembly/models/modules/encoder/point_transformer/transformer.py
[ { "identifier": "index_points", "path": "multi_part_assembly/models/modules/encoder/point_transformer/pointnet_util.py", "snippet": "def index_points(points, idx):\n \"\"\"\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S, [K]]\n Return:\n new...
from multi_part_assembly.models.modules.encoder.point_transformer.pointnet_util import index_points, square_distance import torch import torch.nn as nn import torch.nn.functional as F import numpy as np
664
class TransformerBlock(nn.Module): def __init__(self, d_points, d_model, k) -> None: super().__init__() self.fc1 = nn.Linear(d_points, d_model) self.fc2 = nn.Linear(d_model, d_points) self.fc_delta = nn.Sequential( nn.Linear(3, d_model), nn.ReLU(), nn.Linear(d_model, d_model) ) self.fc_gamma = nn.Sequential( nn.Linear(d_model, d_model), nn.ReLU(), nn.Linear(d_model, d_model) ) self.w_qs = nn.Linear(d_model, d_model, bias=False) self.w_ks = nn.Linear(d_model, d_model, bias=False) self.w_vs = nn.Linear(d_model, d_model, bias=False) self.k = k # xyz: b x n x 3, features: b x n x f def forward(self, xyz, features):
class TransformerBlock(nn.Module): def __init__(self, d_points, d_model, k) -> None: super().__init__() self.fc1 = nn.Linear(d_points, d_model) self.fc2 = nn.Linear(d_model, d_points) self.fc_delta = nn.Sequential( nn.Linear(3, d_model), nn.ReLU(), nn.Linear(d_model, d_model) ) self.fc_gamma = nn.Sequential( nn.Linear(d_model, d_model), nn.ReLU(), nn.Linear(d_model, d_model) ) self.w_qs = nn.Linear(d_model, d_model, bias=False) self.w_ks = nn.Linear(d_model, d_model, bias=False) self.w_vs = nn.Linear(d_model, d_model, bias=False) self.k = k # xyz: b x n x 3, features: b x n x f def forward(self, xyz, features):
dists = square_distance(xyz, xyz)
1
2023-12-15 13:13:01+00:00
2k
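The forward pass that begins at this record's next_line (square_distance over xyz) is the k-nearest-neighbor lookup that feeds index_points; a minimal PyTorch sketch of just that step, using torch.cdist in place of the repo's square_distance helper, is:

import torch

b, n, k = 2, 128, 16
xyz = torch.randn(b, n, 3)
dists = torch.cdist(xyz, xyz) ** 2                      # (b, n, n) squared distances
knn_idx = dists.topk(k, dim=-1, largest=False).indices  # (b, n, k) nearest neighbors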
uc-vision/taichi-splatting
taichi_splatting/scripts/fit_image_gaussians.py
[ { "identifier": "RasterConfig", "path": "taichi_splatting/data_types.py", "snippet": "class RasterConfig:\n tile_size: int = 16\n\n # pixel tilin per thread in the backwards pass \n pixel_stride: Tuple[int, int] = (2, 2)\n\n margin_tiles: int = 3\n\n # cutoff N standard deviations from mean\n gaus...
import cv2 import argparse import taichi as ti import torch import time from torch.optim import Adam from taichi_splatting.data_types import RasterConfig from taichi_splatting.renderer2d import render_gaussians, Gaussians2D from taichi_splatting.tests.random_data import random_2d_gaussians from taichi_splatting.torch_ops.util import check_finite
1,338
def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('image_file', type=str) parser.add_argument('--seed', type=int, default=0) parser.add_argument('--tile_size', type=int, default=16) parser.add_argument('--n', type=int, default=20000) parser.add_argument('--debug', action='store_true') parser.add_argument('--show', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--epoch', type=int, default=100, help='Number of iterations per measurement/profiling') return parser.parse_args() def optimizer(gaussians: Gaussians2D, base_lr=1.0): learning_rates = dict( position=0.1, log_scaling=0.025, rotation=0.005, alpha_logit=0.2, feature=0.01 ) params = {k: torch.nn.Parameter(x, requires_grad=True) if k in learning_rates else x for k, x in gaussians.items()} param_groups = [ dict(params=[params[name]], lr=lr * base_lr, name=name) for name, lr in learning_rates.items() ] return Adam(param_groups), Gaussians2D(**params, batch_size=gaussians.batch_size) def display_image(image): image = (image.detach().clamp(0, 1) * 255).to(torch.uint8) image = image.cpu().numpy() cv2.imshow('rendered', image) cv2.waitKey(1) def main(): device = torch.device('cuda:0') args = parse_args() ref_image = cv2.imread(args.image_file) h, w = ref_image.shape[:2] ti.init(arch=ti.cuda, log_level=ti.INFO, debug=args.debug, device_memory_GB=0.1) print(f'Image size: {w}x{h}') if args.show: cv2.namedWindow('rendered', cv2.WINDOW_FULLSCREEN) torch.manual_seed(args.seed) gaussians = random_2d_gaussians(args.n, (w, h)).to(torch.device('cuda:0')) opt, params = optimizer(gaussians, base_lr=1.0) ref_image = torch.from_numpy(ref_image).to(dtype=torch.float32, device=device) / 255 config = RasterConfig(tile_size=args.tile_size) while True: if args.profile: ti.profiler.clear_kernel_profiler_info() start = time.time() for _ in range(args.epoch): opt.zero_grad()
def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('image_file', type=str) parser.add_argument('--seed', type=int, default=0) parser.add_argument('--tile_size', type=int, default=16) parser.add_argument('--n', type=int, default=20000) parser.add_argument('--debug', action='store_true') parser.add_argument('--show', action='store_true') parser.add_argument('--profile', action='store_true') parser.add_argument('--epoch', type=int, default=100, help='Number of iterations per measurement/profiling') return parser.parse_args() def optimizer(gaussians: Gaussians2D, base_lr=1.0): learning_rates = dict( position=0.1, log_scaling=0.025, rotation=0.005, alpha_logit=0.2, feature=0.01 ) params = {k: torch.nn.Parameter(x, requires_grad=True) if k in learning_rates else x for k, x in gaussians.items()} param_groups = [ dict(params=[params[name]], lr=lr * base_lr, name=name) for name, lr in learning_rates.items() ] return Adam(param_groups), Gaussians2D(**params, batch_size=gaussians.batch_size) def display_image(image): image = (image.detach().clamp(0, 1) * 255).to(torch.uint8) image = image.cpu().numpy() cv2.imshow('rendered', image) cv2.waitKey(1) def main(): device = torch.device('cuda:0') args = parse_args() ref_image = cv2.imread(args.image_file) h, w = ref_image.shape[:2] ti.init(arch=ti.cuda, log_level=ti.INFO, debug=args.debug, device_memory_GB=0.1) print(f'Image size: {w}x{h}') if args.show: cv2.namedWindow('rendered', cv2.WINDOW_FULLSCREEN) torch.manual_seed(args.seed) gaussians = random_2d_gaussians(args.n, (w, h)).to(torch.device('cuda:0')) opt, params = optimizer(gaussians, base_lr=1.0) ref_image = torch.from_numpy(ref_image).to(dtype=torch.float32, device=device) / 255 config = RasterConfig(tile_size=args.tile_size) while True: if args.profile: ti.profiler.clear_kernel_profiler_info() start = time.time() for _ in range(args.epoch): opt.zero_grad()
image = render_gaussians(params, (w, h), config)
1
2023-12-17 15:26:52+00:00
2k
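The optimizer helper in the record above builds one Adam parameter group per named tensor so each Gaussian attribute trains at its own rate; a stripped-down sketch of that per-group pattern, using plain tensors in place of the Gaussians2D container, is:

import torch

learning_rates = dict(position=0.1, log_scaling=0.025)
params = {name: torch.nn.Parameter(torch.randn(8, 2)) for name in learning_rates}
param_groups = [dict(params=[params[name]], lr=lr, name=name)
                for name, lr in learning_rates.items()]
opt = torch.optim.Adam(param_groups)
# Each group's rate stays independently adjustable via opt.param_groups.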
exislow/tidal-dl-ng
tidal_dl_ng/config.py
[ { "identifier": "SingletonMeta", "path": "tidal_dl_ng/helper/decorator.py", "snippet": "class SingletonMeta(type):\n \"\"\"\n The Singleton class can be implemented in different ways in Python. Some\n possible methods include: base class, decorator, metaclass. We will use the\n metaclass bec...
import os import shutil import tidalapi from collections.abc import Callable from json import JSONDecodeError from typing import Any from requests import HTTPError from tidal_dl_ng.helper.decorator import SingletonMeta from tidal_dl_ng.helper.path import path_base, path_file_settings, path_file_token from tidal_dl_ng.model.cfg import Settings as ModelSettings from tidal_dl_ng.model.cfg import Token as ModelToken
1,506
class BaseConfig: data: ModelSettings | ModelToken = None file_path: str = None cls_model: object = None path_base: str = path_base() def save(self) -> None: data_json = self.data.to_json() # Try to create the base folder. os.makedirs(self.path_base, exist_ok=True) with open(self.file_path, encoding="utf-8", mode="w") as f: f.write(data_json) def set_option(self, key: str, value: Any) -> None: setattr(self.data, key, value) def read(self, path: str) -> bool: result = False try: with open(path, encoding="utf-8") as f: settings_json = f.read() self.data = self.cls_model.from_json(settings_json) result = True except (JSONDecodeError, TypeError, FileNotFoundError, ValueError) as e: if isinstance(e, ValueError): path_bak = path + ".bak" # First check if a backup file already exists. If yes, remove it. if os.path.exists(path_bak): os.remove(path_bak) # Move the invalid config file to the backup location. shutil.move(path, path_bak) # TODO: Implement better global logger. print( "Something is wrong with your config. Maybe it is not compatible anymore due to a new app version." f" You can find a backup of your old config here: '{path_bak}'. A new default config was created." ) self.data = self.cls_model() # Call save in case of we need to update the saved config, due to changes in code. # TODO: Compare if config in memory and on disk is different. Otherwise no write operation. self.save() return result class Settings(BaseConfig, metaclass=SingletonMeta): cls_model = ModelSettings data = None def __init__(self): self.file_path = path_file_settings() self.read(self.file_path) class Tidal(BaseConfig, metaclass=SingletonMeta): cls_model = ModelToken session: tidalapi.Session = None data: ModelToken = None token_from_storage: bool = False settings: Settings = None def __init__(self, settings: Settings = None): self.session = tidalapi.Session() # self.session.config.client_id = "km8T1xS355y7dd3H" # self.session.config.client_secret = "vcmeGW1OuZ0fWYMCSZ6vNvSLJlT3XEpW0ambgYt5ZuI=" self.session.video_quality = tidalapi.VideoQuality.high
class BaseConfig: data: ModelSettings | ModelToken = None file_path: str = None cls_model: object = None path_base: str = path_base() def save(self) -> None: data_json = self.data.to_json() # Try to create the base folder. os.makedirs(self.path_base, exist_ok=True) with open(self.file_path, encoding="utf-8", mode="w") as f: f.write(data_json) def set_option(self, key: str, value: Any) -> None: setattr(self.data, key, value) def read(self, path: str) -> bool: result = False try: with open(path, encoding="utf-8") as f: settings_json = f.read() self.data = self.cls_model.from_json(settings_json) result = True except (JSONDecodeError, TypeError, FileNotFoundError, ValueError) as e: if isinstance(e, ValueError): path_bak = path + ".bak" # First check if a backup file already exists. If yes, remove it. if os.path.exists(path_bak): os.remove(path_bak) # Move the invalid config file to the backup location. shutil.move(path, path_bak) # TODO: Implement better global logger. print( "Something is wrong with your config. Maybe it is not compatible anymore due to a new app version." f" You can find a backup of your old config here: '{path_bak}'. A new default config was created." ) self.data = self.cls_model() # Call save in case of we need to update the saved config, due to changes in code. # TODO: Compare if config in memory and on disk is different. Otherwise no write operation. self.save() return result class Settings(BaseConfig, metaclass=SingletonMeta): cls_model = ModelSettings data = None def __init__(self): self.file_path = path_file_settings() self.read(self.file_path) class Tidal(BaseConfig, metaclass=SingletonMeta): cls_model = ModelToken session: tidalapi.Session = None data: ModelToken = None token_from_storage: bool = False settings: Settings = None def __init__(self, settings: Settings = None): self.session = tidalapi.Session() # self.session.config.client_id = "km8T1xS355y7dd3H" # self.session.config.client_secret = "vcmeGW1OuZ0fWYMCSZ6vNvSLJlT3XEpW0ambgYt5ZuI=" self.session.video_quality = tidalapi.VideoQuality.high
self.file_path = path_file_token()
3
2023-12-19 23:05:47+00:00
2k
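Settings and Tidal in the record above are singletons via the SingletonMeta metaclass referenced in its context; one common implementation of that metaclass (the repo's version may differ in detail) is:

class SingletonMeta(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Construct the instance once, then always return the cached one.
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class Settings(metaclass=SingletonMeta):
    pass

assert Settings() is Settings()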
smoores-dev/storyteller
storyteller/api/auth.py
[ { "identifier": "InviteAccept", "path": "storyteller/api/models.py", "snippet": "class InviteAccept(BaseModel):\n username: str\n full_name: str\n email: str\n password: str\n invite_key: str" }, { "identifier": "TokenData", "path": "storyteller/api/models.py", "snippet": ...
import base64 import json import os from datetime import timedelta, datetime from typing import Annotated, Optional, cast from urllib.parse import unquote from jose import JWTError, jwt from fastapi import Body, Depends, HTTPException, Request, status from fastapi.security import OAuth2PasswordBearer from passlib.context import CryptContext from starlette.status import HTTP_401_UNAUTHORIZED from .models import InviteAccept, TokenData from .database import get_user, user_has_permission, verify_invite as verify_invite_db
1,332
SECRET_KEY = os.getenv("STORYTELLER_SECRET_KEY", "<notsosecret>") ALGORITHM = "HS256" ACCESS_TOKEN_EXPIRE_DAYS = 10 class OAuth2PasswordBearerWithCookie(OAuth2PasswordBearer): async def __call__(self, request: Request) -> Optional[str]: header_param = None try: header_param = await super().__call__(request) except HTTPException: pass if header_param is not None: return header_param auth_cookie = request.cookies.get("st_token") if not auth_cookie: if self.auto_error: raise HTTPException( status_code=HTTP_401_UNAUTHORIZED, detail="Not authenticated", headers={"WWW-Authenticate": "Bearer"}, ) else: return None auth_token = json.loads(base64.urlsafe_b64decode(unquote(auth_cookie))) access_token = auth_token["access_token"] if not access_token: if self.auto_error: raise HTTPException( status_code=HTTP_401_UNAUTHORIZED, detail="Not authenticated", headers={"WWW-Authenticate": "Bearer"}, ) else: return None return access_token oauth2_scheme = OAuth2PasswordBearerWithCookie(tokenUrl="token") password_context = CryptContext(schemes=["argon2"], deprecated="auto") def verify_password(plain_password: str, hashed_password: str): return password_context.verify(plain_password, hashed_password) def get_password_hash(password: str): return password_context.hash(password) def authenticate_user(username: str, password: str): try: user = get_user(username) except: return None if not verify_password(password, user.hashed_password): return None return user def create_access_token(data: dict, expires_delta: timedelta | None = None): to_encode = data.copy() if expires_delta: expire = datetime.utcnow() + expires_delta else: expire = datetime.utcnow() + timedelta(days=ACCESS_TOKEN_EXPIRE_DAYS) to_encode.update({"exp": expire}) encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM) return encoded_jwt unauthorized = HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid authentication credentials", headers={"WWW-Authenticate": "Bearer"}, ) def verify_token(token: Annotated[str, Depends(oauth2_scheme)]): try: payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) username: str | None = cast(str | None, payload.get("sub")) if username is None: raise unauthorized token_data = TokenData(username=username) except JWTError: raise unauthorized return token_data def verify_invite(invite: Annotated[InviteAccept, Body()]):
SECRET_KEY = os.getenv("STORYTELLER_SECRET_KEY", "<notsosecret>") ALGORITHM = "HS256" ACCESS_TOKEN_EXPIRE_DAYS = 10 class OAuth2PasswordBearerWithCookie(OAuth2PasswordBearer): async def __call__(self, request: Request) -> Optional[str]: header_param = None try: header_param = await super().__call__(request) except HTTPException: pass if header_param is not None: return header_param auth_cookie = request.cookies.get("st_token") if not auth_cookie: if self.auto_error: raise HTTPException( status_code=HTTP_401_UNAUTHORIZED, detail="Not authenticated", headers={"WWW-Authenticate": "Bearer"}, ) else: return None auth_token = json.loads(base64.urlsafe_b64decode(unquote(auth_cookie))) access_token = auth_token["access_token"] if not access_token: if self.auto_error: raise HTTPException( status_code=HTTP_401_UNAUTHORIZED, detail="Not authenticated", headers={"WWW-Authenticate": "Bearer"}, ) else: return None return access_token oauth2_scheme = OAuth2PasswordBearerWithCookie(tokenUrl="token") password_context = CryptContext(schemes=["argon2"], deprecated="auto") def verify_password(plain_password: str, hashed_password: str): return password_context.verify(plain_password, hashed_password) def get_password_hash(password: str): return password_context.hash(password) def authenticate_user(username: str, password: str): try: user = get_user(username) except: return None if not verify_password(password, user.hashed_password): return None return user def create_access_token(data: dict, expires_delta: timedelta | None = None): to_encode = data.copy() if expires_delta: expire = datetime.utcnow() + expires_delta else: expire = datetime.utcnow() + timedelta(days=ACCESS_TOKEN_EXPIRE_DAYS) to_encode.update({"exp": expire}) encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM) return encoded_jwt unauthorized = HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid authentication credentials", headers={"WWW-Authenticate": "Bearer"}, ) def verify_token(token: Annotated[str, Depends(oauth2_scheme)]): try: payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM]) username: str | None = cast(str | None, payload.get("sub")) if username is None: raise unauthorized token_data = TokenData(username=username) except JWTError: raise unauthorized return token_data def verify_invite(invite: Annotated[InviteAccept, Body()]):
if verify_invite_db(invite.email, invite.invite_key):
1
2023-12-15 16:07:12+00:00
2k
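create_access_token and verify_token in the record above round-trip a JWT through python-jose; a minimal sketch of that round trip, with a throwaway secret, is:

from datetime import datetime, timedelta
from jose import jwt

SECRET_KEY, ALGORITHM = "notsosecret", "HS256"
token = jwt.encode({"sub": "alice", "exp": datetime.utcnow() + timedelta(days=10)},
                   SECRET_KEY, algorithm=ALGORITHM)
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
assert payload["sub"] == "alice"  # jwt.decode raises JWTError on tampering or expiry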
noprobelm/terminal-cellular-automaton
tests/test_cell.py
[ { "identifier": "MooreCell", "path": "terminal_cellular_automaton/cell.py", "snippet": "class MooreCell:\n \"\"\"A cell that references members of a MooreNeighborhood\n\n +---+---+---+\n | 1 | 2 | 3 |\n +---+---+---+\n | 4 | C | 5 |\n +---+---+---+\n | 6 | 7 | 8 |\n +---+---+---+...
from ward import test, fixture from terminal_cellular_automaton.cell import MooreCell from terminal_cellular_automaton.coordinate import Coordinate
970
"""Tests the get_neighbors method for all Cell types""" @fixture def max_coord(): return Coordinate(2, 2) @test("A centrally located MooreCell will have 8 neighbors in its immediate area") def _():
"""Tests the get_neighbors method for all Cell types""" @fixture def max_coord(): return Coordinate(2, 2) @test("A centrally located MooreCell will have 8 neighbors in its immediate area") def _():
c = MooreCell(Coordinate(1, 1))
0
2023-12-20 21:47:46+00:00
2k
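The test in the record above expects 8 neighbors for a centrally located MooreCell; the Moore neighborhood is every offset in the 3x3 block around a cell except the cell itself, which a two-line sketch makes concrete:

offsets = [(dx, dy) for dx in (-1, 0, 1) for dy in (-1, 0, 1) if (dx, dy) != (0, 0)]
assert len(offsets) == 8  # the 8 cells surrounding any interior coordinate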
zyrant/SPGroup3D
mmdet3d/models/dense_heads/fcaf3d_head.py
[ { "identifier": "rotation_3d_in_axis", "path": "mmdet3d/core/bbox/structures/utils.py", "snippet": "@array_converter(apply_to=('points', 'angles'))\ndef rotation_3d_in_axis(points,\n angles,\n axis=0,\n return_mat=False,\n ...
import MinkowskiEngine as ME import warnings import torch from mmcv.cnn import Scale, bias_init_with_prob from mmcv.ops import nms3d, nms3d_normal from mmcv.runner.base_module import BaseModule from torch import nn from mmdet3d.core.bbox.structures import rotation_3d_in_axis from mmdet3d.models import HEADS, build_loss from mmdet.core import reduce_mean
1,205
# Copyright (c) OpenMMLab. All rights reserved. # Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/dense_heads/fcaf3d_neck_with_head.py # noqa try: import MinkowskiEngine as ME except ImportError: warnings.warn( 'Please follow `getting_started.md` to install MinkowskiEngine.')
# Copyright (c) OpenMMLab. All rights reserved. # Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/dense_heads/fcaf3d_neck_with_head.py # noqa try: import MinkowskiEngine as ME except ImportError: warnings.warn( 'Please follow `getting_started.md` to install MinkowskiEngine.')
@HEADS.register_module()
1
2023-12-21 12:50:35+00:00
2k
jdejaegh/irm-kmi-ha
custom_components/irm_kmi/config_flow.py
[ { "identifier": "IrmKmiApiClient", "path": "custom_components/irm_kmi/api.py", "snippet": "class IrmKmiApiClient:\n \"\"\"API client for IRM KMI weather data\"\"\"\n COORD_DECIMALS = 6\n\n def __init__(self, session: aiohttp.ClientSession) -> None:\n self._session = session\n self...
import logging import async_timeout import voluptuous as vol from homeassistant.components.zone import DOMAIN as ZONE_DOMAIN from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE, CONF_ZONE from homeassistant.core import callback from homeassistant.data_entry_flow import FlowResult from homeassistant.helpers.aiohttp_client import async_get_clientsession from homeassistant.helpers.selector import (EntitySelector, EntitySelectorConfig, SelectSelector, SelectSelectorConfig, SelectSelectorMode) from .api import IrmKmiApiClient from .const import (CONF_DARK_MODE, CONF_STYLE, CONF_STYLE_OPTIONS, CONF_USE_DEPRECATED_FORECAST, CONF_USE_DEPRECATED_FORECAST_OPTIONS, CONFIG_FLOW_VERSION, DOMAIN, OPTION_DEPRECATED_FORECAST_NOT_USED, OPTION_STYLE_STD, OUT_OF_BENELUX) from .utils import get_config_value
1,557
"""Config flow to set up IRM KMI integration via the UI.""" _LOGGER = logging.getLogger(__name__) class IrmKmiConfigFlow(ConfigFlow, domain=DOMAIN): VERSION = CONFIG_FLOW_VERSION @staticmethod @callback def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow: """Create the options flow.""" return IrmKmiOptionFlow(config_entry) async def async_step_user(self, user_input: dict | None = None) -> FlowResult: """Define the user step of the configuration flow.""" errors = {} if user_input: _LOGGER.debug(f"Provided config user is: {user_input}") if (zone := self.hass.states.get(user_input[CONF_ZONE])) is None: errors[CONF_ZONE] = 'zone_not_exist' # Check if zone is in Benelux if not errors: api_data = {} try: async with async_timeout.timeout(10): api_data = await IrmKmiApiClient( session=async_get_clientsession(self.hass)).get_forecasts_coord( {'lat': zone.attributes[ATTR_LATITUDE], 'long': zone.attributes[ATTR_LONGITUDE]} ) except Exception: errors['base'] = "api_error" if api_data.get('cityName', None) in OUT_OF_BENELUX: errors[CONF_ZONE] = 'out_of_benelux' if not errors: await self.async_set_unique_id(user_input[CONF_ZONE]) self._abort_if_unique_id_configured() state = self.hass.states.get(user_input[CONF_ZONE]) return self.async_create_entry( title=state.name if state else "IRM KMI", data={CONF_ZONE: user_input[CONF_ZONE],
"""Config flow to set up IRM KMI integration via the UI.""" _LOGGER = logging.getLogger(__name__) class IrmKmiConfigFlow(ConfigFlow, domain=DOMAIN): VERSION = CONFIG_FLOW_VERSION @staticmethod @callback def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow: """Create the options flow.""" return IrmKmiOptionFlow(config_entry) async def async_step_user(self, user_input: dict | None = None) -> FlowResult: """Define the user step of the configuration flow.""" errors = {} if user_input: _LOGGER.debug(f"Provided config user is: {user_input}") if (zone := self.hass.states.get(user_input[CONF_ZONE])) is None: errors[CONF_ZONE] = 'zone_not_exist' # Check if zone is in Benelux if not errors: api_data = {} try: async with async_timeout.timeout(10): api_data = await IrmKmiApiClient( session=async_get_clientsession(self.hass)).get_forecasts_coord( {'lat': zone.attributes[ATTR_LATITUDE], 'long': zone.attributes[ATTR_LONGITUDE]} ) except Exception: errors['base'] = "api_error" if api_data.get('cityName', None) in OUT_OF_BENELUX: errors[CONF_ZONE] = 'out_of_benelux' if not errors: await self.async_set_unique_id(user_input[CONF_ZONE]) self._abort_if_unique_id_configured() state = self.hass.states.get(user_input[CONF_ZONE]) return self.async_create_entry( title=state.name if state else "IRM KMI", data={CONF_ZONE: user_input[CONF_ZONE],
CONF_STYLE: user_input[CONF_STYLE],
2
2023-12-17 16:35:01+00:00
2k
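The config flow in the record above wraps its API call in async_timeout.timeout(10) so a slow backend fails the step instead of hanging it; a self-contained sketch of that guard, with a stand-in coroutine for the real client call, is:

import asyncio
import async_timeout

async def fetch():
    await asyncio.sleep(0.1)  # stand-in for IrmKmiApiClient.get_forecasts_coord
    return {"cityName": "Brussels"}

async def main():
    async with async_timeout.timeout(10):  # raises TimeoutError if fetch() stalls
        return await fetch()

print(asyncio.run(main()))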
v3ucn/Bert-vits2-V2.2
oldVersion/V210/text/japanese_bert.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, ...
import sys import torch from transformers import AutoModelForMaskedLM, AutoTokenizer from config import config from .japanese import text2sep_kata
976
LOCAL_PATH = "./bert/deberta-v2-large-japanese-char-wwm" tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH) models = dict()
LOCAL_PATH = "./bert/deberta-v2-large-japanese-char-wwm" tokenizer = AutoTokenizer.from_pretrained(LOCAL_PATH) models = dict()
def get_bert_feature(text, word2ph, device=config.bert_gen_config.device):
0
2023-12-18 04:54:46+00:00
2k
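The record above loads the tokenizer eagerly but leaves models as an empty dict, which get_bert_feature presumably fills lazily, one model per device; a hedged sketch of that lazy per-device cache (the real function would also run the forward pass and pool features) is:

from transformers import AutoModelForMaskedLM

LOCAL_PATH = "./bert/deberta-v2-large-japanese-char-wwm"
models = {}

def get_model(device):
    # Load the checkpoint once per device, then reuse it.
    if device not in models:
        models[device] = AutoModelForMaskedLM.from_pretrained(LOCAL_PATH).to(device)
    return models[device]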
NOrangeeroli/SecondPose
model/pcd_cross/modules/transformer/pe_transformer.py
[ { "identifier": "build_dropout_layer", "path": "model/pcd_cross/modules/layers/factory.py", "snippet": "def build_dropout_layer(p: Optional[float], **kwargs) -> nn.Module:\n r\"\"\"Factory function for dropout layer.\"\"\"\n if p is None or p == 0:\n return nn.Identity()\n else:\n ...
import torch import torch.nn as nn import torch.nn.functional as F from einops import rearrange from ..layers import build_dropout_layer from .output_layer import AttentionOutput
1,236
r"""Vanilla Transformer without positional embeddings. The shape of input tensor should be (B, N, C). Implemented with `nn.Linear` and `nn.LayerNorm` (with affine). """ class PEMultiHeadAttention(nn.Module): def __init__(self, d_model, num_heads, dropout=None): super(PEMultiHeadAttention, self).__init__() if d_model % num_heads != 0: raise ValueError('`d_model` ({}) must be a multiple of `num_head` ({}).'.format(d_model, num_heads)) self.d_model = d_model self.num_heads = num_heads self.d_model_per_head = d_model // num_heads self.proj_q = nn.Linear(self.d_model, self.d_model) self.proj_k = nn.Linear(self.d_model, self.d_model) self.proj_v = nn.Linear(self.d_model, self.d_model) self.proj_p = nn.Linear(self.d_model, self.d_model) self.dropout = build_dropout_layer(dropout) def forward( self, input_q, input_k, input_v, embed_q, embed_k, key_masks=None, attention_factors=None, ): """Self-attention with positional embedding forward propagation. Args: input_q: torch.Tensor (B, N, C) input_k: torch.Tensor (B, M, C) input_v: torch.Tensor (B, M, C) embed_q: torch.Tensor (B, N, C) embed_k: torch.Tensor (B, M, C) key_masks: torch.Tensor (B, M), True if ignored, False if preserved attention_factors: torch.Tensor (B, N, M) Returns: hidden_states: torch.Tensor (B, C, N) attention_scores: torch.Tensor (B, H, N, M) """ q = rearrange(self.proj_q(input_q) + self.proj_p(embed_q), 'b n (h c) -> b h n c', h=self.num_heads) k = rearrange(self.proj_k(input_k) + self.proj_p(embed_k), 'b m (h c) -> b h m c', h=self.num_heads) v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads) attention_scores = torch.einsum('bhnc,bhmc->bhnm', q, k) / self.d_model_per_head ** 0.5 if attention_factors is not None: attention_scores = attention_factors.unsqueeze(1) * attention_scores if key_masks is not None: attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf')) attention_scores = F.softmax(attention_scores, dim=-1) attention_scores = self.dropout(attention_scores) hidden_states = torch.matmul(attention_scores, v) hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)') return hidden_states, attention_scores class PEAttentionLayer(nn.Module): def __init__(self, d_model, num_heads, dropout=None): super(PEAttentionLayer, self).__init__() self.attention = PEMultiHeadAttention(d_model, num_heads, dropout=dropout) self.linear = nn.Linear(d_model, d_model) self.dropout = build_dropout_layer(dropout) self.norm = nn.LayerNorm(d_model) def forward( self, input_states, memory_states, input_embeddings, memory_embeddings, memory_masks=None, attention_factors=None, ): hidden_states, attention_scores = self.attention( input_states, memory_states, memory_states, input_embeddings, memory_embeddings, key_masks=memory_masks, attention_factors=attention_factors, ) hidden_states = self.linear(hidden_states) hidden_states = self.dropout(hidden_states) output_states = self.norm(hidden_states + input_states) return output_states, attention_scores class PETransformerLayer(nn.Module): def __init__(self, d_model, num_heads, dropout=None, activation_fn='ReLU'): super(PETransformerLayer, self).__init__() self.attention = PEAttentionLayer(d_model, num_heads, dropout=dropout)
r"""Vanilla Transformer without positional embeddings. The shape of input tensor should be (B, N, C). Implemented with `nn.Linear` and `nn.LayerNorm` (with affine). """ class PEMultiHeadAttention(nn.Module): def __init__(self, d_model, num_heads, dropout=None): super(PEMultiHeadAttention, self).__init__() if d_model % num_heads != 0: raise ValueError('`d_model` ({}) must be a multiple of `num_head` ({}).'.format(d_model, num_heads)) self.d_model = d_model self.num_heads = num_heads self.d_model_per_head = d_model // num_heads self.proj_q = nn.Linear(self.d_model, self.d_model) self.proj_k = nn.Linear(self.d_model, self.d_model) self.proj_v = nn.Linear(self.d_model, self.d_model) self.proj_p = nn.Linear(self.d_model, self.d_model) self.dropout = build_dropout_layer(dropout) def forward( self, input_q, input_k, input_v, embed_q, embed_k, key_masks=None, attention_factors=None, ): """Self-attention with positional embedding forward propagation. Args: input_q: torch.Tensor (B, N, C) input_k: torch.Tensor (B, M, C) input_v: torch.Tensor (B, M, C) embed_q: torch.Tensor (B, N, C) embed_k: torch.Tensor (B, M, C) key_masks: torch.Tensor (B, M), True if ignored, False if preserved attention_factors: torch.Tensor (B, N, M) Returns: hidden_states: torch.Tensor (B, C, N) attention_scores: torch.Tensor (B, H, N, M) """ q = rearrange(self.proj_q(input_q) + self.proj_p(embed_q), 'b n (h c) -> b h n c', h=self.num_heads) k = rearrange(self.proj_k(input_k) + self.proj_p(embed_k), 'b m (h c) -> b h m c', h=self.num_heads) v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads) attention_scores = torch.einsum('bhnc,bhmc->bhnm', q, k) / self.d_model_per_head ** 0.5 if attention_factors is not None: attention_scores = attention_factors.unsqueeze(1) * attention_scores if key_masks is not None: attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf')) attention_scores = F.softmax(attention_scores, dim=-1) attention_scores = self.dropout(attention_scores) hidden_states = torch.matmul(attention_scores, v) hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)') return hidden_states, attention_scores class PEAttentionLayer(nn.Module): def __init__(self, d_model, num_heads, dropout=None): super(PEAttentionLayer, self).__init__() self.attention = PEMultiHeadAttention(d_model, num_heads, dropout=dropout) self.linear = nn.Linear(d_model, d_model) self.dropout = build_dropout_layer(dropout) self.norm = nn.LayerNorm(d_model) def forward( self, input_states, memory_states, input_embeddings, memory_embeddings, memory_masks=None, attention_factors=None, ): hidden_states, attention_scores = self.attention( input_states, memory_states, memory_states, input_embeddings, memory_embeddings, key_masks=memory_masks, attention_factors=attention_factors, ) hidden_states = self.linear(hidden_states) hidden_states = self.dropout(hidden_states) output_states = self.norm(hidden_states + input_states) return output_states, attention_scores class PETransformerLayer(nn.Module): def __init__(self, d_model, num_heads, dropout=None, activation_fn='ReLU'): super(PETransformerLayer, self).__init__() self.attention = PEAttentionLayer(d_model, num_heads, dropout=dropout)
self.output = AttentionOutput(d_model, dropout=dropout, activation_fn=activation_fn)
1
2023-12-16 16:58:33+00:00
2k
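The key-mask handling in PEMultiHeadAttention above (masked_fill with -inf before softmax) is the standard way to stop queries from attending to padded keys; a tiny sketch of just that step is:

import torch
import torch.nn.functional as F

scores = torch.randn(1, 2, 4, 4)                        # (B, H, N, M)
key_masks = torch.tensor([[False, False, True, True]])  # True = key is ignored
scores = scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
attn = F.softmax(scores, dim=-1)  # masked keys receive exactly zero attention weight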
KatantDev/YMdantic
ymdantic/models/artists/artist.py
[ { "identifier": "DeprecatedMixin", "path": "ymdantic/mixins.py", "snippet": "class DeprecatedMixin:\n \"\"\"Миксин, удаляющий устаревшие поля из модели.\"\"\"\n\n @model_validator(mode=\"before\")\n def remove_deprecated(cls, obj: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Удал...
from typing import List, Optional, Dict, Any, Literal from pydantic import model_validator, HttpUrl from ymdantic.mixins import DeprecatedMixin from ymdantic.models.base import YMBaseModel from ymdantic.models.cover import Cover
899
class Artist(YMBaseModel, DeprecatedMixin): """Pydantic model representing information about an artist.""" id: int # Unique identifier of the artist. name: str # The artist's name. various: bool # Flag indicating whether the artist is a group. composer: bool # Flag indicating whether the artist is a composer. genres: List[str] # Genres of the artist's tracks. disclaimers: List[Literal[""]] # TODO: Check what values can appear here. # List of the artist's disclaimers.
class Artist(YMBaseModel, DeprecatedMixin): """Pydantic model representing information about an artist.""" id: int # Unique identifier of the artist. name: str # The artist's name. various: bool # Flag indicating whether the artist is a group. composer: bool # Flag indicating whether the artist is a composer. genres: List[str] # Genres of the artist's tracks. disclaimers: List[Literal[""]] # TODO: Check what values can appear here. # List of the artist's disclaimers.
cover: Optional[Cover] = None
2
2023-12-21 21:24:10+00:00
2k
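Artist in the record above mixes in DeprecatedMixin, whose context snippet shows a model_validator(mode="before") that strips deprecated keys before validation; a minimal pydantic-v2 sketch of the same idea (folding the mixin into a BaseModel for brevity, with a hypothetical deprecated key name) is:

from typing import Any, Dict
from pydantic import BaseModel, model_validator

class DeprecatedMixin(BaseModel):
    @model_validator(mode="before")
    @classmethod
    def remove_deprecated(cls, obj: Dict[str, Any]) -> Dict[str, Any]:
        obj.pop("legacy_field", None)  # hypothetical deprecated key
        return obj

class ArtistSketch(DeprecatedMixin):
    id: int
    name: str

print(ArtistSketch.model_validate({"id": 1, "name": "x", "legacy_field": 0}))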
MichealCodez/awesome-project-ideas
projects/artisans/backend/authentication/views.py
[ { "identifier": "RegisterUserSerializer", "path": "projects/artisans/backend/authentication/serializers.py", "snippet": "class RegisterUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User # We defined the model to be the User model(default django User model).\n fiel...
from rest_framework.views import APIView from .serializers import RegisterUserSerializer, ResetPasswordSerializer from rest_framework.response import Response from rest_framework.exceptions import AuthenticationFailed from django.contrib.auth.models import User from datetime import datetime, timedelta import jwt
1,071
# This is the view logic for registering a user. # We defined the class and it inherits from the APIView class. class RegisterUserView(APIView): def post(self, request): # We defined a post method that takes in a request from a user. # We defined a serializer variable that takes in the RegisterUserSerializer class and passes in the request data. serializer = RegisterUserSerializer(data=request.data) serializer.is_valid(raise_exception=True) # We are checking if the serializer is valid(we raise an exception if it is not valid). serializer.save() # We save the serializer. return Response(serializer.data) # We return the serializer data. # This is the view logic for logging a user in. class LoginUserView(APIView): def post(self, request): # We defined a post method that takes in a request from a user. email = request.data['email'] # We are getting the inputted email from the request data. password = request.data['password'] # We are getting the inputted password from the request data. # Let's check if the user exists in our database. user = User.objects.filter(email=email).first() if user is None: raise AuthenticationFailed('User not found!') # Let's check if the password is correct. if not user.check_password(password): raise AuthenticationFailed('Incorrect password!') # Let's create a payload variable that takes in the user's id and the current time. payload = { 'id':user.id, 'exp':datetime.utcnow() + timedelta(minutes=60), 'iat':datetime.utcnow() } # Let's create a token variable that takes in the payload and the secret key. token = jwt.encode(payload, 'secret', algorithm='HS256') response = Response() # We are setting the cookie to the token. response.set_cookie(key='jwt', value=token, httponly=True) # We are returning the response data and making sure it is in string format. response.data = { 'jwt':token.encode('utf8') } return response # This is the view logic to retrieve a user's data using the token. class UserView(APIView): def get(self, request): token = request.COOKIES.get('jwt') if not token: raise AuthenticationFailed('Unauthenticated!') # We are getting the payload from the token. try: payload = jwt.decode(token, 'secret', algorithms=['HS256']) except jwt.ExpiredSignatureError: raise AuthenticationFailed('Unauthenticated!') # We are getting the user from the payload. user = User.objects.filter(id=payload['id']).first() serializer = RegisterUserSerializer(user) return Response(serializer.data) # This is the view logic to logout a user. class LogoutView(APIView): def post(self, request): response = Response() response.delete_cookie('jwt') # We are deleting the cookie. # We are returning the response data with a success status message. response.data = { 'message':'Logout is successful' } return response # This is the logic for resetting a forgotten password. class ResetPasswordView(APIView): def post(self, request):
# This is the view logic for registering a user. # We defined the class and it inherits from the APIView class. class RegisterUserView(APIView): def post(self, request): # We defined a post method that takes in a request from a user. # We defined a serializer variable that takes in the RegisterUserSerializer class and passes in the request data. serializer = RegisterUserSerializer(data=request.data) serializer.is_valid(raise_exception=True) # We are checking if the serializer is valid(we raise an exception if it is not valid). serializer.save() # We save the serializer. return Response(serializer.data) # We return the serializer data. # This is the view logic for logging a user in. class LoginUserView(APIView): def post(self, request): # We defined a post method that takes in a request from a user. email = request.data['email'] # We are getting the inputted email from the request data. password = request.data['password'] # We are getting the inputted password from the request data. # Let's check if the user exists in our database. user = User.objects.filter(email=email).first() if user is None: raise AuthenticationFailed('User not found!') # Let's check if the password is correct. if not user.check_password(password): raise AuthenticationFailed('Incorrect password!') # Let's create a payload variable that takes in the user's id and the current time. payload = { 'id':user.id, 'exp':datetime.utcnow() + timedelta(minutes=60), 'iat':datetime.utcnow() } # Let's create a token variable that takes in the payload and the secret key. token = jwt.encode(payload, 'secret', algorithm='HS256') response = Response() # We are setting the cookie to the token. response.set_cookie(key='jwt', value=token, httponly=True) # We are returning the response data and making sure it is in string format. response.data = { 'jwt':token.encode('utf8') } return response # This is the view logic to retrieve a user's data using the token. class UserView(APIView): def get(self, request): token = request.COOKIES.get('jwt') if not token: raise AuthenticationFailed('Unauthenticated!') # We are getting the payload from the token. try: payload = jwt.decode(token, 'secret', algorithms=['HS256']) except jwt.ExpiredSignatureError: raise AuthenticationFailed('Unauthenticated!') # We are getting the user from the payload. user = User.objects.filter(id=payload['id']).first() serializer = RegisterUserSerializer(user) return Response(serializer.data) # This is the view logic to logout a user. class LogoutView(APIView): def post(self, request): response = Response() response.delete_cookie('jwt') # We are deleting the cookie. # We are returning the response data with a success status message. response.data = { 'message':'Logout is successful' } return response # This is the logic for resetting a forgotten password. class ResetPasswordView(APIView): def post(self, request):
serializer = ResetPasswordSerializer(data=request.data)
1
2023-12-17 17:21:10+00:00
2k
liuhuang31/hifigan-sr
inference.py
[ { "identifier": "AttrDict", "path": "env.py", "snippet": "class AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self" }, { "identifier": "mel_spectrogram", "path": "meldataset.py", "snippet": "def...
import glob import os import librosa import argparse import json import torch from scipy.io.wavfile import write from env import AttrDict from meldataset import mel_spectrogram, MAX_WAV_VALUE, load_wav from models import Generator
1,598
from __future__ import absolute_import, division, print_function, unicode_literals h = None device = None def load_checkpoint(filepath, device): assert os.path.isfile(filepath) print("Loading '{}'".format(filepath)) checkpoint_dict = torch.load(filepath, map_location=device) print("Complete.") return checkpoint_dict def get_mel(x): return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax) def get_mel_24k(x): return mel_spectrogram(x, 1024, h.num_mels, 24000, 240, 1024, h.fmin, 8000) def scan_checkpoint(cp_dir, prefix): pattern = os.path.join(cp_dir, prefix + '*') cp_list = glob.glob(pattern) if len(cp_list) == 0: return '' return sorted(cp_list)[-1] def inference(a):
from __future__ import absolute_import, division, print_function, unicode_literals h = None device = None def load_checkpoint(filepath, device): assert os.path.isfile(filepath) print("Loading '{}'".format(filepath)) checkpoint_dict = torch.load(filepath, map_location=device) print("Complete.") return checkpoint_dict def get_mel(x): return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax) def get_mel_24k(x): return mel_spectrogram(x, 1024, h.num_mels, 24000, 240, 1024, h.fmin, 8000) def scan_checkpoint(cp_dir, prefix): pattern = os.path.join(cp_dir, prefix + '*') cp_list = glob.glob(pattern) if len(cp_list) == 0: return '' return sorted(cp_list)[-1] def inference(a):
generator = Generator(h).to(device)
4
2023-12-16 01:21:00+00:00
2k
edsu/marctable
test_marctable.py
[ { "identifier": "MARC", "path": "marctable/marc.py", "snippet": "class MARC:\n def __init__(self) -> None:\n self.fields: List[Field] = []\n\n @cache\n def get_field(self, tag: str) -> Field:\n for field in self.fields:\n if field.tag == tag:\n return fie...
import json import pathlib import pandas from io import StringIO from marctable.marc import MARC, SchemaFieldError, SchemaSubfieldError, crawl from marctable.utils import _mapping, dataframe_iter, to_csv, to_dataframe, to_parquet from pytest import raises
1,565
marc = MARC.from_avram() def test_crawl() -> None: # crawl the first 10 field definitions from the loc site (to save time) outfile = StringIO() crawl(10, quiet=True, outfile=outfile) outfile.seek(0) # ensure the Avram JSON parses and looks ok schema = json.load(outfile) assert schema assert len(schema["fields"]) == 10 # ensure that the Avram JSON for a field looks ok assert schema["fields"]["015"] f015 = schema["fields"]["015"] assert f015["label"] == "National Bibliography Number" assert f015["url"] == "https://www.loc.gov/marc/bibliographic/bd015.html" assert len(f015["subfields"]) == 6 # ensure that the Avram JSON for a subfield looks ok assert f015["subfields"]["2"] f0152 = f015["subfields"]["2"] assert f0152["label"] == "Source" assert f0152["code"] == "2" assert f0152["repeatable"] is False def test_marc() -> None: assert len(marc.fields) == 215 def test_get_field() -> None: assert marc.get_field("245")
marc = MARC.from_avram() def test_crawl() -> None: # crawl the first 10 field definitions from the loc site (to save time) outfile = StringIO() crawl(10, quiet=True, outfile=outfile) outfile.seek(0) # ensure the Avram JSON parses and looks ok schema = json.load(outfile) assert schema assert len(schema["fields"]) == 10 # ensure that the Avram JSON for a field looks ok assert schema["fields"]["015"] f015 = schema["fields"]["015"] assert f015["label"] == "National Bibliography Number" assert f015["url"] == "https://www.loc.gov/marc/bibliographic/bd015.html" assert len(f015["subfields"]) == 6 # ensure that the Avram JSON for a subfield looks ok assert f015["subfields"]["2"] f0152 = f015["subfields"]["2"] assert f0152["label"] == "Source" assert f0152["code"] == "2" assert f0152["repeatable"] is False def test_marc() -> None: assert len(marc.fields) == 215 def test_get_field() -> None: assert marc.get_field("245")
with raises(SchemaFieldError, match="abc is not a defined field tag in Avram"):
1
2023-12-21 21:14:29+00:00
2k
WangWenhao0716/ViT4ICD
Stage_23/dg/models_gem_waveblock_balance_cos/resnet_ibn.py
[ { "identifier": "resnet50_ibn_a", "path": "Stage_23/dg/models_gem_waveblock_balance_cos/resnet_ibn_a.py", "snippet": "def resnet50_ibn_a(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"...
from torch import nn from torch.nn import functional as F from torch.nn import init from .resnet_ibn_a import resnet50_ibn_a, resnet101_ibn_a from .gem import GeneralizedMeanPoolingP from .metric import build_metric import torchvision import torch import random
827
from __future__ import absolute_import __all__ = ['ResNetIBN', 'resnet_ibn50a', 'resnet_ibn101a'] class Waveblock(nn.Module): def __init__(self): super().__init__() def forward(self, x): if self.training: h, w = x.size()[-2:] rh = round(0.3 * h) sx = random.randint(0, h-rh) mask = (x.new_ones(x.size()))*1.5 mask[:, :, sx:sx+rh, :] = 1 x = x * mask return x class ResNetIBN(nn.Module): __factory = {
from __future__ import absolute_import __all__ = ['ResNetIBN', 'resnet_ibn50a', 'resnet_ibn101a'] class Waveblock(nn.Module): def __init__(self): super().__init__() def forward(self, x): if self.training: h, w = x.size()[-2:] rh = round(0.3 * h) sx = random.randint(0, h-rh) mask = (x.new_ones(x.size()))*1.5 mask[:, :, sx:sx+rh, :] = 1 x = x * mask return x class ResNetIBN(nn.Module): __factory = {
'50a': resnet50_ibn_a,
0
2023-12-17 11:32:48+00:00
2k
Noubissie237/myShop
myShop/shop/views.py
[ { "identifier": "commandeAnonyme", "path": "myShop/shop/utiles.py", "snippet": "def commandeAnonyme(request, data):\n print(\"utilisateur non authentifie\")\n\n print('cookies', request.COOKIES)\n \n name = data['form']['name']\n print('data', data)\n print('name', name)\n username ...
from django.shortcuts import render from .models import * from django.http import JsonResponse from datetime import datetime from .utiles import commandeAnonyme, data_cookie, panier_cookie import json
1,373
def shop(request, *args, **kwargs): """ Main view """ produits = Produit.objects.all() data = data_cookie(request) nombre_article = data['nombre_article'] context = { 'produits':produits, 'nombre_article': nombre_article } return render(request, 'shop/index.html', context) def panier(request, *args, **kwargs): """ Cart """ data = data_cookie(request) articles = data['articles'] commande = data['commande'] nombre_article = data['nombre_article'] context = { 'articles':articles, 'commande':commande, 'nombre_article':nombre_article } return render(request, 'shop/panier.html', context) def commande(request, *args, **kwargs): """ Order """ data = data_cookie(request) articles = data['articles'] commande = data['commande'] nombre_article = data['nombre_article'] context = { 'articles':articles, 'commande':commande, 'nombre_article': nombre_article } return render(request, 'shop/commande.html', context) def update_article(request, *args, **kwargs): data = json.loads(request.body) produit_id = data['produit_id'] action = data['action'] client = request.user.client produit = Produit.objects.get(id=produit_id) commande, created = Commande.objects.get_or_create(client=client, complete=False) commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit) if action == 'add': commande_article.quantite += 1 if action == 'remove': commande_article.quantite -= 1 commande_article.save() if commande_article.quantite <= 0: commande_article.delete() return JsonResponse("Article ajouté", safe=False) def traitementCommande(request, *args, **kwargs): """ Processing and validation of the order, plus data-integrity checks (fraud detection) """ STATUS_TRANSACTION = ['ACCEPTED', 'COMPLETED', 'SUCESS'] transaction_id = datetime.now().timestamp() data = json.loads(request.body) print(data) if request.user.is_authenticated: client = request.user.client commande, created = Commande.objects.get_or_create(client=client, complete=False) else:
def shop(request, *args, **kwargs): """ Main view """ produits = Produit.objects.all() data = data_cookie(request) nombre_article = data['nombre_article'] context = { 'produits':produits, 'nombre_article': nombre_article } return render(request, 'shop/index.html', context) def panier(request, *args, **kwargs): """ Cart """ data = data_cookie(request) articles = data['articles'] commande = data['commande'] nombre_article = data['nombre_article'] context = { 'articles':articles, 'commande':commande, 'nombre_article':nombre_article } return render(request, 'shop/panier.html', context) def commande(request, *args, **kwargs): """ Order """ data = data_cookie(request) articles = data['articles'] commande = data['commande'] nombre_article = data['nombre_article'] context = { 'articles':articles, 'commande':commande, 'nombre_article': nombre_article } return render(request, 'shop/commande.html', context) def update_article(request, *args, **kwargs): data = json.loads(request.body) produit_id = data['produit_id'] action = data['action'] client = request.user.client produit = Produit.objects.get(id=produit_id) commande, created = Commande.objects.get_or_create(client=client, complete=False) commande_article, created = CommandeArticle.objects.get_or_create(commande=commande, produit=produit) if action == 'add': commande_article.quantite += 1 if action == 'remove': commande_article.quantite -= 1 commande_article.save() if commande_article.quantite <= 0: commande_article.delete() return JsonResponse("Article ajouté", safe=False) def traitementCommande(request, *args, **kwargs): """ Processing and validation of the order, plus data-integrity checks (fraud detection) """ STATUS_TRANSACTION = ['ACCEPTED', 'COMPLETED', 'SUCESS'] transaction_id = datetime.now().timestamp() data = json.loads(request.body) print(data) if request.user.is_authenticated: client = request.user.client commande, created = Commande.objects.get_or_create(client=client, complete=False) else:
client, commande = commandeAnonyme(request, data)
0
2023-12-15 08:06:59+00:00
2k
alibaba/u2mot
yolox/models/yolo_fpn.py
[ { "identifier": "Darknet", "path": "yolox/models/darknet.py", "snippet": "class Darknet(nn.Module):\n # number of blocks from dark2 to dark5.\n depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]}\n\n def __init__(\n self,\n depth,\n in_channels=3,\n stem_out_channels...
import torch import torch.nn as nn from .darknet import Darknet from .network_blocks import BaseConv
1,525
#!/usr/bin/env python3 # -*- encoding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # Copyright (c) Alibaba, Inc. and its affiliates. class YOLOFPN(nn.Module): """ YOLOFPN module. Darknet 53 is the default backbone of this model. """ def __init__( self, depth=53, in_features=["dark3", "dark4", "dark5"], ): super().__init__() self.backbone = Darknet(depth) self.in_features = in_features # out 1 self.out1_cbl = self._make_cbl(512, 256, 1) self.out1 = self._make_embedding([256, 512], 512 + 256) # out 2 self.out2_cbl = self._make_cbl(256, 128, 1) self.out2 = self._make_embedding([128, 256], 256 + 128) # upsample self.upsample = nn.Upsample(scale_factor=2, mode="nearest") def _make_cbl(self, _in, _out, ks):
#!/usr/bin/env python3 # -*- encoding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. # Copyright (c) Alibaba, Inc. and its affiliates. class YOLOFPN(nn.Module): """ YOLOFPN module. Darknet 53 is the default backbone of this model. """ def __init__( self, depth=53, in_features=["dark3", "dark4", "dark5"], ): super().__init__() self.backbone = Darknet(depth) self.in_features = in_features # out 1 self.out1_cbl = self._make_cbl(512, 256, 1) self.out1 = self._make_embedding([256, 512], 512 + 256) # out 2 self.out2_cbl = self._make_cbl(256, 128, 1) self.out2 = self._make_embedding([128, 256], 256 + 128) # upsample self.upsample = nn.Upsample(scale_factor=2, mode="nearest") def _make_cbl(self, _in, _out, ks):
return BaseConv(_in, _out, ks, stride=1, act="lrelu")
1
2023-12-18 10:04:40+00:00
2k
liuhuang31/HiFTNet-sr
models.py
[ { "identifier": "init_weights", "path": "utils.py", "snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)" }, { "identifier": "get_padding", "path": "utils.py", "snippet...
import torch import torch.nn.functional as F import torch.nn as nn import numpy as np from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm from utils import init_weights, get_padding from stft import TorchSTFT
645
LRELU_SLOPE = 0.1 class ResBlock1(torch.nn.Module): def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() self.h = h self.convs1 = nn.ModuleList([ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], padding=get_padding(kernel_size, dilation[2]))) ])
LRELU_SLOPE = 0.1 class ResBlock1(torch.nn.Module): def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() self.h = h self.convs1 = nn.ModuleList([ weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], padding=get_padding(kernel_size, dilation[0]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], padding=get_padding(kernel_size, dilation[1]))), weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], padding=get_padding(kernel_size, dilation[2]))) ])
self.convs1.apply(init_weights)
0
2023-12-16 03:53:55+00:00
2k
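ResBlock1 in the record above computes 'same' padding for each dilated convolution through the get_padding helper; in the HiFi-GAN family that helper is conventionally (kernel_size * dilation - dilation) // 2 (the exact body is truncated in the record's context), which a one-liner sketch captures:

def get_padding(kernel_size, dilation=1):
    # 'Same' output length for a stride-1 dilated 1-D convolution.
    return int((kernel_size * dilation - dilation) / 2)

assert get_padding(3, 5) == 5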
m-abr/FCPCodebase
behaviors/custom/Step/Step.py
[ { "identifier": "Base_Agent", "path": "agent/Base_Agent.py", "snippet": "class Base_Agent():\n all_agents = []\n\n def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int, robot_type:int, team_name:str, enable_log:bool=True,\n enable_draw:bool=True, apply_play_mode...
from agent.Base_Agent import Base_Agent from behaviors.custom.Step.Step_Generator import Step_Generator import numpy as np
1,450
class Step(): def __init__(self, base_agent : Base_Agent) -> None: self.world = base_agent.world self.ik = base_agent.inv_kinematics self.description = "Step (Skill-Set-Primitive)" self.auto_head = True nao_specs = self.ik.NAO_SPECS self.leg_length = nao_specs[1] + nao_specs[3] # upper leg height + lower leg height feet_y_dev = nao_specs[0] * 1.2 # wider step sample_time = self.world.robot.STEPTIME max_ankle_z = nao_specs[5] # Initialize step generator with constants
class Step(): def __init__(self, base_agent : Base_Agent) -> None: self.world = base_agent.world self.ik = base_agent.inv_kinematics self.description = "Step (Skill-Set-Primitive)" self.auto_head = True nao_specs = self.ik.NAO_SPECS self.leg_length = nao_specs[1] + nao_specs[3] # upper leg height + lower leg height feet_y_dev = nao_specs[0] * 1.2 # wider step sample_time = self.world.robot.STEPTIME max_ankle_z = nao_specs[5] # Initialize step generator with constants
self.step_generator = Step_Generator(feet_y_dev, sample_time, max_ankle_z)
1
2023-12-16 23:40:23+00:00
2k
koenhendriks/ha-button-plus
custom_components/button_plus/buttonplushub.py
[ { "identifier": "LocalApiClient", "path": "custom_components/button_plus/button_plus_api/local_api_client.py", "snippet": "class LocalApiClient:\n \"\"\" Client to talk to Button+ local devices \"\"\"\n\n def __init__(self, ip_address, session) -> None:\n self._base = f\"http://{ip_address}...
import logging from homeassistant.config_entries import ConfigEntry from homeassistant.helpers import device_registry as dr from .button_plus_api.local_api_client import LocalApiClient from .button_plus_api.model import DeviceConfiguration from homeassistant.core import HomeAssistant from .const import DOMAIN, MANUFACTURER from homeassistant.helpers import aiohttp_client
1,506
"""Button+ connects several devices.""" from __future__ import annotations _LOGGER: logging.Logger = logging.getLogger(__package__) class ButtonPlusHub: """hub for Button+.""" def __init__(self, hass: HomeAssistant, config: DeviceConfiguration, entry: ConfigEntry) -> None: _LOGGER.debug(f"New hub with config {config.core}") self._hass = hass self.config = config self._name = config.core.name self._id = self.config.info.device_id self._client = LocalApiClient(config.info.ip_address, aiohttp_client.async_get_clientsession(hass)) self.online = True self.button_entities = {} self.label_entities = {} self.top_label_entities = {} device_registry = dr.async_get(hass) device_registry.async_get_or_create( configuration_url=f"http://{self.config.info.ip_address}/", config_entry_id=entry.entry_id, connections={(dr.CONNECTION_NETWORK_MAC, self.config.info.mac)},
"""Button+ connects several devices.""" from __future__ import annotations _LOGGER: logging.Logger = logging.getLogger(__package__) class ButtonPlusHub: """hub for Button+.""" def __init__(self, hass: HomeAssistant, config: DeviceConfiguration, entry: ConfigEntry) -> None: _LOGGER.debug(f"New hub with config {config.core}") self._hass = hass self.config = config self._name = config.core.name self._id = self.config.info.device_id self._client = LocalApiClient(config.info.ip_address, aiohttp_client.async_get_clientsession(hass)) self.online = True self.button_entities = {} self.label_entities = {} self.top_label_entities = {} device_registry = dr.async_get(hass) device_registry.async_get_or_create( configuration_url=f"http://{self.config.info.ip_address}/", config_entry_id=entry.entry_id, connections={(dr.CONNECTION_NETWORK_MAC, self.config.info.mac)},
identifiers={(DOMAIN, self.config.info.device_id)},
2
2023-12-18 15:14:21+00:00
2k
RosettaCommons/AF2_peptide_hallucination
run.py
[ { "identifier": "select_positions", "path": "util/util.py", "snippet": "def select_positions(n_mutations, boundcomplex, select_positions, select_position_params):\n '''\n Select mutable positions in the binder based on a specific method.\n Returns a dictionary of binder with associated array in...
import os import sys import numpy as np import hydra import copy from submodules.oligomer_hallucination.oligomer_hallucination import Protomers, Oligomer from submodules.oligomer_hallucination.oligomer_hallucination import AA_FREQ from submodules.oligomer_hallucination.modules.af2_net import setup_models, predict_structure from submodules.oligomer_hallucination.modules.mutations import mutate from util.util import select_positions from util import util from util.loss import compute_loss from omegaconf import DictConfig, OmegaConf from hydra.core.hydra_config import HydraConfig
1,507
class BoundComplex(Protomers, Oligomer): ''' Class for keeping track of binder sequence and complex predictions during binder hallucination. ''' def __init__(self, target_sequence: str, name, length=70, aa_freq={}, binder_sequence=None): """ target_sequence: amino acid sequence of target peptide (to bind) length: length of binder peptide binder_sequence: Optional, starting amino acid sequence of the binder aa_freq: dictionary containing the frequencies of each aa """ self.target_seq = target_sequence.upper() assert len(self.target_seq) > 0, "Target sequence must be provided" self.length = int(length) self.aa_freq = aa_freq # Get initial binder sequence if binder_sequence: assert self.length > 0, "Binder length must be greater than 0" self.init_binder_seq = binder_sequence.upper() else: self.init_binder_seq = ''.join(np.random.choice(list(aa_freq.keys()), size = length, p=list(aa_freq.values()))) self.binder_length = len(self.init_binder_seq) self.target_length = len(self.target_seq) self.chain_Ls = [self.binder_length, self.target_length] self.init_bound_seq = self.init_binder_seq + self.target_seq self.bound_length = len(self.init_bound_seq) # Initialize current and try sequences, self.current_binder_seq = self.init_binder_seq self.try_binder_seq = self.init_binder_seq self.current_bound_seq = self.init_bound_seq self.try_seq = self.init_bound_seq self.name=name def init_scores(self, scores): '''Initialise scores''' self.init_scores = scores self.current_scores = scores self.try_scores = scores def assign_scores(self, scores): '''Assign try scores. ''' self.try_scores = scores def update_scores(self): '''Update current scores to try scores.''' self.current_scores = copy.deepcopy(self.try_scores) @hydra.main(version_base=None, config_path='config', config_name='base') def main(conf: HydraConfig) -> None: """ Main function for running peptide binder hallucination. """ input_conf=conf.input output_conf=conf.output loss_conf=conf.loss model_conf=conf.model hallucination_conf=conf.hallucination os.makedirs(output_conf.out_dir, exist_ok=True) if output_conf.cautious and os.path.exists(f'{output_conf.out_dir}/{output_conf.out_prefix}_step_00000.pdb'): sys.exit(f'Specified output already exists. Exiting. To overwrite, provide output.cautious=False')
class BoundComplex(Protomers, Oligomer): ''' Class for keeping track of binder sequence and complex predictions during binder hallucination. ''' def __init__(self, target_sequence: str, name, length=70, aa_freq={}, binder_sequence=None): """ target_sequence: amino acid sequence of target peptide (to bind) length: length of binder peptide binder_sequence: Optional, starting amino acid sequence of the binder aa_freq: dictionary containing the frequencies of each aa """ self.target_seq = target_sequence.upper() assert len(self.target_seq) > 0, "Target sequence must be provided" self.length = int(length) self.aa_freq = aa_freq # Get initial binder sequence if binder_sequence: assert self.length > 0, "Binder length must be greater than 0" self.init_binder_seq = binder_sequence.upper() else: self.init_binder_seq = ''.join(np.random.choice(list(aa_freq.keys()), size = length, p=list(aa_freq.values()))) self.binder_length = len(self.init_binder_seq) self.target_length = len(self.target_seq) self.chain_Ls = [self.binder_length, self.target_length] self.init_bound_seq = self.init_binder_seq + self.target_seq self.bound_length = len(self.init_bound_seq) # Initialize current and try sequences, self.current_binder_seq = self.init_binder_seq self.try_binder_seq = self.init_binder_seq self.current_bound_seq = self.init_bound_seq self.try_seq = self.init_bound_seq self.name=name def init_scores(self, scores): '''Initialise scores''' self.init_scores = scores self.current_scores = scores self.try_scores = scores def assign_scores(self, scores): '''Assign try scores. ''' self.try_scores = scores def update_scores(self): '''Update current scores to try scores.''' self.current_scores = copy.deepcopy(self.try_scores) @hydra.main(version_base=None, config_path='config', config_name='base') def main(conf: HydraConfig) -> None: """ Main function for running peptide binder hallucination. """ input_conf=conf.input output_conf=conf.output loss_conf=conf.loss model_conf=conf.model hallucination_conf=conf.hallucination os.makedirs(output_conf.out_dir, exist_ok=True) if output_conf.cautious and os.path.exists(f'{output_conf.out_dir}/{output_conf.out_prefix}_step_00000.pdb'): sys.exit(f'Specified output already exists. Exiting. To overwrite, provide output.cautious=False')
AA_freq=util.get_aa_freq(AA_FREQ, hallucination_conf.exclude_AA)
1
2023-12-21 12:07:25+00:00
2k
Cypas/splatoon3-schedule
nonebot_plugin_splatoon3_schedule/utils/utils.py
[ { "identifier": "TimeUtil", "path": "nonebot_plugin_splatoon3_schedule/utils/dataClass.py", "snippet": "class TimeUtil(object):\n @classmethod\n def parse_timezone(cls, timezone):\n \"\"\"\n 解析时区表示\n :param timezone: str eg: +8\n :return: dict{symbol, offset}\n \...
import datetime import cfscrape import httpx from httpx import Response from .dataClass import TimeUtil from ..config import plugin_config
1,412
"Ranked Challenge": (227, 68, 17), "Ranked Open": (24, 200, 26), "X Schedule": (14, 205, 147), "打工": (14, 203, 146), "活动": (223, 42, 119), "祭典": (103, 103, 114), "祭典时间-金黄": (234, 255, 61), "上-武器卡片-黄": (234, 255, 61), "下-武器卡片-蓝": (96, 58, 255), "上-武器卡片": (255, 148, 157), "下-武器卡片": (124, 217, 127), "祭典结算项目卡片": (63, 63, 70, 70), } def cf_http_get(url: str): """cf get""" # 实例化一个create_scraper对象 scraper = cfscrape.create_scraper() # 请求报错,可以加上时延 # scraper = cfscrape.create_scraper(delay = 6) if proxy_address: cf_proxies = { "http": "http://{}".format(proxy_address), "https": "http://{}".format(proxy_address), } # 获取网页内容 代理访问 res = scraper.get(url, proxies=cf_proxies) else: # 获取网页内容 res = scraper.get(url) return res async def async_http_get(url: str) -> Response: """async http_get""" async with httpx.AsyncClient(proxies=proxies) as client: response = await client.get(url, timeout=HTTP_TIME_OUT) return response def http_get(url: str) -> Response: """http_get""" response = httpx.get(url, proxies=proxies, timeout=HTTP_TIME_OUT) return response def multiple_replace(text, _dict): """批量替换文本""" for key in _dict: text = text.replace(key, _dict[key]) return text def get_expire_time() -> str: """计算过期时间 字符串 精确度为 ymdh""" # 计算过期时间 time_now = get_time_now_china() time_now_h = time_now.hour # 计算过期时间字符串 # 判断当前小时是奇数还是偶数 expire_time: datetime if (time_now_h % 2) == 0: # 偶数 expire_time = time_now + datetime.timedelta(hours=2) else: expire_time = time_now + datetime.timedelta(hours=1) expire_time_str = expire_time.strftime(time_format_ymdh).strip() return expire_time_str def time_converter(time_str) -> datetime: """时间转换 年-月-日 时:分:秒""" # convert time to UTC+8 dt = datetime.datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ") dt += datetime.timedelta(hours=8) return dt def time_converter_yd(time_str): """时间转换 月-日""" dt = time_converter(time_str) return datetime.datetime.strftime(dt, "%m.%d") def time_converter_hm(time_str): """时间转换 时:分""" dt = time_converter(time_str) return datetime.datetime.strftime(dt, "%H:%M") def time_converter_mdhm(time_str): """时间转换 月-日 时:分""" dt = time_converter(time_str) return datetime.datetime.strftime(dt, "%m-%d %H:%M") def time_converter_weekday(time_str): """时间转换 周几,如周一""" dt = time_converter(time_str) weekday = dt.weekday() return weekday def get_time_ymd(): """获取年月日""" dt = get_time_now_china().strftime("%Y-%m-%d") return dt def get_time_y() -> int: """获取年""" year = get_time_now_china().year return year def get_time_now_china() -> datetime.datetime: """获取中国所在东八区时间""" # 获取utc时间,然后转东8区时间 utc_now = datetime.datetime.utcnow()
time_format_ymdh = "%Y-%m-%dT%H" HTTP_TIME_OUT = 5.0 # 请求超时,秒 proxy_address = plugin_config.splatoon3_proxy_address if proxy_address: proxies = "http://{}".format(proxy_address) else: proxies = None # 背景 rgb颜色 dict_bg_rgb = { "Turf War": (24, 200, 26), "Ranked Challenge": (227, 68, 17), "Ranked Open": (24, 200, 26), "X Schedule": (14, 205, 147), "打工": (14, 203, 146), "活动": (223, 42, 119), "祭典": (103, 103, 114), "祭典时间-金黄": (234, 255, 61), "上-武器卡片-黄": (234, 255, 61), "下-武器卡片-蓝": (96, 58, 255), "上-武器卡片": (255, 148, 157), "下-武器卡片": (124, 217, 127), "祭典结算项目卡片": (63, 63, 70, 70), } def cf_http_get(url: str): """cf get""" # 实例化一个create_scraper对象 scraper = cfscrape.create_scraper() # 请求报错,可以加上时延 # scraper = cfscrape.create_scraper(delay = 6) if proxy_address: cf_proxies = { "http": "http://{}".format(proxy_address), "https": "http://{}".format(proxy_address), } # 获取网页内容 代理访问 res = scraper.get(url, proxies=cf_proxies) else: # 获取网页内容 res = scraper.get(url) return res async def async_http_get(url: str) -> Response: """async http_get""" async with httpx.AsyncClient(proxies=proxies) as client: response = await client.get(url, timeout=HTTP_TIME_OUT) return response def http_get(url: str) -> Response: """http_get""" response = httpx.get(url, proxies=proxies, timeout=HTTP_TIME_OUT) return response def multiple_replace(text, _dict): """批量替换文本""" for key in _dict: text = text.replace(key, _dict[key]) return text def get_expire_time() -> str: """计算过期时间 字符串 精确度为 ymdh""" # 计算过期时间 time_now = get_time_now_china() time_now_h = time_now.hour # 计算过期时间字符串 # 判断当前小时是奇数还是偶数 expire_time: datetime if (time_now_h % 2) == 0: # 偶数 expire_time = time_now + datetime.timedelta(hours=2) else: expire_time = time_now + datetime.timedelta(hours=1) expire_time_str = expire_time.strftime(time_format_ymdh).strip() return expire_time_str def time_converter(time_str) -> datetime: """时间转换 年-月-日 时:分:秒""" # convert time to UTC+8 dt = datetime.datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%SZ") dt += datetime.timedelta(hours=8) return dt def time_converter_yd(time_str): """时间转换 月-日""" dt = time_converter(time_str) return datetime.datetime.strftime(dt, "%m.%d") def time_converter_hm(time_str): """时间转换 时:分""" dt = time_converter(time_str) return datetime.datetime.strftime(dt, "%H:%M") def time_converter_mdhm(time_str): """时间转换 月-日 时:分""" dt = time_converter(time_str) return datetime.datetime.strftime(dt, "%m-%d %H:%M") def time_converter_weekday(time_str): """时间转换 周几,如周一""" dt = time_converter(time_str) weekday = dt.weekday() return weekday def get_time_ymd(): """获取年月日""" dt = get_time_now_china().strftime("%Y-%m-%d") return dt def get_time_y() -> int: """获取年""" year = get_time_now_china().year return year def get_time_now_china() -> datetime.datetime: """获取中国所在东八区时间""" # 获取utc时间,然后转东8区时间 utc_now = datetime.datetime.utcnow()
convert_now = TimeUtil.convert_timezone(utc_now, "+8")
0
2023-12-17 07:49:26+00:00
2k
Sam-Izdat/tinycio
src/tinycio/fsio/imagefile.py
[ { "identifier": "GraphicsFormat", "path": "src/tinycio/fsio/format.py", "snippet": "class GraphicsFormat(IntEnum):\n \"\"\"\n The graphics format of an image file to be saved or loaded. For a list of available options, see :ref:`ref_graphics_formats`.\n \"\"\"\n UNKNOWN = 1<<0\n ...
import torch import numpy as np import typing import os import imageio.v3 as iio from .format import GraphicsFormat, ImageFileFormat
699
def _infer_image_file_format(ext:str) -> ImageFileFormat: ext = ext.strip().lower() if ext == '.png': return ImageFileFormat.PNG elif ext == '.jpg': return ImageFileFormat.JPG elif ext == '.jpeg': return ImageFileFormat.JPG elif ext == '.exr': return ImageFileFormat.EXR elif ext == '.tif': return ImageFileFormat.TIFF elif ext == '.tiff': return ImageFileFormat.TIFF elif ext == '.webp': return ImageFileFormat.WEBP else: return ImageFileFormat.UNKNOWN
def _infer_image_file_format(ext:str) -> ImageFileFormat: ext = ext.strip().lower() if ext == '.png': return ImageFileFormat.PNG elif ext == '.jpg': return ImageFileFormat.JPG elif ext == '.jpeg': return ImageFileFormat.JPG elif ext == '.exr': return ImageFileFormat.EXR elif ext == '.tif': return ImageFileFormat.TIFF elif ext == '.tiff': return ImageFileFormat.TIFF elif ext == '.webp': return ImageFileFormat.WEBP else: return ImageFileFormat.UNKNOWN
def load_image(fp:str, graphics_format:GraphicsFormat=GraphicsFormat.UNKNOWN) -> torch.Tensor:
0
2023-12-15 15:39:08+00:00
2k
Dank-del/stats-bot
stats_bot/handlers/plot.py
[ { "identifier": "Attachment", "path": "stats_bot/db/models.py", "snippet": "class Attachment(SQLModel, table=True):\n id: Optional[int] = Field(default=None, primary_key=True)\n user_id: int = Field(foreign_key=\"user.id\")\n group_id: int = Field(foreign_key=\"group.id\")\n message_id: int ...
import pandas as pd import matplotlib.pyplot as plt import io from sqlmodel import Session, select from telegram import Update from telegram.ext import ( ContextTypes, ) from stats_bot.db.models import Attachment, Message, User from stats_bot.db.client import engine from stats_bot.decorators.admin import admin
1,204
@admin async def plot_table(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """ Generates a table of top 10 users by number of messages and average message length, and plots a bar chart to visualize the data. Args: update (Update): The update object containing information about the incoming message. context (ContextTypes.DEFAULT_TYPE): The context object containing bot-related information. Returns: None """ msg = await update.effective_message.reply_text("Generating table...") data = [] # fetch this data from database with Session(engine) as session: # users = session.exec(select(User)).all() messages = session.exec( select(Message).where(Message.group_id == update.effective_chat.id) ).all() # make a list of users, messages of whom are in the messages variable users = [] for message in messages: if message.user_id not in users: users.append(message.user_id) # print(users) for user in users: usr = session.exec(select(User).where(User.id == user)).first() msgs = session.exec( select(Message.text).where(Message.user_id == usr.id) ).all() data.append((usr.username or str(usr.id), msgs)) # Convert data to a pandas DataFrame df = pd.DataFrame(data, columns=["user_id", "messages"]) print(df) df["num_messages"] = df["messages"].apply(len) # Calculate average message length per user df["avg_message_length"] = df["messages"].apply( lambda x: sum(len(message) for message in x) / len(x) ) # Sort users by number of messages and average message length df = df.sort_values(by=["num_messages", "avg_message_length"], ascending=False) # Plot top 10 users top_10_users = df.head(10) plt.figure(figsize=(10, 6)) plt.bar( top_10_users["user_id"], top_10_users["num_messages"], color="blue", alpha=0.6, label="Number of Messages", ) plt.xlabel("User ID") plt.ylabel("Number of Messages") plt.title( f"Top 10 Users in {update.effective_chat.title} by Number of Messages and Average Message Length" ) plt.legend() buf = io.BytesIO() plt.savefig(buf, format="png") buf.seek(0) await msg.delete() await context.bot.send_photo( chat_id=update.effective_chat.id, photo=buf, reply_to_message_id=msg.reply_to_message.message_id, ) @admin async def attachment_stats(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """ Generates a table of top 10 users by number of attachments sent, and plots a bar chart to visualize the data. Args: update (Update): The update object containing information about the incoming message. context (CallbackContext): The context object containing bot-related information. Returns: None """ msg = await update.effective_message.reply_text("Generating attachment stats...") data = [] # fetch this data from database with Session(engine) as session: attachments = session.exec(
@admin async def plot_table(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """ Generates a table of top 10 users by number of messages and average message length, and plots a bar chart to visualize the data. Args: update (Update): The update object containing information about the incoming message. context (ContextTypes.DEFAULT_TYPE): The context object containing bot-related information. Returns: None """ msg = await update.effective_message.reply_text("Generating table...") data = [] # fetch this data from database with Session(engine) as session: # users = session.exec(select(User)).all() messages = session.exec( select(Message).where(Message.group_id == update.effective_chat.id) ).all() # make a list of users, messages of whom are in the messages variable users = [] for message in messages: if message.user_id not in users: users.append(message.user_id) # print(users) for user in users: usr = session.exec(select(User).where(User.id == user)).first() msgs = session.exec( select(Message.text).where(Message.user_id == usr.id) ).all() data.append((usr.username or str(usr.id), msgs)) # Convert data to a pandas DataFrame df = pd.DataFrame(data, columns=["user_id", "messages"]) print(df) df["num_messages"] = df["messages"].apply(len) # Calculate average message length per user df["avg_message_length"] = df["messages"].apply( lambda x: sum(len(message) for message in x) / len(x) ) # Sort users by number of messages and average message length df = df.sort_values(by=["num_messages", "avg_message_length"], ascending=False) # Plot top 10 users top_10_users = df.head(10) plt.figure(figsize=(10, 6)) plt.bar( top_10_users["user_id"], top_10_users["num_messages"], color="blue", alpha=0.6, label="Number of Messages", ) plt.xlabel("User ID") plt.ylabel("Number of Messages") plt.title( f"Top 10 Users in {update.effective_chat.title} by Number of Messages and Average Message Length" ) plt.legend() buf = io.BytesIO() plt.savefig(buf, format="png") buf.seek(0) await msg.delete() await context.bot.send_photo( chat_id=update.effective_chat.id, photo=buf, reply_to_message_id=msg.reply_to_message.message_id, ) @admin async def attachment_stats(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None: """ Generates a table of top 10 users by number of attachments sent, and plots a bar chart to visualize the data. Args: update (Update): The update object containing information about the incoming message. context (CallbackContext): The context object containing bot-related information. Returns: None """ msg = await update.effective_message.reply_text("Generating attachment stats...") data = [] # fetch this data from database with Session(engine) as session: attachments = session.exec(
select(Attachment).where(Attachment.group_id == update.effective_chat.id)
0
2023-12-18 03:05:36+00:00
2k
EzyGang/py-cachify
py_cachify/backend/lib.py
[ { "identifier": "AsyncWrapper", "path": "py_cachify/backend/clients.py", "snippet": "class AsyncWrapper:\n def __init__(self, cache: MemoryCache) -> None:\n self._cache = cache\n\n async def get(self, name: str, default: Any = None) -> Any:\n return self._cache.get(name=name, default...
import pickle from typing import Any, Union from py_cachify.backend.clients import AsyncWrapper, MemoryCache from py_cachify.backend.exceptions import CachifyInitError from py_cachify.backend.types import AsyncClient, SyncClient
664
from __future__ import annotations class Cachify: def __init__(
from __future__ import annotations class Cachify: def __init__(
self, sync_client: Union[SyncClient, MemoryCache], async_client: Union[AsyncClient, AsyncWrapper], prefix: str
1
2023-12-16 22:54:51+00:00
2k
lldacing/comfyui-easyapi-nodes
easyapi/ImageNode.py
[ { "identifier": "tensor_to_pil", "path": "easyapi/util.py", "snippet": "def tensor_to_pil(image):\n return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))" }, { "identifier": "pil_to_tensor", "path": "easyapi/util.py", "snippet": "def pil_to_ten...
import base64 import copy import io import numpy as np import torch import json from PIL import ImageOps, Image from nodes import LoadImage from comfy.cli_args import args from PIL.PngImagePlugin import PngInfo from json import JSONEncoder, JSONDecoder from easyapi.util import tensor_to_pil, pil_to_tensor, base64_to_image, image_to_base64, read_image_from_url
1,382
class LoadImageFromURL: """ 从远程地址读取图片 """ @classmethod def INPUT_TYPES(self): return {"required": { "urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}), }, } RETURN_TYPES = ("IMAGE", "MASK") RETURN_NAMES = ("images", "masks") FUNCTION = "convert" CATEGORY = "EasyApi/Image" # INPUT_IS_LIST = False OUTPUT_IS_LIST = (True, True,) def convert(self, urls): urls = urls.splitlines() images = [] masks = [] for url in urls: if not url.strip().isspace(): i = read_image_from_url(url.strip()) i = ImageOps.exif_transpose(i) image = i.convert("RGB") image = pil_to_tensor(image) images.append(image) if 'A' in i.getbands(): mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 mask = 1. - torch.from_numpy(mask) else: mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") masks.append(mask) return (images, masks, ) class LoadMaskFromURL: """ 从远程地址读取图片 """ _color_channels = ["red", "green", "blue", "alpha"] @classmethod def INPUT_TYPES(self): return { "required": { "urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}), "channel": (self._color_channels, {"default": self._color_channels[0]}), }, } RETURN_TYPES = ("MASK", ) RETURN_NAMES = ("masks", ) FUNCTION = "convert" CATEGORY = "EasyApi/Image" # INPUT_IS_LIST = False OUTPUT_IS_LIST = (True, True,) def convert(self, urls, channel=_color_channels[0]): urls = urls.splitlines() masks = [] for url in urls: if not url.strip().isspace(): i = read_image_from_url(url.strip()) # 下面代码参考LoadImage i = ImageOps.exif_transpose(i) if i.getbands() != ("R", "G", "B", "A"): i = i.convert("RGBA") c = channel[0].upper() if c in i.getbands(): mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0 mask = torch.from_numpy(mask) if c == 'A': mask = 1. - mask else: mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") masks.append(mask) return (masks,) class Base64ToImage: """ 图片的base64格式还原成图片的张量 """ @classmethod def INPUT_TYPES(self): return {"required": { "base64Images": ("STRING", {"multiline": True, "default": "[\"\"]", "dynamicPrompts": False}), }, } RETURN_TYPES = ("IMAGE", "MASK") # RETURN_NAMES = ("image", "mask") FUNCTION = "convert" CATEGORY = "EasyApi/Image" # INPUT_IS_LIST = False OUTPUT_IS_LIST = (True, True) def convert(self, base64Images): # print(base64Image) base64ImageJson = JSONDecoder().decode(s=base64Images) images = [] masks = [] for base64Image in base64ImageJson:
class LoadImageFromURL: """ 从远程地址读取图片 """ @classmethod def INPUT_TYPES(self): return {"required": { "urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}), }, } RETURN_TYPES = ("IMAGE", "MASK") RETURN_NAMES = ("images", "masks") FUNCTION = "convert" CATEGORY = "EasyApi/Image" # INPUT_IS_LIST = False OUTPUT_IS_LIST = (True, True,) def convert(self, urls): urls = urls.splitlines() images = [] masks = [] for url in urls: if not url.strip().isspace(): i = read_image_from_url(url.strip()) i = ImageOps.exif_transpose(i) image = i.convert("RGB") image = pil_to_tensor(image) images.append(image) if 'A' in i.getbands(): mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0 mask = 1. - torch.from_numpy(mask) else: mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") masks.append(mask) return (images, masks, ) class LoadMaskFromURL: """ 从远程地址读取图片 """ _color_channels = ["red", "green", "blue", "alpha"] @classmethod def INPUT_TYPES(self): return { "required": { "urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}), "channel": (self._color_channels, {"default": self._color_channels[0]}), }, } RETURN_TYPES = ("MASK", ) RETURN_NAMES = ("masks", ) FUNCTION = "convert" CATEGORY = "EasyApi/Image" # INPUT_IS_LIST = False OUTPUT_IS_LIST = (True, True,) def convert(self, urls, channel=_color_channels[0]): urls = urls.splitlines() masks = [] for url in urls: if not url.strip().isspace(): i = read_image_from_url(url.strip()) # 下面代码参考LoadImage i = ImageOps.exif_transpose(i) if i.getbands() != ("R", "G", "B", "A"): i = i.convert("RGBA") c = channel[0].upper() if c in i.getbands(): mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0 mask = torch.from_numpy(mask) if c == 'A': mask = 1. - mask else: mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu") masks.append(mask) return (masks,) class Base64ToImage: """ 图片的base64格式还原成图片的张量 """ @classmethod def INPUT_TYPES(self): return {"required": { "base64Images": ("STRING", {"multiline": True, "default": "[\"\"]", "dynamicPrompts": False}), }, } RETURN_TYPES = ("IMAGE", "MASK") # RETURN_NAMES = ("image", "mask") FUNCTION = "convert" CATEGORY = "EasyApi/Image" # INPUT_IS_LIST = False OUTPUT_IS_LIST = (True, True) def convert(self, base64Images): # print(base64Image) base64ImageJson = JSONDecoder().decode(s=base64Images) images = [] masks = [] for base64Image in base64ImageJson:
i = base64_to_image(base64Image)
2
2023-12-19 02:32:10+00:00
2k
bersegosx/passosh
src/passosh/pesso.py
[ { "identifier": "HeaderField", "path": "src/passosh/fields.py", "snippet": "class HeaderField:\n \"\"\"\n An object that represents the fields that display information at the top of a pass.\n \"\"\"\n key: str\n value: str\n label: str = ''\n textAlignment: str = TextAlignment.NATUR...
from dataclasses import dataclass from .fields import (HeaderField, PrimaryField, SecondaryField, BackField, AuxiliaryField, Barcode, BoardingPassTransitType, Location)
786
@dataclass class Content: """ An object that represents the groups of fields that display the information for an event ticket. """ headerFields: list[HeaderField] | None = None primaryFields: list[PrimaryField] | None = None
@dataclass class Content: """ An object that represents the groups of fields that display the information for an event ticket. """ headerFields: list[HeaderField] | None = None primaryFields: list[PrimaryField] | None = None
secondaryFields: list[SecondaryField] | None = None
2
2023-12-18 22:51:38+00:00
2k
jonghwanhyeon/python-chzzk
chzzk/chzzk.py
[ { "identifier": "ChzzkClient", "path": "chzzk/client.py", "snippet": "class ChzzkClient(HTTPClient):\n BASE_URL = \"https://api.chzzk.naver.com/\"\n\n def __init__(self, credential: Optional[Credential] = None):\n super().__init__(credential)" }, { "identifier": "Credential", "p...
from typing import Optional from chzzk.client import ChzzkClient, Credential, GameClient from chzzk.models import ( Channel, ChannelSearchRecord, LiveDetail, LiveSearchRecord, LiveStatus, SearchCursor, User, Video, VideoSearchRecord, )
1,194
class ChzzkLive: def __init__(self, client: ChzzkClient): self._client = client async def status(self, channel_id: str) -> LiveStatus: response = await self._client.get(f"polling/v1/channels/{channel_id}/live-status") return LiveStatus(**response) async def detail(self, channel_id: str) -> LiveDetail: response = await self._client.get(f"service/v1/channels/{channel_id}/live-detail") return LiveDetail(**response) class ChzzkSearch: def __init__(self, client: ChzzkClient): self._client = client async def channels(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[ChannelSearchRecord]: response = await self._client.get( "service/v1/search/channels", params={ "keyword": keyword, "size": size, "offset": offset, }, ) return SearchCursor[ChannelSearchRecord](**response) async def lives(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[LiveSearchRecord]: response = await self._client.get( "service/v1/search/lives", params={ "keyword": keyword, "size": size, "offset": offset, }, ) return SearchCursor[LiveSearchRecord](**response)
class ChzzkLive: def __init__(self, client: ChzzkClient): self._client = client async def status(self, channel_id: str) -> LiveStatus: response = await self._client.get(f"polling/v1/channels/{channel_id}/live-status") return LiveStatus(**response) async def detail(self, channel_id: str) -> LiveDetail: response = await self._client.get(f"service/v1/channels/{channel_id}/live-detail") return LiveDetail(**response) class ChzzkSearch: def __init__(self, client: ChzzkClient): self._client = client async def channels(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[ChannelSearchRecord]: response = await self._client.get( "service/v1/search/channels", params={ "keyword": keyword, "size": size, "offset": offset, }, ) return SearchCursor[ChannelSearchRecord](**response) async def lives(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[LiveSearchRecord]: response = await self._client.get( "service/v1/search/lives", params={ "keyword": keyword, "size": size, "offset": offset, }, ) return SearchCursor[LiveSearchRecord](**response)
async def videos(self, keyword: str, size: int = 12, offset: int = 0) -> SearchCursor[VideoSearchRecord]:
11
2023-12-20 22:09:07+00:00
2k
pantherale0/ha-fuelprices
custom_components/fuel_prices/device_tracker.py
[ { "identifier": "CONF_AREAS", "path": "custom_components/fuel_prices/const.py", "snippet": "CONF_AREAS = \"areas\"" }, { "identifier": "DOMAIN", "path": "custom_components/fuel_prices/const.py", "snippet": "DOMAIN = \"fuel_prices\"" }, { "identifier": "FeulStationEntity", "pa...
import logging from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_RADIUS, CONF_NAME from homeassistant.components.device_tracker.config_entry import ( BaseTrackerEntity, SourceType, ATTR_SOURCE_TYPE, ATTR_LATITUDE, ATTR_LONGITUDE, ) from homeassistant.config_entries import ConfigEntry from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import StateType from pyfuelprices.const import PROP_FUEL_LOCATION_SOURCE from .const import CONF_AREAS, DOMAIN from .entity import FeulStationEntity from .coordinator import FuelPricesCoordinator
695
"""Device tracker for fuel prices.""" from __future__ import annotations _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Integration platform creation."""
"""Device tracker for fuel prices.""" from __future__ import annotations _LOGGER = logging.getLogger(__name__) async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Integration platform creation."""
cooridinator: FuelPricesCoordinator = hass.data[DOMAIN][entry.entry_id]
1
2023-12-19 20:54:21+00:00
2k
abdellatif-laghjaj/stock-market-prediction
main.py
[ { "identifier": "load_data", "path": "services.py", "snippet": "@st.cache_data\ndef load_data(ticker, start, end):\n \"\"\"\n Load historical stock price data from Yahoo Finance.\n\n Parameters:\n - ticker (str): Stock symbol (e.g., AAPL).\n - start (str): Start date in the format 'YYYY-M...
from time import sleep from sklearn.metrics import mean_absolute_error from streamlit_option_menu import option_menu from datetime import date from prophet import Prophet from prophet.plot import plot_plotly from services import load_data, plot_data, plot_multiple_data, plot_volume import uuid import pandas as pd import streamlit as st
1,104
# Set page layout to wide st.set_page_config(layout="wide", page_title="Forcastify", page_icon="📈") # Sidebar st.sidebar.markdown("<h1 style='text-align: center; font-size: 30px;'><b>Forcasti.</b><b style='color: orange'>fy</b></h1>", unsafe_allow_html=True) st.sidebar.title("Options") start_date_key = str(uuid.uuid4()) start_date = st.sidebar.date_input("Start date", date(2018, 1, 1), key=start_date_key) end_date = st.sidebar.date_input("End date", date.today()) # Header st.markdown("<h1 style='text-align: center;'>Stock Forecast App 📈</h1>", unsafe_allow_html=True) st.markdown("<p style='text-align: center;'><b>Forcasti.</b><b style='color: orange'>fy</b> is a simple web app for stock price prediction using the <a href='https://facebook.github.io/prophet/'>Prophet</a> library.</p>", unsafe_allow_html=True) selected_tab = option_menu( menu_title=None, options=["Dataframes", "Plots", "Statistics", "Forecasting", "Comparison"], icons=["table", "bar-chart", "calculator", "graph-up-arrow", "arrow-down-up"], menu_icon="📊", default_index=0, orientation="horizontal", ) # Stock selection stocks = ("AAPL", "GOOG", "MSFT", "GME", "AMC", "TSLA", "AMZN", "NFLX", "NVDA", "AMD", "PYPL") # Stock abbreviations selected_stock = st.sidebar.selectbox("Select stock for prediction", stocks) selected_stocks = st.sidebar.multiselect("Select stocks for comparison", stocks) years_to_predict = st.sidebar.slider("Years of prediction:", 1, 5) period = years_to_predict * 365 # Display a loading spinner while loading data with st.spinner("Loading data..."):
# Set page layout to wide st.set_page_config(layout="wide", page_title="Forcastify", page_icon="📈") # Sidebar st.sidebar.markdown("<h1 style='text-align: center; font-size: 30px;'><b>Forcasti.</b><b style='color: orange'>fy</b></h1>", unsafe_allow_html=True) st.sidebar.title("Options") start_date_key = str(uuid.uuid4()) start_date = st.sidebar.date_input("Start date", date(2018, 1, 1), key=start_date_key) end_date = st.sidebar.date_input("End date", date.today()) # Header st.markdown("<h1 style='text-align: center;'>Stock Forecast App 📈</h1>", unsafe_allow_html=True) st.markdown("<p style='text-align: center;'><b>Forcasti.</b><b style='color: orange'>fy</b> is a simple web app for stock price prediction using the <a href='https://facebook.github.io/prophet/'>Prophet</a> library.</p>", unsafe_allow_html=True) selected_tab = option_menu( menu_title=None, options=["Dataframes", "Plots", "Statistics", "Forecasting", "Comparison"], icons=["table", "bar-chart", "calculator", "graph-up-arrow", "arrow-down-up"], menu_icon="📊", default_index=0, orientation="horizontal", ) # Stock selection stocks = ("AAPL", "GOOG", "MSFT", "GME", "AMC", "TSLA", "AMZN", "NFLX", "NVDA", "AMD", "PYPL") # Stock abbreviations selected_stock = st.sidebar.selectbox("Select stock for prediction", stocks) selected_stocks = st.sidebar.multiselect("Select stocks for comparison", stocks) years_to_predict = st.sidebar.slider("Years of prediction:", 1, 5) period = years_to_predict * 365 # Display a loading spinner while loading data with st.spinner("Loading data..."):
data = load_data(selected_stock, start_date, end_date)
0
2023-12-17 11:38:48+00:00
2k
replicate/cog-marigold
src/model/marigold_pipeline.py
[ { "identifier": "RGBEncoder", "path": "src/model/rgb_encoder.py", "snippet": "class RGBEncoder(nn.Module):\n \"\"\"\n The encoder of pretrained Stable Diffusion VAE\n \"\"\"\n \n def __init__(self, pretrained_path, subfolder=None) -> None:\n super().__init__()\n \n va...
import logging import numpy as np import torch from typing import Dict from diffusers import ( DDIMScheduler, DDPMScheduler, PNDMScheduler, SchedulerMixin, UNet2DConditionModel, ) from torch import nn from torch.nn import Conv2d from torch.nn.parameter import Parameter from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from .rgb_encoder import RGBEncoder from .stacked_depth_AE import StackedDepthAE
1,288
# Author: Bingxin Ke # Last modified: 2023-12-11 class MarigoldPipeline(nn.Module): """ Marigold monocular depth estimator. """ def __init__( self, unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx} rgb_encoder_pretrained_path: Dict, depht_ae_pretrained_path: Dict, noise_scheduler_pretrained_path: Dict, tokenizer_pretrained_path: Dict, text_encoder_pretrained_path: Dict, empty_text_embed=None, trainable_unet=False, rgb_latent_scale_factor=0.18215, depth_latent_scale_factor=0.18215, noise_scheduler_type="DDIMScheduler", enable_gradient_checkpointing=False, enable_xformers=True, ) -> None: super().__init__() self.rgb_latent_scale_factor = rgb_latent_scale_factor self.depth_latent_scale_factor = depth_latent_scale_factor self.device = "cpu" # ******* Initialize modules ******* # Trainable modules self.trainable_module_dic: Dict[str, nn.Module] = {} self.trainable_unet = trainable_unet # Denoising UNet self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained( unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"] ) logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}") if 8 != self.unet.config["in_channels"]: self._replace_unet_conv_in() logging.warning("Unet conv_in layer is replaced") if enable_xformers: self.unet.enable_xformers_memory_efficient_attention() else: self.unet.disable_xformers_memory_efficient_attention() # Image encoder self.rgb_encoder = RGBEncoder( pretrained_path=rgb_encoder_pretrained_path["path"], subfolder=rgb_encoder_pretrained_path["subfolder"], ) logging.info( f"pretrained RGBEncoder loaded from: {rgb_encoder_pretrained_path}" ) self.rgb_encoder.requires_grad_(False) # Depth encoder-decoder
# Author: Bingxin Ke # Last modified: 2023-12-11 class MarigoldPipeline(nn.Module): """ Marigold monocular depth estimator. """ def __init__( self, unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx} rgb_encoder_pretrained_path: Dict, depht_ae_pretrained_path: Dict, noise_scheduler_pretrained_path: Dict, tokenizer_pretrained_path: Dict, text_encoder_pretrained_path: Dict, empty_text_embed=None, trainable_unet=False, rgb_latent_scale_factor=0.18215, depth_latent_scale_factor=0.18215, noise_scheduler_type="DDIMScheduler", enable_gradient_checkpointing=False, enable_xformers=True, ) -> None: super().__init__() self.rgb_latent_scale_factor = rgb_latent_scale_factor self.depth_latent_scale_factor = depth_latent_scale_factor self.device = "cpu" # ******* Initialize modules ******* # Trainable modules self.trainable_module_dic: Dict[str, nn.Module] = {} self.trainable_unet = trainable_unet # Denoising UNet self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained( unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"] ) logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}") if 8 != self.unet.config["in_channels"]: self._replace_unet_conv_in() logging.warning("Unet conv_in layer is replaced") if enable_xformers: self.unet.enable_xformers_memory_efficient_attention() else: self.unet.disable_xformers_memory_efficient_attention() # Image encoder self.rgb_encoder = RGBEncoder( pretrained_path=rgb_encoder_pretrained_path["path"], subfolder=rgb_encoder_pretrained_path["subfolder"], ) logging.info( f"pretrained RGBEncoder loaded from: {rgb_encoder_pretrained_path}" ) self.rgb_encoder.requires_grad_(False) # Depth encoder-decoder
self.depth_ae = StackedDepthAE(
1
2023-12-15 07:19:14+00:00
2k
tungeverest/python-k8s-base
src/app.py
[ { "identifier": "process_time_log_middleware", "path": "core/middlewares/https/process_time.py", "snippet": "async def process_time_log_middleware(request: Request, call_next):\n \"\"\"\n This middleware will log all requests and their processing time.\n E.g. log: HOST:PORT - GET /ping 200 OK ...
import logging from os import getenv from core.middlewares.https.process_time import process_time_log_middleware from core.middlewares.https.rate_limit import RateLimitCoreMiddleware from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.trustedhost import TrustedHostMiddleware from src.router import router as api_router from src.setting import get_settings
881
logger = logging.getLogger(__name__) def create_app(): settings = get_settings() app = FastAPI( title=f"{settings.PROJECT_NAME}", version=settings.APP_VERSION, debug=settings.DEBUG, description=f""" FastAPI Framework + K8s \n - PROJECT NAME: {settings.PROJECT_NAME} \n - VERSION: {settings.APP_VERSION} \n - ENV: {settings._ENV} \n - DEBUG: {settings.DEBUG} \n - API URI: {settings.API_VERSION_PREFIX} \n """, )
logger = logging.getLogger(__name__) def create_app(): settings = get_settings() app = FastAPI( title=f"{settings.PROJECT_NAME}", version=settings.APP_VERSION, debug=settings.DEBUG, description=f""" FastAPI Framework + K8s \n - PROJECT NAME: {settings.PROJECT_NAME} \n - VERSION: {settings.APP_VERSION} \n - ENV: {settings._ENV} \n - DEBUG: {settings.DEBUG} \n - API URI: {settings.API_VERSION_PREFIX} \n """, )
app.include_router(api_router, prefix=settings.API_VERSION_PREFIX)
1
2023-12-20 03:40:34+00:00
2k
CoolPointerException/Amigo
gui/llama_index_init.py
[ { "identifier": "validate", "path": "gui/input_validator.py", "snippet": "def validate(gui, properties):\n for prop in properties:\n match prop:\n case Properties.PROJECT_NAME:\n project_name = gui.projects_tab.project_name_entry.get()\n if not project_...
from tkinter import messagebox from llama_index import ServiceContext, set_global_service_context, OpenAIEmbedding from llama_index.embeddings import AzureOpenAIEmbedding, GeminiEmbedding from llama_index.llms import Gemini, OpenAI, AzureOpenAI from gui.input_validator import validate, Properties
1,186
def init_llama_index(self, api_type): if self.isLlamaInitialized: return llm = None embed_model = None if api_type == "azure": is_valid = validate(self, [
def init_llama_index(self, api_type): if self.isLlamaInitialized: return llm = None embed_model = None if api_type == "azure": is_valid = validate(self, [
Properties.API_BASE,
1
2023-12-15 14:06:38+00:00
2k
redvulpecula/DRILL-Concurrent-Python-1
main.py
[ { "identifier": "VideoStream", "path": "video_streaming.py", "snippet": "class VideoStream:\n def __init__(self, url, frames):\n self.frames = frames\n self.url = url\n self.process = Process(target=self.capture, args=(self.frames, self.url))\n self.process.start()\n\n ...
import time import torch from multiprocessing import Process, Manager from ultralytics import YOLO from video_streaming import VideoStream, calculate_fps, display_and_save_frame, check_rtsp_url, read_url_from_file from imgAlgSelect import YOLOProcessor
1,420
class ConcurrencyManager: def __init__(self, url): self.device = 'cuda' if torch.backends.cuda.is_built() else 'mps' if torch.backends.mps.is_available() else 'cpu' self.yolo_model = YOLO("yolov8m.pt") self.manager = Manager() self.url = url self.frames = self.manager.Queue(maxsize=1) self.video_stream = VideoStream(url, self.frames) self.fps_async = self.manager.Value('d', 0.0) self.fps_stream = self.manager.Value('d', 0.0) def start_stream(self): print("Waiting for the stream.") while not check_rtsp_url(self.url): print("Cannot connect to the URL or the port is not open. Retrying.") p_fps = Process(target=calculate_fps, args=(time.time(), self.fps_async)) p_fps.start() p_display = Process(target=display_and_save_frame, args=(self.fps_async, self.fps_stream, self.video_stream.frames)) p_display.start() p_yolo = Process(target=YOLOProcessor(self.video_stream.frames, self.yolo_model, self.device).process) p_yolo.start() p_display.join() p_fps.join() p_yolo.join() self.video_stream.release() def main():
class ConcurrencyManager: def __init__(self, url): self.device = 'cuda' if torch.backends.cuda.is_built() else 'mps' if torch.backends.mps.is_available() else 'cpu' self.yolo_model = YOLO("yolov8m.pt") self.manager = Manager() self.url = url self.frames = self.manager.Queue(maxsize=1) self.video_stream = VideoStream(url, self.frames) self.fps_async = self.manager.Value('d', 0.0) self.fps_stream = self.manager.Value('d', 0.0) def start_stream(self): print("Waiting for the stream.") while not check_rtsp_url(self.url): print("Cannot connect to the URL or the port is not open. Retrying.") p_fps = Process(target=calculate_fps, args=(time.time(), self.fps_async)) p_fps.start() p_display = Process(target=display_and_save_frame, args=(self.fps_async, self.fps_stream, self.video_stream.frames)) p_display.start() p_yolo = Process(target=YOLOProcessor(self.video_stream.frames, self.yolo_model, self.device).process) p_yolo.start() p_display.join() p_fps.join() p_yolo.join() self.video_stream.release() def main():
url = read_url_from_file()
4
2023-12-18 02:58:03+00:00
2k
LyubomirT/discord-lle
main.py
[ { "identifier": "Colorizer", "path": "colorizer.py", "snippet": "class Colorizer:\n def __init__(self, color):\n self.color = color\n self.colors = {\n \"red\": \"\\033[31m\",\n \"green\": \"\\033[32m\",\n \"yellow\": \"\\033[33m\",\n \"blue\"...
from dotenv import load_dotenv from discord.ext import commands from discord.commands import Option from discord.ui import Button, View, Select, Modal from colorizer import Colorizer from datetime import datetime from verify_dir import verify_dir import os import requests import json import discord import configparser import asyncio
1,460
load_dotenv() token = os.getenv("BOT_TOKEN") bot = commands.Bot(command_prefix="!", intents=discord.Intents.all()) log_dir = "_logs_" dm_config = { "enabled": True, "download_images": True, "download_videos": True, "download_audio": True, } server_config = { "enabled": True, "download_images": True, "download_videos": True, "download_audio": True, } printContents = False logtodms = False ownerid = 0 def load_config(): with open("_config_/directories.cfg", "r") as f: try: config = configparser.ConfigParser() config.read_file(f) except: print(Colorizer("red").colorize("Could not load config! The directories.cfg file is missing or corrupt.")) os._exit(1) global log_dir try: log_dir = config["directories"]["log_dir"] except: print(Colorizer("red").colorize("Could not load config! Please specify a proper log directory or use cfg_gen.py to generate a new config file.")) os._exit(1) with open("_config_/types.cfg", "r") as f: try: config = configparser.ConfigParser() config.read_file(f) except: print(Colorizer("red").colorize("Could not load config! The types.cfg file is missing or corrupt.")) os._exit(1) global dm_config try: dm_config["enabled"] = bool(config["direct_messages"]["enabled"]) dm_config["download_images"] = bool(config["direct_messages"]["download_images"]) dm_config["download_videos"] = bool(config["direct_messages"]["download_videos"]) dm_config["download_audio"] = bool(config["direct_messages"]["download_audio"]) except: print(Colorizer("red").colorize("Could not load config! Please specify proper types (DM) or use cfg_gen.py to generate a new config file.")) os._exit(1) global server_config try: server_config["enabled"] = bool(config["servers"]["enabled"]) server_config["download_images"] = bool(config["servers"]["download_images"]) server_config["download_videos"] = bool(config["servers"]["download_videos"]) server_config["download_audio"] = bool(config["servers"]["download_audio"]) except: print(Colorizer("red").colorize("Could not load config! Please specify proper types (server) or use cfg_gen.py to generate a new config file.")) os._exit(1) with open("_config_/misc.cfg", "r") as f: try: config = configparser.ConfigParser() config.read_file(f) except: print(Colorizer("red").colorize("Could not load config! The misc.cfg file is missing or corrupt.")) os._exit(1) global printContents try: printContents = bool(config["Console"]["printContents"]) except: print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (printContents) or use cfg_gen.py to generate a new config file.")) os._exit(1) global logtodms try: logtodms = bool(config["DiscordLog"]["enabled"]) except: print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (logtodms) or use cfg_gen.py to generate a new config file.")) os._exit(1) global ownerid try: ownerid = int(config["DiscordLog"]["ownerid"]) except: print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (ownerid) or use cfg_gen.py to generate a new config file.")) os._exit(1)
load_dotenv() token = os.getenv("BOT_TOKEN") bot = commands.Bot(command_prefix="!", intents=discord.Intents.all()) log_dir = "_logs_" dm_config = { "enabled": True, "download_images": True, "download_videos": True, "download_audio": True, } server_config = { "enabled": True, "download_images": True, "download_videos": True, "download_audio": True, } printContents = False logtodms = False ownerid = 0 def load_config(): with open("_config_/directories.cfg", "r") as f: try: config = configparser.ConfigParser() config.read_file(f) except: print(Colorizer("red").colorize("Could not load config! The directories.cfg file is missing or corrupt.")) os._exit(1) global log_dir try: log_dir = config["directories"]["log_dir"] except: print(Colorizer("red").colorize("Could not load config! Please specify a proper log directory or use cfg_gen.py to generate a new config file.")) os._exit(1) with open("_config_/types.cfg", "r") as f: try: config = configparser.ConfigParser() config.read_file(f) except: print(Colorizer("red").colorize("Could not load config! The types.cfg file is missing or corrupt.")) os._exit(1) global dm_config try: dm_config["enabled"] = bool(config["direct_messages"]["enabled"]) dm_config["download_images"] = bool(config["direct_messages"]["download_images"]) dm_config["download_videos"] = bool(config["direct_messages"]["download_videos"]) dm_config["download_audio"] = bool(config["direct_messages"]["download_audio"]) except: print(Colorizer("red").colorize("Could not load config! Please specify proper types (DM) or use cfg_gen.py to generate a new config file.")) os._exit(1) global server_config try: server_config["enabled"] = bool(config["servers"]["enabled"]) server_config["download_images"] = bool(config["servers"]["download_images"]) server_config["download_videos"] = bool(config["servers"]["download_videos"]) server_config["download_audio"] = bool(config["servers"]["download_audio"]) except: print(Colorizer("red").colorize("Could not load config! Please specify proper types (server) or use cfg_gen.py to generate a new config file.")) os._exit(1) with open("_config_/misc.cfg", "r") as f: try: config = configparser.ConfigParser() config.read_file(f) except: print(Colorizer("red").colorize("Could not load config! The misc.cfg file is missing or corrupt.")) os._exit(1) global printContents try: printContents = bool(config["Console"]["printContents"]) except: print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (printContents) or use cfg_gen.py to generate a new config file.")) os._exit(1) global logtodms try: logtodms = bool(config["DiscordLog"]["enabled"]) except: print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (logtodms) or use cfg_gen.py to generate a new config file.")) os._exit(1) global ownerid try: ownerid = int(config["DiscordLog"]["ownerid"]) except: print(Colorizer("red").colorize("Could not load config! Please specify proper misc options (ownerid) or use cfg_gen.py to generate a new config file.")) os._exit(1)
verify_dir(log_dir)
1
2023-12-18 16:08:05+00:00
2k
KR1470R/plagiator-py
utils/plagiator.py
[ { "identifier": "exists", "path": "utils/exists.py", "snippet": "def exists(obj, *keys):\n format_keys = \"\".join(\n list(map(\n lambda key: f\"['{key}']\",\n keys\n ))\n )\n try:\n return eval(f\"obj{format_keys}\")\n except Exception:\n return None" }, { "identifier"...
import json import logging import requests from .exists import exists from configs.edupirdie import API_URI, HEADERS from random_user_agent.user_agent import UserAgent from random_user_agent.params import SoftwareName, OperatingSystem
844
class Plagiator: def __init__(self): self.session = requests.Session() adapter = requests.adapters.HTTPAdapter(pool_connections=10000, pool_maxsize=10000) self.session.mount("https://", adapter) software_names = [software_name.value for software_name in SoftwareName] operating_systems = [operating_system.value for operating_system in OperatingSystem] self.user_agent_rotator = UserAgent( software_names=software_names, operating_systems=operating_systems, limit=1000 ) def concretize_response(self, response: dict): if exists(response, "error") and response["error"]: return response del response["error"] del response["error_code"] if len(response["title"]) == 0: del response["title"] words = response["text"].split(" ") if exists(response, "highlight") and len(response["highlight"]): highlight_text = [] for span in response["highlight"]: span = list(map(int, span)) selected_words = words[span[0]] if ( span[0] == span[1] ) else words[span[0]:span[1]] if isinstance(selected_words, list): selected_words = " ".join(selected_words) highlight_text.append(selected_words) response["highlight"] = highlight_text if exists(response, "matches") and len(response["matches"]): matches_highlight = [] for match in response["matches"]: matched_highlight_text = [] for match_span in match["highlight"]: match_span = list(map(int, match_span)) selected_words = words[match_span[0]] if ( match_span[0] == match_span[1] ) else words[match_span[0]:match_span[1]] if isinstance(selected_words, list): selected_words = " ".join(selected_words) matched_highlight_text.append(selected_words) matches_highlight.append({**match, "highlight": matched_highlight_text}) response["matches"] = matches_highlight return response def __request__(self, text: str, title: str = None): return self.session.post( API_URI, headers={
class Plagiator: def __init__(self): self.session = requests.Session() adapter = requests.adapters.HTTPAdapter(pool_connections=10000, pool_maxsize=10000) self.session.mount("https://", adapter) software_names = [software_name.value for software_name in SoftwareName] operating_systems = [operating_system.value for operating_system in OperatingSystem] self.user_agent_rotator = UserAgent( software_names=software_names, operating_systems=operating_systems, limit=1000 ) def concretize_response(self, response: dict): if exists(response, "error") and response["error"]: return response del response["error"] del response["error_code"] if len(response["title"]) == 0: del response["title"] words = response["text"].split(" ") if exists(response, "highlight") and len(response["highlight"]): highlight_text = [] for span in response["highlight"]: span = list(map(int, span)) selected_words = words[span[0]] if ( span[0] == span[1] ) else words[span[0]:span[1]] if isinstance(selected_words, list): selected_words = " ".join(selected_words) highlight_text.append(selected_words) response["highlight"] = highlight_text if exists(response, "matches") and len(response["matches"]): matches_highlight = [] for match in response["matches"]: matched_highlight_text = [] for match_span in match["highlight"]: match_span = list(map(int, match_span)) selected_words = words[match_span[0]] if ( match_span[0] == match_span[1] ) else words[match_span[0]:match_span[1]] if isinstance(selected_words, list): selected_words = " ".join(selected_words) matched_highlight_text.append(selected_words) matches_highlight.append({**match, "highlight": matched_highlight_text}) response["matches"] = matches_highlight return response def __request__(self, text: str, title: str = None): return self.session.post( API_URI, headers={
**HEADERS,
2
2023-12-21 17:29:18+00:00
2k
fmhy/bot
cogs/rss.py
[ { "identifier": "rss_chan_ids", "path": "cogs/_config.py", "snippet": "TOKEN = os.getenv(\"TOKEN\", None)\nGUILD_ID = os.getenv(\"GUILD_ID\", None)\nOWNERS = os.getenv(\"OWNERS\").split(\",\")\nRSS_CHANNELS = os.getenv(\"RSS_CHANNEL_IDS\", None)\nFEEDS = os.getenv(\"RSS_FEED_URLS\", None)\nDB = os.geten...
from typing import TYPE_CHECKING
from discord.ext import commands, tasks
from cogs._config import rss_chan_ids
from cogs._helpers import fetch_feed
from main import Bot
from discord.channel import TextChannel
985
if TYPE_CHECKING:
    ...  # TYPE_CHECKING-only body elided in this crop


class RSSFeeds(commands.Cog):
    """RSSFeeds commands"""

    def __init__(self, bot: Bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        self.send_rss.start()

    async def cog_before_invoke(self, ctx):
        """Triggers typing indicator on Discord before every command."""
        await ctx.channel.typing()
        return

    @tasks.loop(seconds=300)
    async def send_rss(self):
for msg in fetch_feed():
1
2023-12-19 10:27:04+00:00
2k
cvlab-yonsei/RankMixup
calibrate/evaluation/segment_evaluator.py
[ { "identifier": "DatasetEvaluator", "path": "calibrate/evaluation/evaluator.py", "snippet": "class DatasetEvaluator(metaclass=ABCMeta):\n \"\"\"\n Base class for a dataset evaluator\n \"\"\"\n @abstractmethod\n def reset(self):\n \"\"\"\n Preparation for a new round of evalu...
import logging
import numpy as np
import pandas as pd
import wandb
from terminaltables import AsciiTable
from typing import List, Optional
from .evaluator import DatasetEvaluator
from calibrate.utils.constants import EPS
973
logger = logging.getLogger(__name__)


def intersect_and_union(pred_label, label, num_classes, ignore_index):
    mask = (label != ignore_index)
    pred_label = pred_label[mask]
    label = label[mask]

    intersect = pred_label[pred_label == label]
    area_intersect, _ = np.histogram(
        intersect, bins=np.arange(num_classes + 1)
    )
    area_pred_label, _ = np.histogram(
        pred_label, bins=np.arange(num_classes + 1)
    )
    area_label, _ = np.histogram(
        label, bins=np.arange(num_classes + 1)
    )
    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


class SegmentEvaluator(DatasetEvaluator):
    def __init__(self, classes: Optional[List[str]] = None, ignore_index: int = -1) -> None:
        super().__init__()
        self.classes = classes
        self.num_classes = len(self.classes)
        self.ignore_index = ignore_index

    def num_samples(self):
        return self.nsamples

    def reset(self):
        self.total_area_inter = np.zeros((self.num_classes, ), dtype=np.float64)
        self.total_area_union = np.zeros((self.num_classes, ), dtype=np.float64)
        self.total_area_pred = np.zeros((self.num_classes, ), dtype=np.float64)
        self.total_area_target = np.zeros((self.num_classes, ), dtype=np.float64)
        self.nsamples = 0

    def main_metric(self):
        return "miou"

    def ignore_background(self, pred: np.ndarray, target: np.ndarray):
        pred = pred[:, 1:] if pred.shape[1] > 1 else pred
        target = target[:, 1:] if target.shape[1] > 1 else target
        return pred, target

    def update(self, pred: np.ndarray, target: np.ndarray):
        """Update all the metric from batch size prediction and target.

        Args:
            pred: predictions to be evaluated in one-hot formation
            target: ground truth. It should be one-hot format.
        """
        assert pred.shape == target.shape, "pred and target should have same shapes"
        n = pred.shape[0]
        self.nsamples += n

        batch_area_inter = np.zeros((self.num_classes, ), dtype=np.float64)
        batch_area_union = np.zeros((self.num_classes, ), dtype=np.float64)
        batch_area_pred = np.zeros((self.num_classes, ), dtype=np.float64)
        batch_area_target = np.zeros((self.num_classes, ), dtype=np.float64)
        for i in range(n):
            area_inter, area_union, area_pred, area_target = (
                intersect_and_union(
                    pred[i], target[i], self.num_classes, self.ignore_index
                )
            )
            batch_area_inter += area_inter
            batch_area_union += area_union
            batch_area_pred += area_pred
            batch_area_target += area_target
iou = batch_area_inter[1:].sum() / (batch_area_union[1:].sum() + EPS)
1
2023-12-17 13:53:18+00:00
2k
CaptainCook4D/downloader
download_gopro_data.py
[ { "identifier": "prepare_gopro_2d_output_directory", "path": "util.py", "snippet": "def prepare_gopro_2d_output_directory(args, output_dir: Path):\n\toutput_dir.mkdir(parents=True, exist_ok=True)\n\t\n\tdata_directory = output_dir / Constants.CAPTAIN_COOK_4D\n\tdata_directory.mkdir(parents=True, exist_o...
import argparse
import json
from pathlib import Path
from util import prepare_gopro_2d_output_directory, Constants, download_data
1,527
def process_download_gopro_data(download_args):
    # ---- Parse Download Links Json ----
    with open("metadata/download_links.json", "r") as f:
        download_links = json.load(f)

    output_dir = Path(download_args.output_dir)
data_directory = prepare_gopro_2d_output_directory(download_args, output_dir)
0
2023-12-16 00:27:29+00:00
2k
mjavadpur/Sadtalker_LongVideos
src/audio2pose_models/audio2pose.py
[ { "identifier": "CVAE", "path": "src/audio2pose_models/cvae.py", "snippet": "class CVAE(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n encoder_layer_sizes = cfg.MODEL.CVAE.ENCODER_LAYER_SIZES\n decoder_layer_sizes = cfg.MODEL.CVAE.DECODER_LAYER_SIZES\n laten...
import torch
from torch import nn
from src.audio2pose_models.cvae import CVAE
from src.audio2pose_models.discriminator import PoseSequenceDiscriminator
from src.audio2pose_models.audio_encoder import AudioEncoder
1,566
class Audio2Pose(nn.Module):
    def __init__(self, cfg, wav2lip_checkpoint, device='cuda'):
        super().__init__()
        self.cfg = cfg
        self.seq_len = cfg.MODEL.CVAE.SEQ_LEN
        self.latent_dim = cfg.MODEL.CVAE.LATENT_SIZE
        self.device = device

        self.audio_encoder = AudioEncoder(wav2lip_checkpoint, device)
        self.audio_encoder.eval()
        for param in self.audio_encoder.parameters():
            param.requires_grad = False

        self.netG = CVAE(cfg)
self.netD_motion = PoseSequenceDiscriminator(cfg)
1
2023-12-19 11:01:35+00:00
2k
Angryrou/udao
udao/data/tests/iterators/dummy_udao_iterator.py
[ { "identifier": "TabularContainer", "path": "udao/data/containers/tabular_container.py", "snippet": "class TabularContainer(BaseContainer):\n \"\"\"Container for tabular data, stored in DataFrame format.\"\"\"\n\n data: pd.DataFrame\n\n def get(self, key: str) -> np.ndarray:\n return sel...
from typing import Sequence, Tuple
from ....data.containers.tabular_container import TabularContainer
from ....data.iterators.base_iterator import UdaoIterator
from ....utils.interfaces import (
    UdaoEmbedInput,
    UdaoEmbedItemShape,
    UdaoInput,
    UdaoItemShape,
)
import torch as th
1,099
class DummyUdaoIterator(UdaoIterator[UdaoInput, UdaoItemShape]):
    def __init__(
        self,
        keys: Sequence[str],
        tabular_features: TabularContainer,
        objectives: TabularContainer,
    ) -> None:
        super().__init__(keys, tabular_features=tabular_features, objectives=objectives)

    def _getitem(self, idx: int) -> Tuple[UdaoInput, th.Tensor]:
        key = self.keys[idx]
        return (
            UdaoInput(
                th.tensor(self.tabular_features.get(key), dtype=self.tensors_dtype)
            ),
            th.tensor(self.objectives.get(key), dtype=self.tensors_dtype),
        )

    @property
    def shape(self) -> UdaoItemShape:
        return UdaoItemShape(
            feature_names=list(self.tabular_features.data.columns),
            output_names=list(self.objectives.data.columns),
        )

    @staticmethod
    def collate(
        items: Sequence[Tuple[UdaoInput, th.Tensor]]
    ) -> Tuple[UdaoInput, th.Tensor]:
        features = UdaoInput(th.vstack([item[0].features for item in items]))
        objectives = th.vstack([item[1] for item in items])
        return features, objectives
class DummyUdaoEmbedIterator(UdaoIterator[UdaoEmbedInput, UdaoEmbedItemShape]):
3
2023-12-20 09:10:42+00:00
2k
SnailForce/SIM-Net
data/mask_dataset.py
[ { "identifier": "BaseDataset", "path": "data/base_dataset.py", "snippet": "class BaseDataset(data.Dataset, ABC):\n \"\"\"This class is an abstract base class (ABC) for datasets.\n\n To create a subclass, you need to implement the following four functions:\n -- <__init__>: i...
import os,yaml
import torch.nn.functional as F
import random
import numpy as np
import collections
import torch
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset_by_name
from PIL import Image,ImageFilter
1,213
class MaskDataset(BaseDataset):
    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.root_dir = os.path.join(opt.dataroot, 'class')
        self.phase = opt.phase
        self.img_mask_dict = {}
        self.img_names = {}
        self.data_size = {}
        self.label_list = os.listdir(os.path.join(self.root_dir))

        # The shape of the human face is more complex, so increase the training ratio
        if "face" in self.label_list:
            self.label_list.append("face")

        for label in self.label_list:
            label_dir = os.path.join(self.root_dir, label, "images")
            with open(os.path.join(self.root_dir, label, 'list.yaml')) as f:
                self.img_mask_dict[label] = yaml.safe_load(f)
            self.img_names[label] = list(self.img_mask_dict[label].keys())
            self.data_size[label] = len(self.img_names[label])
self.transform = get_transform(self.opt)
1
2023-12-16 12:49:10+00:00
2k
adarshsankarrs/PhotoshopApp
app.py
[ { "identifier": "MultiApp", "path": "multiapp.py", "snippet": "class MultiApp:\n \"\"\"Framework for combining multiple streamlit applications.\n \"\"\"\n def __init__(self):\n self.apps = []\n\n def add_app(self, title, func):\n \"\"\"Adds a new application.\n\n \"\"\"\...
import streamlit as st import numpy as np import pandas as pd import cv2 from PIL import Image, ImageOps from multiapp import MultiApp from apps import home,sketch,inpaint,stadap,textonimg,Edge_Cont,Face_detect,Crop,filters,abtus,Feature_detect
928
app = MultiApp()

# option = st.selectbox(
#     'Select from the options',
#     ('Home', 'Filters', 'Doc scanner','add text'), key = 1)
# if(option=='Filters'):
#     opt = st.selectbox(
#         'Select from the options',
#         ('sepia', 'Filter1', 'filter2','filter3'), key = 2)

# Add all your application here
app.add_app("Home", home.app)
app.add_app("Add filters to image", filters.app)
app.add_app("Sketch", sketch.app)
app.add_app("Image inpainting", inpaint.app)
app.add_app("Doc Scanner", stadap.app)
app.add_app("Add Title to image", textonimg.app)
app.add_app("Crop an Image", Crop.app)
app.add_app("Edge and Contour detection ", Edge_Cont.app)
app.add_app("Face detection", Face_detect.app)
7
2023-12-20 20:32:16+00:00
2k
DURUII/Replica-AUCB
main.py
[ { "identifier": "StrategicArm", "path": "arms.py", "snippet": "class StrategicArm(NormalArm):\n c_min, c_max = 0.1, 1\n\n def __init__(self):\n # in the paper, r is expected reward\n r = random.uniform(0.1, 1)\n # to make that sample value is within 0~1 with 97%\n sigma...
import os import pandas as pd import numpy as np import pickle from matplotlib import pyplot as plt from tqdm import tqdm from arms import StrategicArm from config import Config from emulator import Emulator
1,153
""" Author: DURUII Date: 2023/12/17 """ plt.style.use(['science', 'grid']) config = Config # data preparation if not os.path.exists('./runs.pkl'): data = [] for X in ['N', 'K', 'B']: for x in tqdm(eval(f'config.{X}_range'), desc=X): if X == 'N':
""" Author: DURUII Date: 2023/12/17 """ plt.style.use(['science', 'grid']) config = Config # data preparation if not os.path.exists('./runs.pkl'): data = [] for X in ['N', 'K', 'B']: for x in tqdm(eval(f'config.{X}_range'), desc=X): if X == 'N':
name2res = Emulator(n_arms=x).simulate()
2
2023-12-15 18:17:01+00:00
2k
XLearning-SCU/2023-TPAMI-SMILE
_AutoLauncher.py
[ { "identifier": "path_operator", "path": "_MainLauncher.py", "snippet": "def get_settings():\r\ndef clear_gpu_fail(root):\r\ndef run():\r\ndef main():\r" }, { "identifier": "Launcher", "path": "_Utils/Launcher.py", "snippet": "class Launcher(SubprocessOperator):\r\n def __init__(self,...
import time from _MainLauncher import path_operator from _Utils import Launcher from _Utils.ConfigOperator import ConfigOperator
1,420
def main():
    class C2(ConfigOperator):
        def get_name(self, *args, **kwargs):
            return '_QueueLog'
Launcher.Launcher(
1
2023-12-21 08:50:36+00:00
2k
precisionalgorithms/loopring-python-SDK
main.py
[ { "identifier": "Session", "path": "loopring/session.py", "snippet": "class Session:\n \"\"\"\n Parent class for Loopring API.\n \"\"\"\n # Class variables\n api_key = None\n account_id = None\n headers = None\n base_url = 'https://api3.loopring.io/api/v3'\n\n @classmethod\n ...
import pickle from loopring.session import Session from loopring.account import Account from loopring.exchange import Exchange from utils import join_balance_with_token_info
1,087
# Initialize the Loopring API with API key and account ID
Session.initialize()

# Get the account balances
account = Account()
balances = account.get_account_balances()

# Get token info on exchange
exchange = Exchange()
2
2023-12-18 00:19:56+00:00
2k
Liyulingyue/ModulelyTools
codes/extraction/ModuleTools.py
[ { "identifier": "parse_ipynb", "path": "codes/extraction/ipynb/ipynb_analyse.py", "snippet": "def parse_ipynb(file_path):\n \"\"\"\n # 示例:使用函数解析一个ipynb文件\n file_path = 'main.ipynb' # 请将此处替换为您的ipynb文件路径\n result = parse_ipynb(file_path)\n print(result)\n \"\"\"\n # 读取ipynb文件\n wi...
from .ipynb.ipynb_analyse import parse_ipynb, get_ipynb_content, get_model_list, model_list2python from .py.py_analyse import extract_function_defs, get_function_defs, get_intro_of_fun from ..llm.Ernie import Ernie from ..llm.Ernie import Ernie
1,554
class ModuleTools(object):
    def __init__(self, llm_type="Ernie"):
        super().__init__()
        if llm_type == "Ernie":
            self.llm = Ernie()
        else:
            # default set ernie as used llm
            self.llm = Ernie()

    def ipynb2py(self, ipynb_path = "example.ipynb", prompt = ""):
result = parse_ipynb(ipynb_path)
0
2023-12-17 14:20:45+00:00
2k
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/_base_connection.py
[ { "identifier": "_TYPE_SOCKET_OPTIONS", "path": ".venv/Lib/site-packages/urllib3/util/connection.py", "snippet": "_TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]]" }, { "identifier": "_DEFAULT_TIMEOUT", "path": ".venv/Lib/site-packages/urllib3/util/tim...
import typing import ssl from .util.connection import _TYPE_SOCKET_OPTIONS from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT from .util.url import Url from typing import Literal, Protocol from .response import BaseHTTPResponse
1,468
from __future__ import annotations

_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]


class ProxyConfig(typing.NamedTuple):
    ssl_context: ssl.SSLContext | None
    use_forwarding_for_https: bool
    assert_hostname: None | str | Literal[False]
    assert_fingerprint: str | None


class _ResponseOptions(typing.NamedTuple):
    # TODO: Remove this in favor of a better
    # HTTP request/response lifecycle tracking.
    request_method: str
    request_url: str
    preload_content: bool
    decode_content: bool
    enforce_content_length: bool


if typing.TYPE_CHECKING:

    class BaseHTTPConnection(Protocol):
        default_port: typing.ClassVar[int]
        default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS]

        host: str
        port: int
        timeout: None | (
            float
        )  # Instance doesn't store _DEFAULT_TIMEOUT, must be resolved.
        blocksize: int
        source_address: tuple[str, int] | None
        socket_options: _TYPE_SOCKET_OPTIONS | None
proxy: Url | None
3
2023-12-16 04:12:01+00:00
2k
neuroglia-io/python-framework
tests/cases/test_service_provider.py
[ { "identifier": "FileLogger", "path": "tests/services.py", "snippet": "class FileLogger(LoggerBase):\n \n def log(text: str):\n with open('example.txt', 'a') as file:\n file.write(f'{text}\\n')" }, { "identifier": "LoggerBase", "path": "tests/services.py", "snippe...
from re import T from sys import implementation from neuroglia.dependency_injection.service_provider import IServiceProvider, ServiceCollection, ServiceProvider from tests.services import FileLogger, LoggerBase, NullLogger, PrintLogger import pytest
820
class TestServiceProvider:

    def test_build_should_work(self):
        #arrange
        services = ServiceCollection()
        services.add_singleton(LoggerBase, PrintLogger)
        services.add_singleton(LoggerBase, singleton = FileLogger())
        services.add_singleton(LoggerBase, implementation_factory = self._build_null_logger)

        #act
        service_provider = services.build()

        #assert
        assert service_provider is not None, 'service_provider is none'

    def test_get_service_should_work(self):
        #arrange
        services = ServiceCollection()
        implementation_type = PrintLogger
        services.add_singleton(LoggerBase, implementation_type)
        service_provider = services.build()

        #act
        logger = service_provider.get_service(LoggerBase)

        #assert
        assert logger is not None, 'logger is none'
        assert isinstance(logger, implementation_type), f"logger is not of expected type '{implementation_type.__name__}'"

    def test_get_unregistered_service_should_work(self):
        #arrange
        services = ServiceCollection()
        service_provider = services.build()

        #act
        logger = service_provider.get_service(LoggerBase)

        #assert
        assert logger is None, 'logger is not none'

    def test_get_required_service_should_work(self):
        #arrange
        services = ServiceCollection()
        implementation_type = PrintLogger
        services.add_singleton(LoggerBase, implementation_type)
        service_provider = services.build()

        #act
        logger = service_provider.get_required_service(LoggerBase)

        #assert
        assert logger is not None, 'logger is none'
        assert isinstance(logger, implementation_type), f"logger is not of expected type '{implementation_type.__name__}'"

    def test_get_required_unregistered_service_should_raise_error(self):
        #arrange
        services = ServiceCollection()
        service_provider = services.build()

        #assert
        with pytest.raises(Exception):
            service_provider.get_required_service(LoggerBase)()

    def test_get_scoped_service_from_root_should_raise_error(self):
        #arrange
        services = ServiceCollection()
        implementation_type = PrintLogger
        services.add_scoped(LoggerBase, implementation_type)
        service_provider = services.build()

        #assert
        with pytest.raises(Exception):
            service_provider.get_required_service(LoggerBase)()

    def test_get_services_should_work(self):
        #arrange
        services = ServiceCollection()
        services.add_singleton(LoggerBase, PrintLogger)
        services.add_singleton(LoggerBase, singleton = FileLogger())
        services.add_singleton(LoggerBase, implementation_factory = self._build_null_logger)
        service_provider = services.build()

        #act
        loggers = service_provider.get_services(LoggerBase)

        #assert
        assert len(loggers) == 3, f'expected 3 loggers, got {len(loggers)}'

    def test_create_scope_should_work(self):
        pass

    def test_get_scoped_service_should_work(self):
        pass
def _build_null_logger(self, provider : IServiceProvider) -> NullLogger:
    return NullLogger()
2
2023-12-15 14:36:50+00:00
2k
Vlodson/Faculty-Choice-Assistant
backend/server/endpoints/natural_language.py
[ { "identifier": "make_thread_for_user", "path": "backend/llm/threads.py", "snippet": "def make_thread_for_user() -> Thread:\n return CLIENT.beta.threads.create()" }, { "identifier": "retrieve_thread_for_user", "path": "backend/llm/threads.py", "snippet": "def retrieve_thread_for_user(...
from flask import Blueprint, abort, request, jsonify from backend.llm.threads import ( make_thread_for_user, retrieve_thread_for_user, send_setup_message, send_user_message, create_run_for_thread, retrieve_run_for_user, get_last_message, get_query_from_message, ) from backend.ontology.queries import apply_query, query_results_to_table from backend.server.endpoints.custom_types import SendMessageRequest, SetupUserResponse
1,080
bp = Blueprint("llm", __name__)


@bp.route("/setup", methods=["GET"])
def setup_user() -> SetupUserResponse:
    thread = make_thread_for_user()
    _ = send_setup_message(thread)
run = create_run_for_thread(thread) # for easier expansion of the API
4
2023-12-21 17:55:05+00:00
2k
stevej2608/reactpy-apexcharts
utils/fast_server.py
[ { "identifier": "log", "path": "utils/logger.py", "snippet": "" }, { "identifier": "var_name", "path": "utils/var_name.py", "snippet": "def var_name(obj: Any, namespace: Dict[str, Any]) -> str:\r\n \"\"\"Return var name as a string\r\n\r\n Args:\r\n obj (Any): Variable ty be...
from typing import Callable from fastapi import FastAPI from reactpy.core.component import Component from reactpy.backend.fastapi import configure, Options from .logger import log, logging from .var_name import var_name from .fast_server_options import DEFAULT_OPTIONS import sys import signal import multiprocessing import uvicorn
802
app = FastAPI(description="ReactPy", version="0.1.0")

LOGS = [
    "asgi-logger",
    "concurrent.futures",
    "concurrent",
    "asyncio",
    "uvicorn.error",
    "uvicorn",
    "watchfiles.watcher",
    "watchfiles",
    "watchfiles.main",
    "fastapi",
    "reactpy.backend",
    "reactpy",
    "reactpy._option",
    "reactpy.core.hooks",
    "reactpy.core",
    "urllib3.util.retry",
    "urllib3.util",
    "urllib3",
    "urllib3.connection",
    "urllib3.response",
    "urllib3.connectionpool",
    "urllib3.poolmanager",
    "charset_normalizer",
    "requests",
    "reactpy.web.utils",
    "reactpy.web",
    "reactpy.web.module",
    "reactpy.backend.utils",
    "reactpy.core.layout",
    "reactpy.core.serve",
    "reactpy.backend.starlette",
    "uvicorn.access",
    "starlette",
]


def disable_noisy_logs():
    # Turn off noisy logging
    for log_id in LOGS:
        _log = logging.getLogger(log_id)
        _log.setLevel(logging.ERROR)


def handler(signum, frame):
    active = multiprocessing.active_children()
    for child in active:
        child.terminate()


def run(AppMain: Callable[[], Component], options:Options=DEFAULT_OPTIONS, host='127.0.0.1', port=8000, disable_server_logs=False, **kwargs) -> None:
    """Called once to run reactpy application on the fastapi server

    Args:
        AppMain (Callable[[], Component]): Function that returns a reactpy Component
        options (Options, optional): Server options. Defaults to DASHBOARD_OPTIONS.

    Usage:
    ```
        @component
        def AppMain():
            return html.h2('Hello from reactPy!')

        run(AppMain, options=PICO_OPTIONS)
    ```
    """

    def _app_path(app: FastAPI) -> str:
        app_str = var_name(app, globals())
        return f"{__name__}:{app_str}"

    configure(app, AppMain, options=options)
    app_path = _app_path(app)

    @app.on_event('startup')
    async def fastapi_startup():
        if disable_server_logs:
            disable_noisy_logs()
log.info("Uvicorn running on http://%s:%s (Press CTRL+C to quit)", host, port)
0
2023-12-19 16:05:41+00:00
2k
ict-bigdatalab/RIGHT
retrieval_analysis.py
[ { "identifier": "read_line_examples_from_file", "path": "get_datasets.py", "snippet": "def read_line_examples_from_file(data_path):\n sequence = []\n with open(data_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip(\"\\n\")\n if not line:\n ...
import json from get_datasets import read_line_examples_from_file from tqdm import tqdm from eval_utils import f1
816
def get_hashtag_list(dst):
    tags = dst.split('[SEP]')
    target = []
    for j in range(len(tags)):
        tags[j] = tags[j].strip()
        if tags[j] != '':
            target.append(tags[j])
    # if the dst is nothing
    if len(target) == 0:
        target.append('None')
    # statistic_hashtags(hashtags)
    return target


def retrieval_analysis(src_path, label_path, rev_index_path, document_path, out_path):
    src_list = read_line_examples_from_file(src_path)
    dst_list = read_line_examples_from_file(label_path)
    document_list = read_line_examples_from_file(document_path)
    with open(rev_index_path, 'r', encoding='UTF-8') as fp:
        rev_index = json.load(fp)
    rev_dst = [[document_list[index] for index in rev_index[i]["index"]] for i in range(len(src_list))]
    with open(out_path, 'w', encoding='UTF-8') as fp:
        for i in tqdm(range(len(src_list))):
            line = str(i) + '\n' + src_list[i] + '\n' + dst_list[i] + '\n'
            for k in range(len(rev_dst[i])):
                line = line + str(rev_index[i]['score'][k]) + '\t' + rev_dst[i][k] + '\n'
            line += '\n'
            fp.write(line)


def retrieval_hashtag_score_analysis(src_path, label_path, rev_index_path, document_path, top_k):
    src_list = read_line_examples_from_file(src_path)
    dst_list = read_line_examples_from_file(label_path)
    document_list = read_line_examples_from_file(document_path)
    with open(rev_index_path, 'r', encoding='UTF-8') as fp:
        rev_index = json.load(fp)
    rev_dst = [[get_hashtag_list(document_list[index]) for index in rev_index[i]["index"]] for i in range(len(src_list))]
    dst_list = [get_hashtag_list(dst) for dst in dst_list]
    total_p = 0
    total_r = 0
    true_num = 0
    for i in tqdm(range(len(src_list))):
        label = dst_list[i]
        hashtag_score = dict()
        for k in range(len(rev_dst[i])):
            for rev_hashtag in rev_dst[i][k]:
                if rev_hashtag not in hashtag_score.keys():
                    hashtag_score[rev_hashtag] = 0
                hashtag_score[rev_hashtag] += rev_index[i]['score'][k]
        hashtag_score = sorted(hashtag_score.items(), key=lambda x: x[1], reverse=True)[:top_k]
        total_p += len(hashtag_score)
        total_r += len(label)
        for rev_hashtag_pair in hashtag_score:
            for lab in label:
                if rev_hashtag_pair[0] == lab or rev_hashtag_pair[0] in lab or lab in rev_hashtag_pair[0]:
                    true_num += 1
    p = true_num / total_p
    r = true_num / total_r
f = f1(p, r)
1
2023-12-16 06:00:53+00:00
2k
shell-nlp/gpt_server
gpt_server/serving/main.py
[ { "identifier": "get_free_tcp_port", "path": "gpt_server/utils.py", "snippet": "def get_free_tcp_port():\n \"\"\"获取可用的端口\"\"\"\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcp.bind((\"\", 0))\n _, port = tcp.getsockname()\n tcp.close()\n return port" }, { "identi...
import yaml import os import sys import subprocess import signal from pprint import pprint from multiprocessing import Process from gpt_server.utils import get_free_tcp_port, start_server, run_cmd, stop_server,delete_log
1,037
# Configure the project root directory
root_dir = os.path.join(os.path.dirname(__file__), "..")
root_dir = os.path.abspath(root_dir)
sys.path.append(root_dir)

# Delete old logs
delete_log(root_dir)


def signal_handler(signum, frame):
    stop_server()
    raise KeyboardInterrupt


signal.signal(signal.SIGINT, signal_handler)

with open("./config.yaml", "r") as f:
    config = yaml.safe_load(f)
print(config)

# ---------------------------- Start the Controller and OpenAI API services ----------------------------
host = config["serve_args"]["host"]
port = config["serve_args"]["port"]
start_server(host, port)
# ---------------------------- Start the Controller and OpenAI API services ----------------------------

for model_name, model_config in config["models"].items():
    # Enabled models only
    if model_config["enable"]:
        pprint(model_config)
        print()
        # Model path
        model_name_or_path = model_config["model_name_or_path"]
        # Model type
        model_type = model_config["model_type"]
        # model type validation
        py_path = f"{root_dir}/model_worker/{model_type}.py"

        model_names = model_name
        if model_config["alias"]:
            model_names = model_name + "," + model_config["alias"]

        # Get the number of workers and the resources of each worker
        workers = model_config["workers"]
        # if model_config["work_mode"] == "deepspeed":
        # configure deepspeed
        process = []
        for worker in workers:
            gpus = worker["gpus"]
            # convert gpus int ---> str
            gpus = [str(i) for i in gpus]
            gpus_str = ",".join(gpus)
            num_gpus = len(gpus)
            if model_config["work_mode"] == "deepspeed":
                os.environ["USE_DS"] = "1"
                run_mode = f"deepspeed --num_gpus {num_gpus} "
                pass
            elif model_config["work_mode"] == "accelerate":
                os.environ["USE_ACC"] = "1"
                os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str
                run_mode = "python "
                pass
            elif model_config["work_mode"] == "hf":
                os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str
                run_mode = "python "
                pass
            # DeepSpeed only takes effect inside the code
            # os.environ["CUDA_VISIBLE_DEVICES"] = gpus_str

            cmd = (
                run_mode
                + py_path
                + f" --gpus {gpus_str}"
+ f" --master_port {get_free_tcp_port()}"
0
2023-12-16 07:43:28+00:00
2k
LLM-Evaluation-s-Always-Fatiguing/leaf-playground-hub
rag_qa/rag_qa/scene.py
[ { "identifier": "Examiner", "path": "rag_qa/rag_qa/agents/examiner.py", "snippet": "class Examiner(SceneStaticAgent, role_definition=ROLE_DEFINITION, cls_description=\"An agent who minitor the examine\"):\n config_cls = ExaminerConfig\n config: config_cls\n\n def __init__(self, config: config_c...
import asyncio from typing import List, Optional from pydantic import Field from leaf_playground.core.workers import Logger from leaf_playground.core.scene import Scene from leaf_playground.core.scene_definition import SceneConfig from leaf_playground.data.log_body import ActionLogBody from leaf_playground.data.media import Text, Json from .agents.examiner import Examiner from .agents.base_examinee import AIBaseExaminee from .dataset_utils import DatasetConfig from .scene_definition import ExamineeAnswer, ExaminerQuestion, MessageType, SCENE_DEFINITION
1,386
class RagSceneLogBody(ActionLogBody):
    references: Optional[List[MessageType]] = Field(default=None)
    response: MessageType = Field(default=...)
    ground_truth: Optional[Json] = Field(default=None)


RagSceneConfig = SceneConfig.create_config_model(
    SCENE_DEFINITION,
    additional_config_fields={"dataset_config": (DatasetConfig, Field(default=...))}
)


class RagScene(Scene, scene_definition=SCENE_DEFINITION, log_body_class=RagSceneLogBody):
    config_cls = RagSceneConfig
    config: config_cls

    def __init__(self, config: config_cls, logger: Logger):
        super().__init__(config=config, logger=logger)

        self.examiner: Examiner = self.static_agents["examiner"][0]
        self.examinees: List[AIBaseExaminee] = self.agents["examinee"]

    async def _run(self):
        async def examinee_answer(examinee: AIBaseExaminee, q: ExaminerQuestion) -> None:
            try:
answer: ExamineeAnswer = await examinee.answer_question(question=q, examiner=self.examiner.profile)
3
2023-12-21 03:09:08+00:00
2k
djkcyl/ABot-NT
func/tool/mcping/mcping.py
[ { "identifier": "SelfPicture", "path": "utils/message/picture.py", "snippet": "class SelfPicture:\n def __init__(self) -> None:\n self.s3file = Launart.current().get_component(S3FileService).s3file\n\n async def from_name(self, name: str) -> Picture:\n url = await self.s3file.get_pre...
import asyncio import base64 import contextlib import json import re import dns.resolver from io import BytesIO from avilla.core import Picture from loguru import logger from PIL import Image from utils.message.picture import SelfPicture from .statusping import StatusPing
1,539
def ping_status(host: str, port: int | None = None) -> dict:
    if port is None:
        with contextlib.suppress(Exception):
            srv_records = dns.resolver.query(f"_minecraft._tcp.{host}", "SRV")
            for srv in srv_records:
                host = str(srv.target).rstrip(".")
                port = srv.port
                break

    status_ping = StatusPing(host, port or 25565)
    status = status_ping.get_status()
    status_str = json.dumps(status)
    status_str = re.sub(r"\\u00a7.", "", status_str)
    status: dict = json.loads(status_str)
    logger.debug(status)
    return status


def get_server_status(say: str) -> dict:
    host, _, port = say.partition(":")
    return ping_status(host, int(port) if port else None)


async def handle_favicon(status: dict, messages: list[str | Picture]) -> None:
    if favicon := status.get("favicon"):
        byte_data = base64.b64decode(f"{favicon[22:-1]}=")
        img = Image.open(BytesIO(byte_data)).convert("RGB")
        image = BytesIO()
        img.save(image, format="JPEG", quality=90)
messages.append(await SelfPicture().from_data(image, "jpeg"))
0
2023-12-16 13:19:56+00:00
2k
Chenyme/Chenyme-AAMT
AAMT.py
[ { "identifier": "generate_srt_from_result", "path": "utils/utils.py", "snippet": "def generate_srt_from_result(result): # 格式化为SRT字幕的形式\r\n segments = result['segments']\r\n srt_content = ''\r\n segment_id = 1\r\n for segment in segments:\r\n start_time = int(segment['start'] * 1000)\...
import os import json import streamlit as st import whisper from utils.utils import generate_srt_from_result, tmp_filepath, openai_translate, srt_mv, cache, convert_size
1,557
# Author: chenyme
# Version: v0.2.2
# Blog: to be updated

st.set_page_config(
    page_title="AAMT v0.2.2",
    page_icon="📊",
    layout="wide",  # use the wide layout
    initial_sidebar_state="expanded"  # start with the sidebar expanded
)

st.title("Chenyme-AAMT")
st.write("##### Fully automatic AI video translation")

with st.sidebar:
    st.title("Welcome!")
    st.write('''
### Dear user, congratulations on installing this project!

Welcome to AAMT V0.2.2! The goal of this project is to give you a simple, easy-to-use, fully automatic video translation tool, so that you can quickly merge the translated subtitles with the original video and enjoy translated content more easily.

Please note the following:

1. Make sure Python is installed correctly on your system, version 3.8 or higher.

2. Make sure all dependencies are installed and ffmpeg is set as an environment variable.

3. If you run into any problems during installation or use, consult the project documentation or contact the developers for help.
    ''')

dir_1 = os.path.dirname(os.path.abspath(__file__))
dir_2 = dir_1.replace("\\", "/")
config_dir = dir_2 + "/config/"
cache_dir = dir_2 + "/cache/"
print("Config directory of the current project:", config_dir)
print("Cache location of the current project:", cache_dir)

with open(config_dir + "config.json", 'r') as file:  # read the configuration
    config = json.load(file)

tab1, tab2, tab3 = st.tabs(["Home", "Settings", "About"])

with tab1:
    # file upload logic
    uploaded_file = st.file_uploader("Upload your video here:", type=['mp4', 'mov'])
    if uploaded_file is not None:
        with open(cache_dir + "uploaded.mp4", "wb") as file:
            file.write(uploaded_file.getbuffer())
        st.success("Upload successful")

    if st.button('Run'):
        if uploaded_file is not None:
            with st.spinner('Wait for it...'):
                # Whisper transcription
                model = whisper.load_model(st.session_state.option)
                pathvideo = tmp_filepath(uploaded_file)
                result = model.transcribe(pathvideo)
                print("Whisper transcription: " + result['text'])  # source-language text recognized by Whisper
result = openai_translate(st.session_state.key, st.session_state.base, result) # translate into the target language
2
2023-12-18 04:06:03+00:00
2k
davidrs/logo-buddy
logo_buddy/main.py
[ { "identifier": "preprocess", "path": "logo_buddy/controlnet.py", "snippet": "def preprocess(image, controlnet_path=None):\n if \"canny\" in controlnet_path:\n return canny_preprocess(image)\n else:\n return Image.fromarray(image)" }, { "identifier": "CN_MODELS", "path": ...
import os import os.path as op import numpy as np import torch import cv2 import torch from glob import glob from diffusers import StableDiffusionPipeline, DiffusionPipeline from diffusers import ( StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler, ) from diffusers.utils import load_image from PIL import Image from .controlnet import preprocess, CN_MODELS from .utils import read_fit
1,465
STEPS = 34
SEED = 12

MODELS = {
    "real": "/Users/drustsmith/repos/stable-diffusion-webui/models/Stable-diffusion/realisticVisionV51_v51VAE.safetensors",
    "anim": "/Users/drustsmith/repos/stable-diffusion-webui/models/Stable-diffusion/revAnimated_v122EOL.safetensors",
}

#
PROMPT_LIST = [
    # Winter
    {"text": "santa playing in the snow, ethereal, dreamy, highly detailed, realistic lighting, sharp focus, rule of thirds, artgerm, wlop, arney freytag, hd, octane, 4 k, ", "file_name": "winter_santa", "model": "anim"},  # <lora:fantasy00d:0.5>, animated
    {
        "text": "ethereal fantasy concept art of dreamscape Winter wonderland, surreal, ethereal, dreamy, mysterious, fantasy, highly detailed, magnificent, celestial, ethereal, painterly, epic, majestic, magical, fantasy art, cover art, dreamy",
        "file_name": "winter_wonderland",
        "model": "anim",
    },
    {"text": "((ginger bread house)), realistic, insanely detailed, octane rendered, unreal engine, illustration, trending on artstation, masterpiece, photography", "file_name": "winter_ginger", "model": "real"},
    {"text": "winter ice sculpture ", "file_name": "winter_ice"},
    # General
    {"text": "a neon glowing sign", "file_name": "neon"},
    {"text": "hot air balloons ", "file_name": "hot_air_balloons", "model": "real"},
    {"text": "(wood carving), (inlay), (etsy) ", "file_name": "wood_carving", "model": "real"},
    {
        "text": "paper cut, paper layers, laser cut, paper art, vibrant colors, ",
        "file_name": "paper_art",
        "model": "real",
    },
    # {"text": "carved halloween pumpkin, witches, spooky, fun, (vibrant colors:1.1), ", "file_name": "haloween", "model": "anim"},  # <lora:fantasy00d:0.5>, animated
    {
        "text": "fun textures and colours , logo, pixar, orange and pink clouds blue sky, sun, happy vibes, subtle lense flare, birds ",
        "file_name": "clouds",
        "model": "anim",
    },
]

DEFAULT_POSITIVE_SUFFIX = (
    ",detailed, intricate, best quality, (highest quality, award winning:1.3)"
)
DEFAULT_NEGATIVE_PROMPT = (
    "blurry, low quality, low resolution, low res, low resolution, watermark, logo"
)

OUT_DIR = "./out"
os.makedirs(OUT_DIR, exist_ok=True)

# env is mac, cpu or gpu
DEVICE = "mps"
if torch.cuda.is_available():
    DEVICE = "gpu"


def get_pipe(model_path, controlnet_path=None):
    controlnet_model = None
    if controlnet_path:
        # load control net and stable diffusion v1-5
        controlnet_model = ControlNetModel.from_single_file(
            controlnet_path,
            torch_dtype=torch.float16,
            use_safetensors=True,
            device=DEVICE,
        )

    pipe = StableDiffusionControlNetPipeline.from_single_file(
        model_path,
        use_safetensors=True,
        torch_dtype=torch.float16,
        controlnet=controlnet_model,
    )
    pipe = pipe.to(DEVICE)
    # Recommended if your computer has < 64 GB of RAM
    pipe.enable_attention_slicing()
    return pipe


def controlnet_generate(img_path, pipe, out_dir, prompts=PROMPT_LIST, controlnet=None):
    image = read_fit(img_path)
    preprocessed_image = None
    if controlnet:
        preprocessed_image = preprocess(image, controlnet_path=controlnet)

    for p in prompts:
        generator = torch.manual_seed(SEED)
        for i in range(0, 1):
            print(DEFAULT_POSITIVE_SUFFIX)
            print(p["text"])
            steps = STEPS
            image = pipe(
                p["text"] + DEFAULT_POSITIVE_SUFFIX,
                negative_prompt=DEFAULT_NEGATIVE_PROMPT,
                num_inference_steps=steps,
                generator=generator,
                image=preprocessed_image,
                # guidance_scale=20 if 'qr' in controlnet else 15,
                # controlnet_conditioning_scale=2.0 if 'qr' in controlnet else 1.0,
                # strength=0.85,
            ).images[0]
            image.save(op.join(out_dir, f"{p['file_name']}_{controlnet}_{SEED}.png"))


# if main
if __name__ == "__main__":
    for m, mp in MODELS.items():
for cn, cn_path in CN_MODELS.items():
1
2023-12-17 19:24:56+00:00
2k
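The script above sets DEVICE to the string "gpu" when CUDA is available, which torch does not accept as a device name; "cuda" is the spelling torch.device and pipe.to() understand. A corrected selection sketch, with an explicit CPU fallback added as an assumption (the original only distinguishes CUDA from the "mps" default):

import torch

def pick_device() -> str:
    # Prefer CUDA, then Apple's Metal backend, then plain CPU.
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"

DEVICE = pick_device()
# Seeding on the CPU keeps the random stream reproducible across devices;
# diffusers pipelines accept such a generator through the generator= keyword,
# as the pipe(...) call in the record shows.
generator = torch.Generator(device="cpu").manual_seed(12)
print(DEVICE, generator.initial_seed())  # e.g. mps 12 on an Apple-silicon machine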
Varexa/Gateway
chat_exporter/construct/assets/embed.py
[ { "identifier": "discord", "path": "chat_exporter/ext/discord_import.py", "snippet": "" }, { "identifier": "fill_out", "path": "chat_exporter/ext/html_generator.py", "snippet": "PARSE_MODE_NONE = 0\r\nPARSE_MODE_NO_MARKDOWN = 1\r\nPARSE_MODE_MARKDOWN = 2\r\nPARSE_MODE_EMBED = 3\r\nPARSE_...
import html from chat_exporter.ext.discord_import import discord from chat_exporter.ext.html_generator import ( fill_out, embed_body, embed_title, embed_description, embed_field, embed_field_inline, embed_footer, embed_footer_icon, embed_image, embed_thumbnail, embed_author, embed_author_icon, PARSE_MODE_NONE, PARSE_MODE_EMBED, PARSE_MODE_MARKDOWN, PARSE_MODE_SPECIAL_EMBED, )
894
modules_which_use_none = ["nextcord", "disnake"] def _gather_checker(): if discord.module not in modules_which_use_none and hasattr(discord.Embed, "Empty"): return discord.Embed.Empty return None class Embed: r: str g: str b: str title: str description: str author: str image: str thumbnail: str footer: str fields: str check_against = None def __init__(self, embed, guild): self.embed: discord.Embed = embed self.guild: discord.Guild = guild async def flow(self): self.check_against = _gather_checker() self.build_colour() await self.build_title() await self.build_description() await self.build_fields() await self.build_author() await self.build_image() await self.build_thumbnail() await self.build_footer() await self.build_embed() return self.embed def build_colour(self): self.r, self.g, self.b = ( (self.embed.colour.r, self.embed.colour.g, self.embed.colour.b) if self.embed.colour != self.check_against else (0x20, 0x22, 0x25) # default colour ) async def build_title(self): self.title = html.escape(self.embed.title) if self.embed.title != self.check_against else "" if self.title: self.title = await fill_out(self.guild, embed_title, [ ("EMBED_TITLE", self.title, PARSE_MODE_MARKDOWN) ]) async def build_description(self): self.description = html.escape(self.embed.description) if self.embed.description != self.check_against else "" if self.description: self.description = await fill_out(self.guild, embed_description, [ ("EMBED_DESC", self.embed.description, PARSE_MODE_EMBED) ]) async def build_fields(self): self.fields = "" # This does not have to be here, but Pycord. if not self.embed.fields: return for field in self.embed.fields: field.name = html.escape(field.name) field.value = html.escape(field.value) if field.inline: self.fields += await fill_out(self.guild, embed_field_inline, [ ("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED), ("FIELD_VALUE", field.value, PARSE_MODE_EMBED) ]) else: self.fields += await fill_out(self.guild, embed_field, [ ("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED), ("FIELD_VALUE", field.value, PARSE_MODE_EMBED)]) async def build_author(self): self.author = html.escape(self.embed.author.name) if self.embed.author.name != self.check_against else "" self.author = f'<a class="chatlog__embed-author-name-link" href="{self.embed.author.url}">{self.author}</a>' \ if self.embed.author.url != self.check_against \ else self.author
modules_which_use_none = ["nextcord", "disnake"] def _gather_checker(): if discord.module not in modules_which_use_none and hasattr(discord.Embed, "Empty"): return discord.Embed.Empty return None class Embed: r: str g: str b: str title: str description: str author: str image: str thumbnail: str footer: str fields: str check_against = None def __init__(self, embed, guild): self.embed: discord.Embed = embed self.guild: discord.Guild = guild async def flow(self): self.check_against = _gather_checker() self.build_colour() await self.build_title() await self.build_description() await self.build_fields() await self.build_author() await self.build_image() await self.build_thumbnail() await self.build_footer() await self.build_embed() return self.embed def build_colour(self): self.r, self.g, self.b = ( (self.embed.colour.r, self.embed.colour.g, self.embed.colour.b) if self.embed.colour != self.check_against else (0x20, 0x22, 0x25) # default colour ) async def build_title(self): self.title = html.escape(self.embed.title) if self.embed.title != self.check_against else "" if self.title: self.title = await fill_out(self.guild, embed_title, [ ("EMBED_TITLE", self.title, PARSE_MODE_MARKDOWN) ]) async def build_description(self): self.description = html.escape(self.embed.description) if self.embed.description != self.check_against else "" if self.description: self.description = await fill_out(self.guild, embed_description, [ ("EMBED_DESC", self.embed.description, PARSE_MODE_EMBED) ]) async def build_fields(self): self.fields = "" # This does not have to be here, but Pycord. if not self.embed.fields: return for field in self.embed.fields: field.name = html.escape(field.name) field.value = html.escape(field.value) if field.inline: self.fields += await fill_out(self.guild, embed_field_inline, [ ("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED), ("FIELD_VALUE", field.value, PARSE_MODE_EMBED) ]) else: self.fields += await fill_out(self.guild, embed_field, [ ("FIELD_NAME", field.name, PARSE_MODE_SPECIAL_EMBED), ("FIELD_VALUE", field.value, PARSE_MODE_EMBED)]) async def build_author(self): self.author = html.escape(self.embed.author.name) if self.embed.author.name != self.check_against else "" self.author = f'<a class="chatlog__embed-author-name-link" href="{self.embed.author.url}">{self.author}</a>' \ if self.embed.author.url != self.check_against \ else self.author
author_icon = await fill_out(self.guild, embed_author_icon, [
1
2023-12-18 14:17:31+00:00
2k
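The builder above escapes every embed string and compares it against a per-library sentinel: discord.Embed.Empty on older forks, None on nextcord and disnake, which is exactly what _gather_checker resolves. A dependency-free sketch of that pattern (the sample strings are made up):

import html

check_against = None  # what _gather_checker returns for nextcord/disnake

def safe_field(value):
    # Escape user-controlled embed text; fall back to an empty string
    # when the library's "unset" sentinel is present.
    return html.escape(value) if value != check_against else ""

print(safe_field("<b>title</b> & more"))  # &lt;b&gt;title&lt;/b&gt; &amp; more
print(safe_field(None))                   # prints an empty string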
mariaalfaroc/a2s-transformer
my_utils/metrics.py
[ { "identifier": "VOICE_CHANGE_TOKEN", "path": "my_utils/encoding_convertions.py", "snippet": "VOICE_CHANGE_TOKEN = \"<COC>\"" }, { "identifier": "STEP_CHANGE_TOKEN", "path": "my_utils/encoding_convertions.py", "snippet": "STEP_CHANGE_TOKEN = \"<COR>\"" } ]
import os import shutil from music21 import converter as converterm21 from pyMV2H.utils.mv2h import MV2H from pyMV2H.metrics.mv2h import mv2h from pyMV2H.utils.music import Music from pyMV2H.converter.midi_converter import MidiConverter as Converter from .encoding_convertions import VOICE_CHANGE_TOKEN, STEP_CHANGE_TOKEN
927
def compute_metrics(y_true, y_pred): ################################# Sym-ER and Seq-ER: metrics = compute_ed_metrics(y_true=y_true, y_pred=y_pred) ################################# MV2H: mv2h_dict = compute_mv2h_metrics(y_true=y_true, y_pred=y_pred) metrics.update(mv2h_dict) return metrics #################################################################### SYM-ER AND SEQ-ER: def compute_ed_metrics(y_true, y_pred): def levenshtein(a, b): n, m = len(a), len(b) if n > m: a, b = b, a n, m = m, n current = range(n + 1) for i in range(1, m + 1): previous, current = current, [i] + [0] * n for j in range(1, n + 1): add, delete = previous[j] + 1, current[j - 1] + 1 change = previous[j - 1] if a[j - 1] != b[i - 1]: change = change + 1 current[j] = min(add, delete, change) return current[n] ed_acc = 0 length_acc = 0 label_acc = 0 for t, h in zip(y_true, y_pred): ed = levenshtein(t, h) ed_acc += ed length_acc += len(t) if ed > 0: label_acc += 1 return { "sym-er": 100.0 * ed_acc / length_acc, "seq-er": 100.0 * label_acc / len(y_pred), } #################################################################### MV2H: def compute_mv2h_metrics(y_true, y_pred): def krn2midi(in_file): a = converterm21.parse(in_file).write("midi") midi_file = a.name shutil.copyfile(a, midi_file) os.remove(in_file) return midi_file def midi2txt(midi_file): txt_file = midi_file.replace("mid", "txt") converter = Converter(file=midi_file, output=txt_file) converter.convert_file() with open(txt_file, "r") as fin: f = [u.replace(".0", "") for u in fin.readlines()] with open(txt_file, "w") as fout: for u in f: fout.write(u) os.remove(midi_file) return txt_file ########################################### Polyphonic evaluation: def eval_as_polyphonic(): # Convert to MIDI reference_midi_file = krn2midi("true.krn") predicted_midi_file = krn2midi("pred.krn") # Convert to TXT reference_txt_file = midi2txt(reference_midi_file) predicted_txt_file = midi2txt(predicted_midi_file) # Compute MV2H reference_file = Music.from_file(reference_txt_file) transcription_file = Music.from_file(predicted_txt_file) res_dict = MV2H(multi_pitch=0, voice=0, meter=0, harmony=0, note_value=0) try: res_dict = mv2h(reference_file, transcription_file) except: pass # Remove auxiliar files os.remove(reference_txt_file) os.remove(predicted_txt_file) return res_dict ########################################### Monophonic evaluation: def get_number_of_voices(kern): num_voices = 0 for token in kern: if token == VOICE_CHANGE_TOKEN: continue
def compute_metrics(y_true, y_pred): ################################# Sym-ER and Seq-ER: metrics = compute_ed_metrics(y_true=y_true, y_pred=y_pred) ################################# MV2H: mv2h_dict = compute_mv2h_metrics(y_true=y_true, y_pred=y_pred) metrics.update(mv2h_dict) return metrics #################################################################### SYM-ER AND SEQ-ER: def compute_ed_metrics(y_true, y_pred): def levenshtein(a, b): n, m = len(a), len(b) if n > m: a, b = b, a n, m = m, n current = range(n + 1) for i in range(1, m + 1): previous, current = current, [i] + [0] * n for j in range(1, n + 1): add, delete = previous[j] + 1, current[j - 1] + 1 change = previous[j - 1] if a[j - 1] != b[i - 1]: change = change + 1 current[j] = min(add, delete, change) return current[n] ed_acc = 0 length_acc = 0 label_acc = 0 for t, h in zip(y_true, y_pred): ed = levenshtein(t, h) ed_acc += ed length_acc += len(t) if ed > 0: label_acc += 1 return { "sym-er": 100.0 * ed_acc / length_acc, "seq-er": 100.0 * label_acc / len(y_pred), } #################################################################### MV2H: def compute_mv2h_metrics(y_true, y_pred): def krn2midi(in_file): a = converterm21.parse(in_file).write("midi") midi_file = a.name shutil.copyfile(a, midi_file) os.remove(in_file) return midi_file def midi2txt(midi_file): txt_file = midi_file.replace("mid", "txt") converter = Converter(file=midi_file, output=txt_file) converter.convert_file() with open(txt_file, "r") as fin: f = [u.replace(".0", "") for u in fin.readlines()] with open(txt_file, "w") as fout: for u in f: fout.write(u) os.remove(midi_file) return txt_file ########################################### Polyphonic evaluation: def eval_as_polyphonic(): # Convert to MIDI reference_midi_file = krn2midi("true.krn") predicted_midi_file = krn2midi("pred.krn") # Convert to TXT reference_txt_file = midi2txt(reference_midi_file) predicted_txt_file = midi2txt(predicted_midi_file) # Compute MV2H reference_file = Music.from_file(reference_txt_file) transcription_file = Music.from_file(predicted_txt_file) res_dict = MV2H(multi_pitch=0, voice=0, meter=0, harmony=0, note_value=0) try: res_dict = mv2h(reference_file, transcription_file) except: pass # Remove auxiliar files os.remove(reference_txt_file) os.remove(predicted_txt_file) return res_dict ########################################### Monophonic evaluation: def get_number_of_voices(kern): num_voices = 0 for token in kern: if token == VOICE_CHANGE_TOKEN: continue
if token == STEP_CHANGE_TOKEN:
1
2023-12-18 20:01:00+00:00
2k
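The edit-distance core of compute_ed_metrics runs standalone. Below is the same two-row dynamic program with a worked result; the token sequences are hypothetical:

def levenshtein(a, b):
    # Two-row dynamic program, identical in spirit to the record's helper.
    n, m = len(a), len(b)
    if n > m:
        a, b, n, m = b, a, m, n
    current = list(range(n + 1))
    for i in range(1, m + 1):
        previous, current = current, [i] + [0] * n
        for j in range(1, n + 1):
            add, delete = previous[j] + 1, current[j - 1] + 1
            change = previous[j - 1] + (a[j - 1] != b[i - 1])
            current[j] = min(add, delete, change)
    return current[n]

y_true = [["c", "d", "e"], ["f", "g"]]
y_pred = [["c", "d"], ["f", "g"]]
ed = sum(levenshtein(t, h) for t, h in zip(y_true, y_pred))   # 1 edit in total
length = sum(len(t) for t in y_true)                          # 5 reference tokens
print(100.0 * ed / length)                                    # sym-er = 20.0
wrong = sum(levenshtein(t, h) > 0 for t, h in zip(y_true, y_pred))
print(100.0 * wrong / len(y_pred))                            # seq-er = 50.0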
YashsviG/rootkit
victim.py
[ { "identifier": "port_knocking", "path": "portknocker.py", "snippet": "def port_knocking(victim_ip):\n \"\"\"\n Perform port knocking on the victim side to authenticate the commander.\n\n Args:\n victim_ip (str): IP address of the victim.\n\n Returns:\n tuple: IP address and po...
import argparse import setproctitle import shutil from keylogger import * from watcher import * from portknocker import port_knocking from processname import choose_process_name from utils import get_ip_address, transfer_keylog_file, check_exists
1,279
def handle_command(command: int, keylogger, watcher, covert): """ Handle the received command. Args: command (int): Received command. keylogger (Keylogger): Keylogger instance. watcher (Watcher): Watcher instance. covert (CovertChannel): Covert channel instance. Returns: int: Result code. """ if command == 0: return 0 print(f"VICTIM:: Command Received", end=" ") if command == 1: print("VICTIM:: Received command to start the keylog program...") keylogger.start_keylogger() return 1 elif command == 2: print("VICTIM:: Received command to stop the keylog program...") if not keylogger.get_status(): print("VICTIM:: Keylogger is not running.") return 2 val = keylogger.stop_keylogger() if val == 0: print("VICTIM:: Keylogger has been stopped.") return 2 elif command == 3: print("VICTIM:: Received command to transfer the keylog file...") return transfer_keylog_file(keylogger, covert, "keylog.txt") elif command == 4: print(f"VICTIM:: Received command to watch file...") file = covert.receive_data(for_victim=True)
def handle_command(command: int, keylogger, watcher, covert): """ Handle the received command. Args: command (int): Received command. keylogger (Keylogger): Keylogger instance. watcher (Watcher): Watcher instance. covert (CovertChannel): Covert channel instance. Returns: int: Result code. """ if command == 0: return 0 print(f"VICTIM:: Command Received", end=" ") if command == 1: print("VICTIM:: Received command to start the keylog program...") keylogger.start_keylogger() return 1 elif command == 2: print("VICTIM:: Received command to stop the keylog program...") if not keylogger.get_status(): print("VICTIM:: Keylogger is not running.") return 2 val = keylogger.stop_keylogger() if val == 0: print("VICTIM:: Keylogger has been stopped.") return 2 elif command == 3: print("VICTIM:: Received command to transfer the keylog file...") return transfer_keylog_file(keylogger, covert, "keylog.txt") elif command == 4: print(f"VICTIM:: Received command to watch file...") file = covert.receive_data(for_victim=True)
i = check_exists(file)
4
2023-12-19 18:54:22+00:00
2k
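handle_command above dispatches integer command codes through a long if/elif chain. A table-driven variant keeps each action isolated; the stand-in actions below are hypothetical, since the real ones talk to the keylogger, watcher and covert channel:

def start_keylogger():
    return 1

def stop_keylogger():
    return 2

def transfer_keylog():
    return 3

HANDLERS = {1: start_keylogger, 2: stop_keylogger, 3: transfer_keylog}

def handle(command: int) -> int:
    # Command 0 (and anything unknown) falls through to a no-op,
    # mirroring the early return in the record.
    action = HANDLERS.get(command)
    return action() if action else 0

print(handle(1))  # 1
print(handle(9))  # 0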
yacinxx/dnakey
enginev2.py
[ { "identifier": "ConfigManager", "path": "profile_config/config_manager.py", "snippet": "class ConfigManager:\r\n def __init__(self, prime_key:str) -> None:\r\n with open(\"profile_config/profile_config.json\", \"r\") as f: \r\n self.profile_data = __import__(\"json\").loads(f.read(...
from cryptography.fernet import Fernet from profile_config.config_manager import ConfigManager from license.license_manager import VERSION import random, json, string, datetime
1,550
class DNAEngine(): def __init__( self, has_key="test", profile_name="profile_test", activate_merge=True, save_cookies=True, **advance_settings): self.has_key = has_key self.profile_name = profile_name self.length = advance_settings.get("length", 40) self.has_lower = advance_settings.get("has_lower", True) self.has_upper = advance_settings.get("has_upper", True) self.has_number = advance_settings.get("has_number", True) self.has_symbol = advance_settings.get("has_symbol", False) self.has_arabic = advance_settings.get("has_arabic", False) self.activate_merge = activate_merge self.save_cookies = save_cookies # Create a Fernet object with the secret key secret_key = self.has_key.encode("utf-8") self.fernet = Fernet(secret_key) self.create_date = datetime.datetime.now() # Convert datetime to string self.formatted_datetime = self.create_date.isoformat() self.random_func = { "lower": self.get_random_lower, "upper": self.get_random_upper, "number": self.get_random_number, "symbol": self.get_random_symbol, "arabic": self.get_random_arabic } def create_id_profile(self): self.config_has_key = f"dnakey${self.has_key[:32:2]}"
class DNAEngine(): def __init__( self, has_key="test", profile_name="profile_test", activate_merge=True, save_cookies=True, **advance_settings): self.has_key = has_key self.profile_name = profile_name self.length = advance_settings.get("length", 40) self.has_lower = advance_settings.get("has_lower", True) self.has_upper = advance_settings.get("has_upper", True) self.has_number = advance_settings.get("has_number", True) self.has_symbol = advance_settings.get("has_symbol", False) self.has_arabic = advance_settings.get("has_arabic", False) self.activate_merge = activate_merge self.save_cookies = save_cookies # Create a Fernet object with the secret key secret_key = self.has_key.encode("utf-8") self.fernet = Fernet(secret_key) self.create_date = datetime.datetime.now() # Convert datetime to string self.formatted_datetime = self.create_date.isoformat() self.random_func = { "lower": self.get_random_lower, "upper": self.get_random_upper, "number": self.get_random_number, "symbol": self.get_random_symbol, "arabic": self.get_random_arabic } def create_id_profile(self): self.config_has_key = f"dnakey${self.has_key[:32:2]}"
self.config_manager = ConfigManager(self.config_has_key)
0
2023-12-18 22:04:13+00:00
2k
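Fernet only accepts a 32-byte url-safe base64-encoded key, so the default has_key="test" in the signature above would make Fernet(secret_key) raise ValueError; real callers presumably pass a properly generated key. A round trip for reference:

from cryptography.fernet import Fernet

key = Fernet.generate_key()   # 32 url-safe base64-encoded bytes
fernet = Fernet(key)          # Fernet(b"test") raises ValueError

token = fernet.encrypt(b"profile_test")
print(fernet.decrypt(token))  # b'profile_test'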
tamnva/hydroecolstm
examples/example_run.py
[ { "identifier": "run_train", "path": "hydroecolstm/model_run.py", "snippet": "def run_train(config_file):\n \n # Load configuration\n config = read_config(config_file)\n\n # Read and split data\n data = read_train_test_data(config)\n \n # Scale/transformer name for static, dynamic, ...
from hydroecolstm.model_run import run_train from hydroecolstm.utility.plot import plot from hydroecolstm.interface.main_gui import show_gui
941
# Import hydroecolstm function #-----------------------------------------------------------------------------# # Run the model # #-----------------------------------------------------------------------------# # Configuration file config_file = "C:/Users/nguyenta/Documents/GitHub/config.yml" # Train the model => return model, x_scaler, y_scaler, data model, x_scaler, y_scaler, data, config = run_train(config_file) # Visualize result: train_test_period = "train" or "test" for object_id in config["object_id"]: for target in config["target_features"]: p = plot(data, object_id=str(object_id), train_test_period="test", target_feature=target) p.show() #-----------------------------------------------------------------------------# # Work with GUI, use the two lines below to call the GUI # #-----------------------------------------------------------------------------#
# Import hydroecolstm function #-----------------------------------------------------------------------------# # Run the model # #-----------------------------------------------------------------------------# # Configuration file config_file = "C:/Users/nguyenta/Documents/GitHub/config.yml" # Train the model => return model, x_scaler, y_scaler, data model, x_scaler, y_scaler, data, config = run_train(config_file) # Visualize result: train_test_period = "train" or "test" for object_id in config["object_id"]: for target in config["target_features"]: p = plot(data, object_id=str(object_id), train_test_period="test", target_feature=target) p.show() #-----------------------------------------------------------------------------# # Work with GUI, use the two lines below to call the GUI # #-----------------------------------------------------------------------------#
show_gui()
2
2023-12-20 09:11:36+00:00
2k
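The runner iterates every (object_id, target_feature) pair taken from the YAML configuration. A sketch of the same loop over an inline config, assuming PyYAML and made-up station and feature names:

import yaml  # PyYAML

config = yaml.safe_load("""
object_id: [basin_01, basin_02]
target_features: [discharge, water_temperature]
""")

# One plot per (station, target) pair, as in the runner.
for object_id in config["object_id"]:
    for target in config["target_features"]:
        print(f"plot object_id={object_id} target={target} period=test")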
LuhhLu/Predictive-Video-Segmentation
unet_train.py
[ { "identifier": "Load_unet", "path": "Unet.py", "snippet": "def Load_unet(path=None):\n if path:\n unet_model = UNet(n_channels=3, n_classes=49)\n unet_model.load_state_dict(torch.load(path))\n else:\n unet_model = UNet(n_channels=3, n_classes=49)\n return unet_model" }, ...
from tqdm import tqdm from torch.utils.data import DataLoader from torchvision import transforms from Unet import Load_unet, CustomDataset, WeightedBCEWithLogitsLoss import torch import torch.optim as optim import argparse
943
def main(): # Command-line arguments parser = argparse.ArgumentParser(description='Train UNet with custom settings') parser.add_argument('--lr', type=float, default=0.001, help='Learning rate') parser.add_argument('--batch', type=int, default=64, help='Batch size') parser.add_argument('--res', type=str, default='full', help='Resolution in the format H,W') parser.add_argument('--epoch', type=int, default=10, help='number of training epochs') args = parser.parse_args() # Process resolution argument if args.res == 'full': transform = transforms.Compose([ transforms.ToTensor(), ]) resolution = (160, 240) else: try: res_value = int(args.res) resolution = (res_value, res_value) transform = transforms.Compose([ transforms.ToTensor(), transforms.Resize(resolution, antialias=True), transforms.Resize((160, 240), antialias=True) ]) except ValueError: raise ValueError("Invalid resolution value. Please provide 'full' or a single number.") if args.res == 'full': print("Training with Resolution: (160, 240)") filename_suffix = 'full' else: res_value = int(args.res) print(f"Training with Resolution: ({res_value}, {res_value})") filename_suffix = str(res_value)
def main(): # Command-line arguments parser = argparse.ArgumentParser(description='Train UNet with custom settings') parser.add_argument('--lr', type=float, default=0.001, help='Learning rate') parser.add_argument('--batch', type=int, default=64, help='Batch size') parser.add_argument('--res', type=str, default='full', help='Resolution in the format H,W') parser.add_argument('--epoch', type=int, default=10, help='number of training epochs') args = parser.parse_args() # Process resolution argument if args.res == 'full': transform = transforms.Compose([ transforms.ToTensor(), ]) resolution = (160, 240) else: try: res_value = int(args.res) resolution = (res_value, res_value) transform = transforms.Compose([ transforms.ToTensor(), transforms.Resize(resolution, antialias=True), transforms.Resize((160, 240), antialias=True) ]) except ValueError: raise ValueError("Invalid resolution value. Please provide 'full' or a single number.") if args.res == 'full': print("Training with Resolution: (160, 240)") filename_suffix = 'full' else: res_value = int(args.res) print(f"Training with Resolution: ({res_value}, {res_value})") filename_suffix = str(res_value)
train_dataset = CustomDataset('unet_train/images', 'unet_train/masks', transform)
1
2023-12-17 20:39:14+00:00
2k
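When --res is numeric, the transform above downsamples and immediately upsamples back to (160, 240), so the network always sees its native shape but with degraded detail. The same trick in isolation; the 64x64 intermediate resolution is an arbitrary choice:

import torch
from torchvision import transforms

degrade = transforms.Compose([
    transforms.Resize((64, 64), antialias=True),    # throw away fine detail
    transforms.Resize((160, 240), antialias=True),  # restore the expected shape
])

x = torch.rand(3, 160, 240)  # stand-in for a ToTensor()-converted frame
print(degrade(x).shape)      # torch.Size([3, 160, 240])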
garinops/chat-E-AI
embed/clients/itchat/messages/friend.py
[ { "identifier": "ITCHAT_CALL_CODE_SELF", "path": "config/settings.py", "snippet": "ITCHAT_CALL_CODE_SELF = \"AI\"" }, { "identifier": "ITCHAT_CALL_CODE", "path": "config/settings.py", "snippet": "ITCHAT_CALL_CODE = \"AI\"" }, { "identifier": "ITCHAT_WHITELIST_FRIEND", "path":...
from config.settings import ITCHAT_CALL_CODE_SELF, ITCHAT_CALL_CODE, ITCHAT_WHITELIST_FRIEND from embed.reply.text import EReplyText from models.messages import MessageItchat, MessageCea from models.send import Send
1,025
def handle_friend_message(client, message: MessageItchat) -> Send: _callCodeSelf = ITCHAT_CALL_CODE_SELF _callCode = ITCHAT_CALL_CODE
def handle_friend_message(client, message: MessageItchat) -> Send: _callCodeSelf = ITCHAT_CALL_CODE_SELF _callCode = ITCHAT_CALL_CODE
_whiteListFriend = ITCHAT_WHITELIST_FRIEND
2
2023-12-16 17:02:13+00:00
2k
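The friend handler gates replies on a call code and a whitelist before delegating to EReplyText; the exact parsing lives there, so the sketch below is only an approximation, and the friend names are hypothetical:

CALL_CODE = "AI"
WHITELIST = ["alice", "bob"]

def should_reply(sender: str, text: str) -> bool:
    # Reply only to whitelisted friends whose message starts with the call code.
    return sender in WHITELIST and text.strip().startswith(CALL_CODE)

print(should_reply("alice", "AI what's the weather?"))  # True
print(should_reply("mallory", "AI hi"))                 # False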
ruudjuffermans/Event-Driven-Backtester
backtester/execution.py
[ { "identifier": "FillEvent", "path": "backtester/events.py", "snippet": "class FillEvent(Event):\n \"\"\"\n Fill event once an order based on the response from the broker\n\n Parameters:\n datetime - A datetime at which the signal is created.\n symbol - The symbol for current asset.\n ...
from abc import abstractmethod from datetime import datetime from .events import FillEvent, OrderEvent
653
class ExecutionHandler: def register(self, events): self.events = events @abstractmethod def execute_order(self, event): raise NotImplementedError("Should implement execute_order()") class SimulatedExecutionHandler(ExecutionHandler): def __init__(self): pass def execute_order(self, event):
class ExecutionHandler: def register(self, events): self.events = events @abstractmethod def execute_order(self, event): raise NotImplementedError("Should implement execute_order()") class SimulatedExecutionHandler(ExecutionHandler): def __init__(self): pass def execute_order(self, event):
if isinstance(event, OrderEvent):
1
2023-12-16 21:09:00+00:00
2k
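A simulated execution handler like the one above typically pulls OrderEvents off a shared queue and answers each with a FillEvent. A self-contained sketch; the event fields are modeled on the docstring in the context and partly assumed:

import queue
from datetime import datetime, timezone

class OrderEvent:
    def __init__(self, symbol, quantity, direction):
        self.symbol, self.quantity, self.direction = symbol, quantity, direction

class FillEvent:
    def __init__(self, timeindex, symbol, exchange, quantity, direction, fill_cost):
        self.timeindex, self.symbol, self.exchange = timeindex, symbol, exchange
        self.quantity, self.direction, self.fill_cost = quantity, direction, fill_cost

events = queue.Queue()
events.put(OrderEvent("AAPL", 100, "BUY"))

event = events.get()
if isinstance(event, OrderEvent):
    # The simulated handler fills every order instantly and at zero cost.
    fill = FillEvent(datetime.now(timezone.utc), event.symbol, "SIM",
                     event.quantity, event.direction, 0.0)
    events.put(fill)
    print(f"filled {fill.quantity} {fill.symbol} on {fill.exchange}")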
liebrandapps/FindMyGUI
findmy/request_reports.py
[ { "identifier": "icloud_login_mobileme", "path": "findmy/pypush_gsa_icloud.py", "snippet": "def icloud_login_mobileme(ctx, second_factor='sms'):\n username = ctx.cfg.appleId_appleId\n password = ctx.cfg.appleId_password\n anisetteUrl = ctx.cfg.general_anisetteHost + \":\" + str(ctx.cfg.general_...
import base64 import datetime import hashlib import json import os import struct import requests from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes from findmy.pypush_gsa_icloud import icloud_login_mobileme, generate_anisette_headers
1,373
class FindMy: def __init__(self, ctx): self.ctx = ctx def sha256(self, data): digest = hashlib.new("sha256") digest.update(data) return digest.digest() def decrypt(self, enc_data, algorithm_dkey, mode): decryptor = Cipher(algorithm_dkey, mode, default_backend()).decryptor() return decryptor.update(enc_data) + decryptor.finalize() def decode_tag(self, data): latitude = struct.unpack(">i", data[0:4])[0] / 10000000.0 longitude = struct.unpack(">i", data[4:8])[0] / 10000000.0 confidence = int.from_bytes(data[8:9], 'big') status = int.from_bytes(data[9:10], 'big') return {'lat': latitude, 'lon': longitude, 'conf': confidence, 'status': status} def getAuth(self, regenerate=False, second_factor='sms'): CONFIG_PATH = os.path.dirname(os.path.realpath(__file__)) + "/auth.json" if os.path.exists(CONFIG_PATH) and not regenerate: with open(CONFIG_PATH, "r") as f: j = json.load(f) else: mobileme = None try:
class FindMy: def __init__(self, ctx): self.ctx = ctx def sha256(self, data): digest = hashlib.new("sha256") digest.update(data) return digest.digest() def decrypt(self, enc_data, algorithm_dkey, mode): decryptor = Cipher(algorithm_dkey, mode, default_backend()).decryptor() return decryptor.update(enc_data) + decryptor.finalize() def decode_tag(self, data): latitude = struct.unpack(">i", data[0:4])[0] / 10000000.0 longitude = struct.unpack(">i", data[4:8])[0] / 10000000.0 confidence = int.from_bytes(data[8:9], 'big') status = int.from_bytes(data[9:10], 'big') return {'lat': latitude, 'lon': longitude, 'conf': confidence, 'status': status} def getAuth(self, regenerate=False, second_factor='sms'): CONFIG_PATH = os.path.dirname(os.path.realpath(__file__)) + "/auth.json" if os.path.exists(CONFIG_PATH) and not regenerate: with open(CONFIG_PATH, "r") as f: j = json.load(f) else: mobileme = None try:
mobileme = icloud_login_mobileme(self.ctx, second_factor=second_factor)
0
2023-12-16 12:39:52+00:00
2k
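decode_tag above unpacks a 10-byte report: two big-endian int32 coordinates scaled by 1e7, then one byte each of confidence and status. A round trip on a synthetic payload (the coordinates are arbitrary):

import struct

def decode_tag(data: bytes) -> dict:
    latitude = struct.unpack(">i", data[0:4])[0] / 10000000.0
    longitude = struct.unpack(">i", data[4:8])[0] / 10000000.0
    # data[8] is equivalent to int.from_bytes(data[8:9], "big") in the record.
    return {"lat": latitude, "lon": longitude, "conf": data[8], "status": data[9]}

payload = struct.pack(">iiBB", 523520060, 134049790, 2, 0)
print(decode_tag(payload))  # {'lat': 52.352006, 'lon': 13.404979, 'conf': 2, 'status': 0}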
Samuel-Effiong/Django-Dynamic-Table
django_dynamic_table/models.py
[ { "identifier": "TableHaveNoRow", "path": "django_dynamic_table/errors.py", "snippet": "class TableHaveNoRow(DynamicTableError):\r\n pass\r" }, { "identifier": "TableHaveNoColumn", "path": "django_dynamic_table/errors.py", "snippet": "class TableHaveNoColumn(DynamicTableError):\r\n ...
from typing import Sequence from datetime import datetime from django.db import models from django.utils import timezone from django.utils.translation import gettext_lazy as _ from .errors import ( TableHaveNoRow, TableHaveNoColumn, ColumnNotInTable, RowNotInTable, DuplicateColumnInTable, DynamicTableError, UnSupportedDataType, CantParseValueToDataType, CellDoesNotExist )
934
""" Creating a Dynamic Table using conventional Django standard This Table gives you more control over it manipulation than Django models Developed by: Samuel Effiong Nkopuruk Email: senai.nkop@gmail.com """ __SUPPORTED_DATA_TYPE_CHOICES__ = ( ('char', 'Char'), ('int', 'Int'), ('float', 'Float'), ('bool', 'Bool'), ('textfield', 'TextField'), ('date', 'Date'), ) # Create your models here. class DynamicTable(models.Model): table_name = models.CharField(_('Table Name'), max_length=255, unique=True) table_description = models.TextField(_('Table Description'), blank=True) date_created = models.DateTimeField(_('Date Created'), default=timezone.now) table_columns = models.ManyToManyField('TableColumn', blank=True) table_rows = models.ManyToManyField('TableRow', blank=True) class Meta: ordering = ('-date_created', ) def __str__(self) -> str: return f"{self.table_name}" def __total_table_rows(self) -> int: field = self.table_columns.first() if field and isinstance(field, TableColumn): return self.table_columns.all().count() else: # the table is empty return 0 def __total_table_columns(self) -> int: return self.table_columns.all().count() def table_info(self) -> dict[str, int]: description = { 'rows': self.__total_table_rows(), 'columns': self.__total_table_columns() } return description def is_empty(self) -> bool: table_info = self.table_info() rows = table_info['rows'] columns = table_info['columns'] return True if columns == 0 or rows == 0 else False def is_column(self, column_name: str) -> bool: if not isinstance(column_name, str): raise ValueError("column name must be a str") try: column = self.table_columns.get(column_name=column_name) return True except TableColumn.DoesNotExist: return False def get_supported_data_types(self) -> list[str]: return [data_type[0] for data_type in __SUPPORTED_DATA_TYPE_CHOICES__] def data_type_is_supported(self, data_type: str | list) -> bool | list[bool]: supported_data_types = self.get_supported_data_types() if isinstance(data_type, str): return data_type.lower().strip() in supported_data_types elif isinstance(data_type, (list, tuple, set)): return [_type.lower().strip() in supported_data_types for _type in data_type] else: raise ValueError('arg must be either a str or a sequence') def add_column(self, column_name: str, data_type: str): if isinstance(column_name, str) and isinstance(data_type, str): if not self.data_type_is_supported(data_type): raise UnSupportedDataType() if self.is_column(column_name):
""" Creating a Dynamic Table using conventional Django standard This Table gives you more control over it manipulation than Django models Developed by: Samuel Effiong Nkopuruk Email: senai.nkop@gmail.com """ __SUPPORTED_DATA_TYPE_CHOICES__ = ( ('char', 'Char'), ('int', 'Int'), ('float', 'Float'), ('bool', 'Bool'), ('textfield', 'TextField'), ('date', 'Date'), ) # Create your models here. class DynamicTable(models.Model): table_name = models.CharField(_('Table Name'), max_length=255, unique=True) table_description = models.TextField(_('Table Description'), blank=True) date_created = models.DateTimeField(_('Date Created'), default=timezone.now) table_columns = models.ManyToManyField('TableColumn', blank=True) table_rows = models.ManyToManyField('TableRow', blank=True) class Meta: ordering = ('-date_created', ) def __str__(self) -> str: return f"{self.table_name}" def __total_table_rows(self) -> int: field = self.table_columns.first() if field and isinstance(field, TableColumn): return self.table_columns.all().count() else: # the table is empty return 0 def __total_table_columns(self) -> int: return self.table_columns.all().count() def table_info(self) -> dict[str, int]: description = { 'rows': self.__total_table_rows(), 'columns': self.__total_table_columns() } return description def is_empty(self) -> bool: table_info = self.table_info() rows = table_info['rows'] columns = table_info['columns'] return True if columns == 0 or rows == 0 else False def is_column(self, column_name: str) -> bool: if not isinstance(column_name, str): raise ValueError("column name must be a str") try: column = self.table_columns.get(column_name=column_name) return True except TableColumn.DoesNotExist: return False def get_supported_data_types(self) -> list[str]: return [data_type[0] for data_type in __SUPPORTED_DATA_TYPE_CHOICES__] def data_type_is_supported(self, data_type: str | list) -> bool | list[bool]: supported_data_types = self.get_supported_data_types() if isinstance(data_type, str): return data_type.lower().strip() in supported_data_types elif isinstance(data_type, (list, tuple, set)): return [_type.lower().strip() in supported_data_types for _type in data_type] else: raise ValueError('arg must be either a str or a sequence') def add_column(self, column_name: str, data_type: str): if isinstance(column_name, str) and isinstance(data_type, str): if not self.data_type_is_supported(data_type): raise UnSupportedDataType() if self.is_column(column_name):
raise DuplicateColumnInTable()
4
2023-12-19 15:50:38+00:00
2k
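The data-type validation in the model above is plain Python over a Django-style choices tuple and can be exercised without a database:

SUPPORTED = (
    ("char", "Char"), ("int", "Int"), ("float", "Float"),
    ("bool", "Bool"), ("textfield", "TextField"), ("date", "Date"),
)

def supported_types() -> list[str]:
    # First element of each pair is the stored value, as in Django choices.
    return [value for value, _label in SUPPORTED]

def is_supported(data_type: str | list) -> bool | list[bool]:
    types = supported_types()
    if isinstance(data_type, str):
        return data_type.lower().strip() in types
    if isinstance(data_type, (list, tuple, set)):
        return [t.lower().strip() in types for t in data_type]
    raise ValueError("arg must be either a str or a sequence")

print(is_supported(" Date "))         # True
print(is_supported(["int", "uuid"]))  # [True, False]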
gsamil/text-classification
recommender/train.py
[ { "identifier": "vocab", "path": "data.py", "snippet": "class ClassificationSample(BaseModel):\ndef preprocess_text(text: str) -> str:\ndef get_samples_from_file(file_path: str) -> list[ClassificationSample]:\ndef stratify_samples(\n samples: list[ClassificationSample], number_per_sample: int\n) -> l...
import torch import time import os from torch import nn from torch.utils.data import DataLoader from torch.optim.lr_scheduler import ExponentialLR from data import ( vocab, get_samples_from_file, stratify_samples, save_categories, load_categories, ) from model import TextClassifier, TrainingParameters, device, HyperParameters from recommender.dataset import ClassificationDataset from settings import CATEGORIES_PATH
1,057
# Set `train_file`, `test_file` and `model_dir` appropriately. # Set `negative_samples` to the number of negative samples you want to use. # run with `export PYTHONPATH=. && python recommender/train.py` in the main directory. train_file = "./data/train_cleaned.csv" test_file = "./data/test_cleaned.csv" model_dir = "./recommender/saved_model" if __name__ == "__main__": hparams = HyperParameters(
# Set `train_file`, `test_file` and `model_dir` appropriately. # Set `negative_samples` to the number of negative samples you want to use. # run with `export PYTHONPATH=. && python recommender/train.py` in the main directory. train_file = "./data/train_cleaned.csv" test_file = "./data/test_cleaned.csv" model_dir = "./recommender/saved_model" if __name__ == "__main__": hparams = HyperParameters(
vocab_size=len(vocab),
0
2023-12-17 11:37:37+00:00
2k
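The imports above pair an optimizer with ExponentialLR, which multiplies the learning rate by gamma once per scheduler.step(). A minimal sketch; the Adam optimizer, gamma=0.9 and the linear stand-in model are assumptions, not the project's actual choices:

import torch
from torch import nn
from torch.optim.lr_scheduler import ExponentialLR

model = nn.Linear(8, 2)  # stand-in for TextClassifier
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = ExponentialLR(optimizer, gamma=0.9)

for epoch in range(3):
    # ... one pass over the DataLoader would go here ...
    optimizer.step()   # step the optimizer before the scheduler
    scheduler.step()
    print(epoch, scheduler.get_last_lr())  # ~0.0009, 0.00081, 0.000729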
zhcui/polar_preview
polar/basis/trans_1e.py
[ { "identifier": "mdot", "path": "polar/utils/misc.py", "snippet": "def mdot(*args):\n \"\"\"\n Reduced matrix dot.\n \"\"\"\n return reduce(np.dot, args)" }, { "identifier": "kdot", "path": "polar/utils/misc.py", "snippet": "def kdot(a, b):\n \"\"\"\n Matrix dot with kp...
import numpy as np import scipy.linalg as la from polar.utils.misc import (mdot, kdot, get_spin_dim, add_spin_dim)
798
#!/usr/bin/env python """ Transform 1e quantities. Authors: Zhi-Hao Cui Tianyu Zhu Shunyue Yuan """ # ***************************************************************************** # Transform functions AO -> LO and LO -> AO # for h1 and rdm1 # ***************************************************************************** def trans_h1_to_lo(h_ao_ao, C_ao_lo): r""" Transform h1 to lo basis, with kpts. h^{LO} = C^{\dagger} h^{AO} C """ h_ao_ao = np.asarray(h_ao_ao) C_ao_lo = np.asarray(C_ao_lo) nkpts = C_ao_lo.shape[-3] nlo = C_ao_lo.shape[-1] res_type = np.result_type(h_ao_ao.dtype, C_ao_lo.dtype) # treat the special case where h is 0 or [0, 0] if h_ao_ao.ndim == 0: # scalar return np.ones((nkpts, nlo, nlo), dtype=res_type) * h_ao_ao elif h_ao_ao.ndim == 1: # [0, 0] spin = len(h_ao_ao) h_lo_lo = np.ones((spin, nkpts, nlo, nlo), dtype=res_type) for s in range(spin): h_lo_lo[s] *= h_ao_ao[s] return h_lo_lo if C_ao_lo.ndim == 3 and h_ao_ao.ndim == 3: h_lo_lo = np.zeros((nkpts, nlo, nlo), dtype=res_type) for k in range(nkpts):
#!/usr/bin/env python """ Transform 1e quantities. Authors: Zhi-Hao Cui Tianyu Zhu Shunyue Yuan """ # ***************************************************************************** # Transform functions AO -> LO and LO -> AO # for h1 and rdm1 # ***************************************************************************** def trans_h1_to_lo(h_ao_ao, C_ao_lo): r""" Transform h1 to lo basis, with kpts. h^{LO} = C^{\dagger} h^{AO} C """ h_ao_ao = np.asarray(h_ao_ao) C_ao_lo = np.asarray(C_ao_lo) nkpts = C_ao_lo.shape[-3] nlo = C_ao_lo.shape[-1] res_type = np.result_type(h_ao_ao.dtype, C_ao_lo.dtype) # treat the special case where h is 0 or [0, 0] if h_ao_ao.ndim == 0: # scalar return np.ones((nkpts, nlo, nlo), dtype=res_type) * h_ao_ao elif h_ao_ao.ndim == 1: # [0, 0] spin = len(h_ao_ao) h_lo_lo = np.ones((spin, nkpts, nlo, nlo), dtype=res_type) for s in range(spin): h_lo_lo[s] *= h_ao_ao[s] return h_lo_lo if C_ao_lo.ndim == 3 and h_ao_ao.ndim == 3: h_lo_lo = np.zeros((nkpts, nlo, nlo), dtype=res_type) for k in range(nkpts):
h_lo_lo[k] = mdot(C_ao_lo[k].conj().T, h_ao_ao[k], C_ao_lo[k])
0
2023-12-18 07:39:51+00:00
2k
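The k-point loop at the end of the crop applies h^{LO} = C^{\dagger} h^{AO} C block by block. The same contraction on random Hermitian test data shows the transform preserves Hermiticity; shapes and seed are arbitrary:

import numpy as np
from functools import reduce

def mdot(*args):
    # Reduced matrix dot, as in the record's helper.
    return reduce(np.dot, args)

nkpts, nao, nlo = 2, 4, 3
rng = np.random.default_rng(0)
h = rng.random((nkpts, nao, nao)) + 1j * rng.random((nkpts, nao, nao))
h = 0.5 * (h + h.conj().transpose(0, 2, 1))  # make each k-block Hermitian
C = rng.random((nkpts, nao, nlo)) + 1j * rng.random((nkpts, nao, nlo))

h_lo = np.zeros((nkpts, nlo, nlo), dtype=complex)
for k in range(nkpts):
    h_lo[k] = mdot(C[k].conj().T, h[k], C[k])

print(h_lo.shape)                                         # (2, 3, 3)
print(np.allclose(h_lo, h_lo.conj().transpose(0, 2, 1)))  # True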