repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9 values |
|---|---|---|---|---|---|---|---|---|---|---|
kkchara/get-beastsaber-score | plug/plugins/beatsaber/fun/main.py | [
{
"identifier": "make_pic",
"path": "plug/plugins/beatsaber/fun/make_pic.py",
"snippet": "def make_pic(account, up=\"get-beastsaber-score/plugins/beatsaber/\"):\n # 账户\n acc = str(account)\n\n # 获取用户信息\n with open(f\"{up}data/info/user.json\", \"r\", encoding=\"utf-8\") as f:\n user_d... | from plug.plugins.beatsaber.fun import make_pic
from plug.plugins.beatsaber.fun import get_info | 2,348 |
acc = input("your SS account:")
get_info.get_json(acc)
# 提取hash列表
hash_list = get_info.get_hash()
# 获取key
get_info.get_key(hash_list)
# 处理用户头像
get_info.get_avatar(acc)
# 处理歌曲封面
get_info.get_cover(hash_list)
# 生成图片
|
acc = input("your SS account:")
get_info.get_json(acc)
# 提取hash列表
hash_list = get_info.get_hash()
# 获取key
get_info.get_key(hash_list)
# 处理用户头像
get_info.get_avatar(acc)
# 处理歌曲封面
get_info.get_cover(hash_list)
# 生成图片 | make_pic.make_pic(acc) | 0 | 2023-12-05 09:36:30+00:00 | 4k |
lbcb-sci/GNNome | graph_dataset.py | [
{
"identifier": "get_config",
"path": "config.py",
"snippet": "def get_config():\n return {\n 'checkpoints_path': 'checkpoints',\n 'models_path': 'models',\n \n 'tool_dir': 'vendor',\n 'raven_dir': 'vendor/raven-1.8.1',\n 'hifiasm_dir': 'vendor/hifiasm-0.18.8... | import re
import os
import pickle
import subprocess
import dgl
import graph_parser
from dgl.data import DGLDataset
from config import get_config
from utils import preprocess_graph, add_positional_encoding, extract_contigs | 2,259 |
class AssemblyGraphDataset(DGLDataset):
def __init__(self, root, assembler, threads=32, generate=False):
self.root = os.path.abspath(root)
self.assembler = assembler
self.threads = threads
self.assembly_dir = os.path.join(self.root, self.assembler)
# print(self.assembly_dir)
if 'raw' not in os.listdir(self.root):
subprocess.run(f"mkdir 'raw'", shell=True, cwd=self.root)
if 'output' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'output'", shell=True, cwd=self.assembly_dir)
if f'processed' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'processed'", shell=True, cwd=self.assembly_dir)
if f'info' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'info'", shell=True, cwd=self.assembly_dir)
raw_dir = os.path.join(self.root, 'raw')
save_dir = os.path.join(self.assembly_dir, f'processed')
self.output_dir = os.path.join(self.assembly_dir, f'output')
self.info_dir = os.path.join(self.assembly_dir, f'info')
config = get_config()
raven_dir = config['raven_dir']
self.raven_path = os.path.join(raven_dir, f'build/bin/raven')
self.raven_path = os.path.abspath(self.raven_path)
hifiasm_dir = config['hifiasm_dir']
self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm')
self.hifiasm_path = os.path.abspath(self.hifiasm_path)
super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)
self.graph_list = []
if not generate:
for file in os.listdir(self.save_dir):
idx = int(file[:-4])
graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0]
graph = preprocess_graph(graph, self.root, idx)
graph = add_positional_encoding(graph)
print(f'DGL graph idx={idx} info:\n',graph)
self.graph_list.append((idx, graph))
self.graph_list.sort(key=lambda x: x[0])
def has_cache(self):
"""Check if the raw data is already processed and stored."""
raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(self.raw_dir)}
prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(self.save_dir)}
return len(raw_files - prc_files) == 0 # set difference
def __len__(self):
return len(os.listdir(self.save_dir))
def __getitem__(self, idx):
i, graph = self.graph_list[idx]
return i, graph
def process(self):
pass
class AssemblyGraphDataset_HiFi(AssemblyGraphDataset):
def __init__(self, root, assembler='hifiasm', threads=32, generate=False):
super().__init__(root=root, assembler=assembler, threads=threads, generate=generate)
def process(self):
"""Process the raw data and save it on the disk."""
assembler = 'hifiasm'
assert assembler in ('raven', 'hifiasm'), 'Choose either "raven" or "hifiasm" assembler'
graphia_dir = os.path.join(self.assembly_dir, 'graphia')
if not os.path.isdir(graphia_dir):
os.mkdir(graphia_dir)
raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(self.raw_dir)}
prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(self.save_dir)}
diff = raw_files - prc_files
for cnt, idx in enumerate(diff):
fastq = f'{idx}.fasta'
if fastq not in os.listdir(self.raw_dir):
fastq = f'{idx}.fastq'
print(f'\nStep {cnt}: generating graphs for reads in {fastq}')
reads_path = os.path.abspath(os.path.join(self.raw_dir, fastq))
print(f'Path to the reads: {reads_path}')
print(f'Using assembler: {assembler}\n')
# Raven
if assembler == 'raven':
subprocess.run(f'{self.raven_path} --disable-checkpoints --identity 0.99 -k29 -w9 -t{self.threads} -p0 {reads_path} > {idx}_assembly.fasta', shell=True, cwd=self.output_dir)
subprocess.run(f'mv graph_1.gfa {idx}_graph_1.gfa', shell=True, cwd=self.output_dir)
gfa_path = os.path.join(self.output_dir, f'{idx}_graph_1.gfa')
# Hifiasm
elif assembler == 'hifiasm':
subprocess.run(f'{self.hifiasm_path} --prt-raw -o {idx}_asm -t{self.threads} -l0 {reads_path}', shell=True, cwd=self.output_dir)
subprocess.run(f'mv {idx}_asm.bp.raw.r_utg.gfa {idx}_graph_1.gfa', shell=True, cwd=self.output_dir)
gfa_path = os.path.join(self.output_dir, f'{idx}_graph_1.gfa')
|
class AssemblyGraphDataset(DGLDataset):
def __init__(self, root, assembler, threads=32, generate=False):
self.root = os.path.abspath(root)
self.assembler = assembler
self.threads = threads
self.assembly_dir = os.path.join(self.root, self.assembler)
# print(self.assembly_dir)
if 'raw' not in os.listdir(self.root):
subprocess.run(f"mkdir 'raw'", shell=True, cwd=self.root)
if 'output' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'output'", shell=True, cwd=self.assembly_dir)
if f'processed' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'processed'", shell=True, cwd=self.assembly_dir)
if f'info' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'info'", shell=True, cwd=self.assembly_dir)
raw_dir = os.path.join(self.root, 'raw')
save_dir = os.path.join(self.assembly_dir, f'processed')
self.output_dir = os.path.join(self.assembly_dir, f'output')
self.info_dir = os.path.join(self.assembly_dir, f'info')
config = get_config()
raven_dir = config['raven_dir']
self.raven_path = os.path.join(raven_dir, f'build/bin/raven')
self.raven_path = os.path.abspath(self.raven_path)
hifiasm_dir = config['hifiasm_dir']
self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm')
self.hifiasm_path = os.path.abspath(self.hifiasm_path)
super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)
self.graph_list = []
if not generate:
for file in os.listdir(self.save_dir):
idx = int(file[:-4])
graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0]
graph = preprocess_graph(graph, self.root, idx)
graph = add_positional_encoding(graph)
print(f'DGL graph idx={idx} info:\n',graph)
self.graph_list.append((idx, graph))
self.graph_list.sort(key=lambda x: x[0])
def has_cache(self):
"""Check if the raw data is already processed and stored."""
raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(self.raw_dir)}
prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(self.save_dir)}
return len(raw_files - prc_files) == 0 # set difference
def __len__(self):
return len(os.listdir(self.save_dir))
def __getitem__(self, idx):
i, graph = self.graph_list[idx]
return i, graph
def process(self):
pass
class AssemblyGraphDataset_HiFi(AssemblyGraphDataset):
def __init__(self, root, assembler='hifiasm', threads=32, generate=False):
super().__init__(root=root, assembler=assembler, threads=threads, generate=generate)
def process(self):
"""Process the raw data and save it on the disk."""
assembler = 'hifiasm'
assert assembler in ('raven', 'hifiasm'), 'Choose either "raven" or "hifiasm" assembler'
graphia_dir = os.path.join(self.assembly_dir, 'graphia')
if not os.path.isdir(graphia_dir):
os.mkdir(graphia_dir)
raw_files = {int(re.findall(r'(\d+).fast*', raw)[0]) for raw in os.listdir(self.raw_dir)}
prc_files = {int(re.findall(r'(\d+).dgl', prc)[0]) for prc in os.listdir(self.save_dir)}
diff = raw_files - prc_files
for cnt, idx in enumerate(diff):
fastq = f'{idx}.fasta'
if fastq not in os.listdir(self.raw_dir):
fastq = f'{idx}.fastq'
print(f'\nStep {cnt}: generating graphs for reads in {fastq}')
reads_path = os.path.abspath(os.path.join(self.raw_dir, fastq))
print(f'Path to the reads: {reads_path}')
print(f'Using assembler: {assembler}\n')
# Raven
if assembler == 'raven':
subprocess.run(f'{self.raven_path} --disable-checkpoints --identity 0.99 -k29 -w9 -t{self.threads} -p0 {reads_path} > {idx}_assembly.fasta', shell=True, cwd=self.output_dir)
subprocess.run(f'mv graph_1.gfa {idx}_graph_1.gfa', shell=True, cwd=self.output_dir)
gfa_path = os.path.join(self.output_dir, f'{idx}_graph_1.gfa')
# Hifiasm
elif assembler == 'hifiasm':
subprocess.run(f'{self.hifiasm_path} --prt-raw -o {idx}_asm -t{self.threads} -l0 {reads_path}', shell=True, cwd=self.output_dir)
subprocess.run(f'mv {idx}_asm.bp.raw.r_utg.gfa {idx}_graph_1.gfa', shell=True, cwd=self.output_dir)
gfa_path = os.path.join(self.output_dir, f'{idx}_graph_1.gfa') | extract_contigs(self.output_dir, idx) | 3 | 2023-12-08 04:45:45+00:00 | 4k |
cubesat-lab/cc1101-spi-analyzer | HighLevelAnalyzer.py | [
{
"identifier": "CC1101SpiProtocol",
"path": "CC1101SpiProtocol.py",
"snippet": "class CC1101SpiProtocol:\n PROTOCOL_MSG = {\n \"request\": None,\n \"response\": None,\n }\n REQUEST = {\n \"type\": None,\n \"access\": None,\n \"burst\": None,\n \"regist... | from saleae.analyzers import HighLevelAnalyzer, AnalyzerFrame, StringSetting, NumberSetting, ChoicesSetting
from copy import deepcopy
from CC1101SpiProtocol import CC1101SpiProtocol, ProtocolFrameType, MARC_STATE | 2,259 | # High Level Analyzer
# For more information and documentation, please go to https://support.saleae.com/extensions/high-level-analyzer-extensions
SPI_DATA_FRAME = {"mosi": 0, "miso": 0}
class SpiFrameType:
error = "error"
enable = "enable"
disable = "disable"
result = "result"
class SpiFrameState:
idle = 0
start = 1
active = 2
end = 3
error = 4
# High level analyzers must subclass the HighLevelAnalyzer class.
class Hla(HighLevelAnalyzer):
# List of settings that a user can set for this High Level Analyzer.
# TODO Check the String/Number/Choice settings
# my_string_setting = StringSetting()
# my_number_setting = NumberSetting(min_value=0, max_value=100)
# my_choices_setting = ChoicesSetting(choices=('A', 'B'))
# An optional list of types this analyzer produces, providing a way to customize the way frames are displayed in Logic 2.
result_types = {
'spi error': {
'format': 'Error: {{type}}'
},
ProtocolFrameType.ERROR: {
'format': 'Error: {{type}} | {{data.error_details}}'
},
ProtocolFrameType.REGISTER: {
'format': 'Register: {{data.access}} | {{data.register}} = {{data.focus_data}}'
},
ProtocolFrameType.COMMAND: {
'format': 'Command: {{data.register}}'
},
ProtocolFrameType.STATUS: {
'format': 'Status: {{data.register}} = {{data.focus_data}}'
},
ProtocolFrameType.PA_TABLE: {
'format': 'PA Table: {{data.access}} = {{data.focus_data}}'
},
ProtocolFrameType.FIFO: {
'format': 'FIFO: {{data.access}} = {{data.focus_data}}'
},
}
def __init__(self):
'''
Initialize HLA.
Settings can be accessed using the same name used above.
'''
self.state = SpiFrameState.idle
self.spi_frame_queue = []
| # High Level Analyzer
# For more information and documentation, please go to https://support.saleae.com/extensions/high-level-analyzer-extensions
SPI_DATA_FRAME = {"mosi": 0, "miso": 0}
class SpiFrameType:
error = "error"
enable = "enable"
disable = "disable"
result = "result"
class SpiFrameState:
idle = 0
start = 1
active = 2
end = 3
error = 4
# High level analyzers must subclass the HighLevelAnalyzer class.
class Hla(HighLevelAnalyzer):
# List of settings that a user can set for this High Level Analyzer.
# TODO Check the String/Number/Choice settings
# my_string_setting = StringSetting()
# my_number_setting = NumberSetting(min_value=0, max_value=100)
# my_choices_setting = ChoicesSetting(choices=('A', 'B'))
# An optional list of types this analyzer produces, providing a way to customize the way frames are displayed in Logic 2.
result_types = {
'spi error': {
'format': 'Error: {{type}}'
},
ProtocolFrameType.ERROR: {
'format': 'Error: {{type}} | {{data.error_details}}'
},
ProtocolFrameType.REGISTER: {
'format': 'Register: {{data.access}} | {{data.register}} = {{data.focus_data}}'
},
ProtocolFrameType.COMMAND: {
'format': 'Command: {{data.register}}'
},
ProtocolFrameType.STATUS: {
'format': 'Status: {{data.register}} = {{data.focus_data}}'
},
ProtocolFrameType.PA_TABLE: {
'format': 'PA Table: {{data.access}} = {{data.focus_data}}'
},
ProtocolFrameType.FIFO: {
'format': 'FIFO: {{data.access}} = {{data.focus_data}}'
},
}
def __init__(self):
'''
Initialize HLA.
Settings can be accessed using the same name used above.
'''
self.state = SpiFrameState.idle
self.spi_frame_queue = [] | self.protocol = CC1101SpiProtocol() | 0 | 2023-12-10 22:55:07+00:00 | 4k |
Deltares/imod-python | imod/tests/test_mf6/test_utilities/test_schemata_utilities.py | [
{
"identifier": "River",
"path": "imod/mf6/riv.py",
"snippet": "class River(BoundaryCondition):\n \"\"\"\n River package.\n Any number of RIV Packages can be specified for a single groundwater flow\n model.\n https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=7... | from pytest_cases import parametrize_with_cases
from imod.mf6.riv import River
from imod.mf6.utilities.schemata import filter_schemata_dict
from imod.schemata import AllNoDataSchema, IdentityNoDataSchema, IndexesSchema | 2,864 |
class CasesFilteredSchemata:
def case_empty(self):
schemata = {}
arg = (AllNoDataSchema,)
expected = {}
return schemata, arg, expected
def case_river_allnodata(self):
schemata = River._write_schemata
arg = (AllNoDataSchema,)
expected = {"stage": [AllNoDataSchema()]}
return schemata, arg, expected
def case_river_allnodata_identitynodataschema(self):
schemata = River._write_schemata
arg = (AllNoDataSchema, IdentityNoDataSchema)
expected = {
"stage": [AllNoDataSchema()],
"conductance": [IdentityNoDataSchema("stage")],
"bottom_elevation": [IdentityNoDataSchema("stage")],
"concentration": [IdentityNoDataSchema("stage")],
}
return schemata, arg, expected
def case_river_not_found(self):
# IndexesSchema part of _init_schemata, so should not be in
# _write_schemata.
schemata = River._write_schemata
arg = (IndexesSchema,)
expected = {}
return schemata, arg, expected
@parametrize_with_cases(("schemata", "arg", "expected"), cases=CasesFilteredSchemata)
def test_filter_schemata_dict(schemata, arg, expected):
# Act
|
class CasesFilteredSchemata:
def case_empty(self):
schemata = {}
arg = (AllNoDataSchema,)
expected = {}
return schemata, arg, expected
def case_river_allnodata(self):
schemata = River._write_schemata
arg = (AllNoDataSchema,)
expected = {"stage": [AllNoDataSchema()]}
return schemata, arg, expected
def case_river_allnodata_identitynodataschema(self):
schemata = River._write_schemata
arg = (AllNoDataSchema, IdentityNoDataSchema)
expected = {
"stage": [AllNoDataSchema()],
"conductance": [IdentityNoDataSchema("stage")],
"bottom_elevation": [IdentityNoDataSchema("stage")],
"concentration": [IdentityNoDataSchema("stage")],
}
return schemata, arg, expected
def case_river_not_found(self):
# IndexesSchema part of _init_schemata, so should not be in
# _write_schemata.
schemata = River._write_schemata
arg = (IndexesSchema,)
expected = {}
return schemata, arg, expected
@parametrize_with_cases(("schemata", "arg", "expected"), cases=CasesFilteredSchemata)
def test_filter_schemata_dict(schemata, arg, expected):
# Act | filtered_dict = filter_schemata_dict(schemata, arg) | 1 | 2023-12-08 13:57:59+00:00 | 4k |
YoungJeansKR/Llama2-Ko-Chatbot | llama/generation.py | [
{
"identifier": "ModelArgs",
"path": "llama/model.py",
"snippet": "class ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hidden layer si... | import json
import os
import sys
import time
import torch
import torch.nn.functional as F
from pathlib import Path
from typing import List, Literal, Optional, Tuple, TypedDict
from fairscale.nn.model_parallel.initialize import (
get_model_parallel_rank,
initialize_model_parallel,
model_parallel_is_initialized,
)
from llama.model import ModelArgs, Transformer
from llama.tokenizer import Tokenizer | 2,317 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
Role = Literal["system", "user", "assistant"]
class Message(TypedDict):
role: Role
content: str
class CompletionPrediction(TypedDict, total=False):
generation: str
tokens: List[str] # not required
logprobs: List[float] # not required
class ChatPrediction(TypedDict, total=False):
generation: Message
tokens: List[str] # not required
logprobs: List[float] # not required
Dialog = List[Message]
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
SPECIAL_TAGS = [B_INST, E_INST, "<<SYS>>", "<</SYS>>"]
UNSAFE_ERROR = "Error: special tags are not allowed as part of the prompt."
class Llama:
@staticmethod
def build(
ckpt_dir: str,
tokenizer_path: str,
max_seq_len: int,
max_batch_size: int,
model_parallel_size: Optional[int] = None,
seed: int = 1,
) -> "Llama":
"""
Build a Llama instance by initializing and loading a pre-trained model.
Args:
ckpt_dir (str): Path to the directory containing checkpoint files.
tokenizer_path (str): Path to the tokenizer file.
max_seq_len (int): Maximum sequence length for input text.
max_batch_size (int): Maximum batch size for inference.
model_parallel_size (Optional[int], optional): Number of model parallel processes.
If not provided, it's determined from the environment. Defaults to None.
Returns:
Llama: An instance of the Llama class with the loaded model and tokenizer.
Raises:
AssertionError: If there are no checkpoint files in the specified directory,
or if the model parallel size does not match the number of checkpoint files.
Note:
This method initializes the distributed process group, sets the device to CUDA,
and loads the pre-trained model and tokenizer.
"""
if not torch.distributed.is_initialized():
torch.distributed.init_process_group("nccl")
if not model_parallel_is_initialized():
if model_parallel_size is None:
model_parallel_size = int(os.environ.get("WORLD_SIZE", 1))
initialize_model_parallel(model_parallel_size)
local_rank = int(os.environ.get("LOCAL_RANK", 0))
torch.cuda.set_device(local_rank)
# seed must be the same in all processes
torch.manual_seed(seed)
if local_rank > 0:
sys.stdout = open(os.devnull, "w")
start_time = time.time()
checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
assert len(checkpoints) > 0, f"no checkpoint files found in {ckpt_dir}"
assert model_parallel_size == len(
checkpoints
), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {model_parallel_size}"
ckpt_path = checkpoints[get_model_parallel_rank()]
checkpoint = torch.load(ckpt_path, map_location="cpu")
with open(Path(ckpt_dir) / "params.json", "r") as f:
params = json.loads(f.read())
model_args: ModelArgs = ModelArgs(
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
**params,
)
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
Role = Literal["system", "user", "assistant"]
class Message(TypedDict):
role: Role
content: str
class CompletionPrediction(TypedDict, total=False):
generation: str
tokens: List[str] # not required
logprobs: List[float] # not required
class ChatPrediction(TypedDict, total=False):
generation: Message
tokens: List[str] # not required
logprobs: List[float] # not required
Dialog = List[Message]
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
SPECIAL_TAGS = [B_INST, E_INST, "<<SYS>>", "<</SYS>>"]
UNSAFE_ERROR = "Error: special tags are not allowed as part of the prompt."
class Llama:
@staticmethod
def build(
ckpt_dir: str,
tokenizer_path: str,
max_seq_len: int,
max_batch_size: int,
model_parallel_size: Optional[int] = None,
seed: int = 1,
) -> "Llama":
"""
Build a Llama instance by initializing and loading a pre-trained model.
Args:
ckpt_dir (str): Path to the directory containing checkpoint files.
tokenizer_path (str): Path to the tokenizer file.
max_seq_len (int): Maximum sequence length for input text.
max_batch_size (int): Maximum batch size for inference.
model_parallel_size (Optional[int], optional): Number of model parallel processes.
If not provided, it's determined from the environment. Defaults to None.
Returns:
Llama: An instance of the Llama class with the loaded model and tokenizer.
Raises:
AssertionError: If there are no checkpoint files in the specified directory,
or if the model parallel size does not match the number of checkpoint files.
Note:
This method initializes the distributed process group, sets the device to CUDA,
and loads the pre-trained model and tokenizer.
"""
if not torch.distributed.is_initialized():
torch.distributed.init_process_group("nccl")
if not model_parallel_is_initialized():
if model_parallel_size is None:
model_parallel_size = int(os.environ.get("WORLD_SIZE", 1))
initialize_model_parallel(model_parallel_size)
local_rank = int(os.environ.get("LOCAL_RANK", 0))
torch.cuda.set_device(local_rank)
# seed must be the same in all processes
torch.manual_seed(seed)
if local_rank > 0:
sys.stdout = open(os.devnull, "w")
start_time = time.time()
checkpoints = sorted(Path(ckpt_dir).glob("*.pth"))
assert len(checkpoints) > 0, f"no checkpoint files found in {ckpt_dir}"
assert model_parallel_size == len(
checkpoints
), f"Loading a checkpoint for MP={len(checkpoints)} but world size is {model_parallel_size}"
ckpt_path = checkpoints[get_model_parallel_rank()]
checkpoint = torch.load(ckpt_path, map_location="cpu")
with open(Path(ckpt_dir) / "params.json", "r") as f:
params = json.loads(f.read())
model_args: ModelArgs = ModelArgs(
max_seq_len=max_seq_len,
max_batch_size=max_batch_size,
**params,
) | tokenizer = Tokenizer(model_path=tokenizer_path) | 2 | 2023-12-08 09:37:42+00:00 | 4k |
Dong142857/Live3DPortrait | models/eg3d/networks_stylegan2.py | [
{
"identifier": "misc",
"path": "torch_utils/misc.py",
"snippet": "def constant(value, shape=None, dtype=None, device=None, memory_format=None):\n def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin\ndef suppress_tracer_warnings():\ndef assert_sh... | import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma | 3,518 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Network architectures from the paper
"Analyzing and Improving the Image Quality of StyleGAN".
Matches the original implementation of configs E-F by Karras et al. at
https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
#----------------------------------------------------------------------------
| # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Network architectures from the paper
"Analyzing and Improving the Image Quality of StyleGAN".
Matches the original implementation of configs E-F by Karras et al. at
https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
#----------------------------------------------------------------------------
| @misc.profiled_function | 0 | 2023-12-09 15:18:53+00:00 | 4k |
blaise-tk/RVC_CLI | rvc/train/preprocess/preprocess.py | [
{
"identifier": "load_audio",
"path": "rvc/lib/utils.py",
"snippet": "def load_audio(file, sampling_rate):\n try:\n file = file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n out, _ = (\n ffmpeg.input(file, threads=0)\n .output(\"-\", format=\"f... | from multiprocessing import cpu_count
from scipy import signal
from scipy.io import wavfile
from rvc.lib.utils import load_audio
from rvc.train.slicer import Slicer
import os
import sys
import librosa
import numpy as np
import multiprocessing | 1,634 |
now_directory = os.getcwd()
sys.path.append(now_directory)
experiment_directory = sys.argv[1]
input_root = sys.argv[2]
sampling_rate = int(sys.argv[3])
percentage = float(sys.argv[4])
num_processes = cpu_count()
no_parallel = "True"
class PreProcess:
def __init__(self, sr, exp_dir, per=3.0):
|
now_directory = os.getcwd()
sys.path.append(now_directory)
experiment_directory = sys.argv[1]
input_root = sys.argv[2]
sampling_rate = int(sys.argv[3])
percentage = float(sys.argv[4])
num_processes = cpu_count()
no_parallel = "True"
class PreProcess:
def __init__(self, sr, exp_dir, per=3.0): | self.slicer = Slicer( | 1 | 2023-12-10 21:09:41+00:00 | 4k |
lumi-ua/goit-project2-django-assistant | personal_assistant/app_contacts/views.py | [
{
"identifier": "ContactForm",
"path": "personal_assistant/app_contacts/forms.py",
"snippet": "class ContactForm(ModelForm):\n fullname = CharField(max_length=255, \n widget=forms.TextInput(attrs={'placeholder': 'Name Lastname', \"class\": \"form-control\"}))\n address = CharField(max_lengt... | from datetime import date
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Q
from django.urls import reverse_lazy
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from datetime import date, timedelta
from .forms import ContactForm, PhoneNumberForm, EmailAddressForm
from .models import Contact, PhoneNumber, EmailAddress | 2,058 | email_address_form = EmailAddressForm()
return render(request, 'app_contacts/add_email_address.html',
{
'title': 'Email address',
'email_address_form': email_address_form,
'email_adress_add_url': email_adress_add_url
}
)
def upcoming_birthdays(request):
max_days = 10
today = date.today()
days_in_future = int(request.GET.get("days", 7))
if days_in_future > max_days : days_in_future=max_days
future_date = today + timedelta(days=days_in_future)
print(future_date)
contacts = Contact.objects.filter(
Q(birthday__month__gte=today.month, birthday__day__gte=today.day, birthday__year__lte=today.year) |
Q(birthday__month__gte=future_date.month, birthday__day__lte=future_date.day, birthday__year__lte=future_date.year),
user=request.user
)
if not contacts.exists():
return render(request, "app_contacts/upcoming_birthdays.html",
{
"title": "Upcoming birthdays list",
"message": "No upcoming birthdays.",
"max_days": str(max_days),
"days_in_future": str(days_in_future)
}
)
return render(request, "app_contacts/upcoming_birthdays.html",
{
"title": "Upcoming birthdays list",
"contacts": contacts,
"max_days": str(max_days),
"days_in_future": str(days_in_future)
}
)
@login_required
def search_contacts(request):
query = request.GET.get("query", "")
error_message = ""
contacts = None
try:
user_contacts = Contact.objects.filter(user=request.user)
contacts = user_contacts.filter(
Q(fullname__icontains=query)
| Q(phone_numbers__phone_number__icontains=query)
| Q(email_addresses__email__icontains=query)
).distinct()
except Contact.DoesNotExist:
contact=[]
error_message = "Contact not found"
return render(request, "app_contacts/search_contacts.html",
{
"title": "Searching contacts",
"contacts": contacts,
"error_message": error_message
})
@login_required
def edit_contact(request, pk):
contact = get_object_or_404(Contact, pk=pk)
if request.method == "POST":
form = ContactForm(request.POST, instance=contact)
if form.is_valid():
form.save()
return redirect(to="app_contacts:detail", pk=pk)
else:
form = ContactForm(instance=contact)
return render(request, "app_contacts/edit_contact.html", {
"title": "Editing contact",
"form": form,
"contact": contact
})
@login_required
def delete_contact(request, pk):
contact = get_object_or_404(Contact, pk=pk)
if request.method == "POST":
contact.delete()
messages.success(request, "Contact successfully deleted")
return redirect(to="app_contacts:dashboard")
else:
return render(request, "app_contacts/delete_contact.html",
{
"title": "Deleting contact",
"contact": contact,
"user": request.user
}
)
def delete_email(request, pk):
try:
email = EmailAddress.objects.get(pk=pk)
email.delete()
except ObjectDoesNotExist:
email = None
return detail(request, pk)
def delete_phone(request, pk):
try:
| # from django.db.models import Q
# Create your views here.
@login_required
def dashboard(request):
return render(request, 'app_contacts/dashboard.html', {"title": "Dashboard contact operations"})
@login_required
def contact(request):
contact_form = ContactForm()
phone_number_form = PhoneNumberForm()
email_address_form = EmailAddressForm()
if request.method == "POST":
contact_form = ContactForm(request.POST)
phone_number_form = PhoneNumberForm(request.POST)
email_address_form = EmailAddressForm(request.POST)
if contact_form.is_valid() and phone_number_form.is_valid() and email_address_form.is_valid():
new_contact = contact_form.save(commit=False)
new_contact.user = request.user
new_contact.save()
phone_number = phone_number_form.save(commit=False)
phone_number.contact = new_contact
phone_number.save()
email_address_data = email_address_form.cleaned_data
if email_address_data.get("email"):
email_address = email_address_form.save(commit=False)
email_address.contact = new_contact
email_address.save()
return redirect(to="app_contacts:dashboard")
return render(
request,
"app_contacts/contact.html",
{
"title": "Creation new contact",
"contact_form": contact_form,
"phone_number_form": phone_number_form,
"email_address_form": email_address_form,
}
)
@login_required
def contacts(request, page=1):
per_page = 10
contacts = Contact.objects.filter(user=request.user)
paginator = Paginator(list(contacts), per_page)
contacts_on_page = paginator.page(page)
return render(request, "app_contacts/all_contacts.html",
{
"title": "Contacts list",
"contacts": contacts_on_page
}
)
@login_required
def detail(request, pk):
contact = get_object_or_404(Contact, pk=pk)
return render(request, 'app_contacts/detail.html',
{
"title": "Contact details",
"contact": contact
}
)
@login_required
def add_phone_number(request, pk):
contact = Contact.objects.get(pk=pk)
phone_number_add_url = reverse_lazy('app_contacts:add_phone_number', kwargs={'pk': pk})
if request.method == 'POST':
phone_number_form = PhoneNumberForm(request.POST)
if phone_number_form.is_valid():
new_phone_number = phone_number_form.save()
new_phone_number.contact = contact
new_phone_number.save()
return redirect(to="app_contacts:detail", pk=pk)
else:
phone_number_form = PhoneNumberForm()
return render(request, 'app_contacts/add_phone_number.html',
{
'title': "Adding Phone-number",
'phone_number_form': phone_number_form,
'phone_number_add_url': phone_number_add_url,
}
)
@login_required
def add_email_address(request, pk):
contact = Contact.objects.get(pk=pk)
email_adress_add_url = reverse_lazy('app_contacts:add_email_address', kwargs={'pk': pk})
if request.method == 'POST':
email_address_form = EmailAddressForm(request.POST)
if email_address_form.is_valid():
new_email_address = email_address_form.save()
new_email_address.contact = contact
new_email_address.save()
return redirect(to="app_contacts:detail", pk=pk)
else:
email_address_form = EmailAddressForm()
return render(request, 'app_contacts/add_email_address.html',
{
'title': 'Email address',
'email_address_form': email_address_form,
'email_adress_add_url': email_adress_add_url
}
)
def upcoming_birthdays(request):
max_days = 10
today = date.today()
days_in_future = int(request.GET.get("days", 7))
if days_in_future > max_days : days_in_future=max_days
future_date = today + timedelta(days=days_in_future)
print(future_date)
contacts = Contact.objects.filter(
Q(birthday__month__gte=today.month, birthday__day__gte=today.day, birthday__year__lte=today.year) |
Q(birthday__month__gte=future_date.month, birthday__day__lte=future_date.day, birthday__year__lte=future_date.year),
user=request.user
)
if not contacts.exists():
return render(request, "app_contacts/upcoming_birthdays.html",
{
"title": "Upcoming birthdays list",
"message": "No upcoming birthdays.",
"max_days": str(max_days),
"days_in_future": str(days_in_future)
}
)
return render(request, "app_contacts/upcoming_birthdays.html",
{
"title": "Upcoming birthdays list",
"contacts": contacts,
"max_days": str(max_days),
"days_in_future": str(days_in_future)
}
)
@login_required
def search_contacts(request):
query = request.GET.get("query", "")
error_message = ""
contacts = None
try:
user_contacts = Contact.objects.filter(user=request.user)
contacts = user_contacts.filter(
Q(fullname__icontains=query)
| Q(phone_numbers__phone_number__icontains=query)
| Q(email_addresses__email__icontains=query)
).distinct()
except Contact.DoesNotExist:
contact=[]
error_message = "Contact not found"
return render(request, "app_contacts/search_contacts.html",
{
"title": "Searching contacts",
"contacts": contacts,
"error_message": error_message
})
@login_required
def edit_contact(request, pk):
contact = get_object_or_404(Contact, pk=pk)
if request.method == "POST":
form = ContactForm(request.POST, instance=contact)
if form.is_valid():
form.save()
return redirect(to="app_contacts:detail", pk=pk)
else:
form = ContactForm(instance=contact)
return render(request, "app_contacts/edit_contact.html", {
"title": "Editing contact",
"form": form,
"contact": contact
})
@login_required
def delete_contact(request, pk):
contact = get_object_or_404(Contact, pk=pk)
if request.method == "POST":
contact.delete()
messages.success(request, "Contact successfully deleted")
return redirect(to="app_contacts:dashboard")
else:
return render(request, "app_contacts/delete_contact.html",
{
"title": "Deleting contact",
"contact": contact,
"user": request.user
}
)
def delete_email(request, pk):
try:
email = EmailAddress.objects.get(pk=pk)
email.delete()
except ObjectDoesNotExist:
email = None
return detail(request, pk)
def delete_phone(request, pk):
try: | phone = PhoneNumber.objects.get(pk=pk) | 4 | 2023-12-08 17:26:59+00:00 | 4k |
SubConv/SubConv | modules/convert/converter.py | [
{
"identifier": "RandUserAgent",
"path": "modules/convert/util.py",
"snippet": "def RandUserAgent() -> str:\n return userAgents[random.randint(0, len(userAgents) - 1)]"
},
{
"identifier": "get",
"path": "modules/convert/util.py",
"snippet": "def get(content):\n if content is None:\... | from modules.convert.util import RandUserAgent
from modules.convert.util import get
from modules.convert.util import uniqueName
from modules.convert.util import urlSafe
from modules.convert.util import base64RawStdDecode
from modules.convert.util import base64RawURLDecode
from modules.convert.v import handleVShareLink
import json
import base64
import urllib.parse as urlparse
import distutils.util | 3,578 | except:
continue
vmess["alterId"] = 0
vmess["cipher"] = "auto"
encryption = get(query.get("encryption"))
if encryption != "":
vmess["cipher"] = encryption
proxies.append(vmess)
continue
values = {}
try:
values = json.loads(dcBuf)
except:
continue
try:
tempName = values["ps"]
except:
continue
name = uniqueName(names, tempName)
vmess = {}
vmess["name"] = name
vmess["type"] = scheme
vmess["server"] = values["add"]
vmess["port"] = values["port"]
vmess["uuid"] = values["id"]
alterId = values.get("aid")
if alterId is not None:
vmess["alterId"] = alterId
else:
vmess["alterId"] = 0
vmess["udp"] = True
vmess["xudp"] = True
vmess["tls"] = False
vmess["skip-cert-verify"] = False
vmess["cipher"] = "auto"
cipher = get(values.get("scy"))
if cipher != "":
vmess["cipher"] = cipher
sni = get(values.get("sni"))
if sni != "":
vmess["servername"] = sni
network = get(values.get("net")).lower()
if values.get("type") == "http":
network = "http"
elif network == "http":
network = "h2"
vmess["network"] = network
tls = values.get("tls")
if tls is not None:
tls = str(tls).lower()
if tls.endswith("tls"):
vmess["tls"] = True
alpn = values.get("alpn")
if alpn is not None and alpn != "":
vmess["alpn"] = alpn.split(",")
if network == "http":
headers = {}
httpOpts = {}
host = get(values.get("host"))
if host != "":
headers["Host"] = host
httpOpts["path"] = "/"
path = get(values.get("path"))
if path != "":
httpOpts["path"] = path
httpOpts["headers"] = headers
vmess["http-opts"] = httpOpts
elif network == "h2":
headers = {}
h2Opts = {}
host = get(values.get("host"))
if host != "":
headers["Host"] = host
h2Opts["path"] = get(values.get("path"))
h2Opts["headers"] = headers
vmess["h2-opts"] = h2Opts
elif network == "ws":
headers = {}
wsOpts = {}
wsOpts["path"] = "/"
host = get(values.get("host"))
if host != "":
headers["Host"] = host
path = get(values.get("path"))
if path != "":
wsOpts["path"] = path
wsOpts["headers"] = headers
vmess["ws-opts"] = wsOpts
elif network == "grpc":
grpcOpts = {}
grpcOpts["grpc-service-name"] = get(values.get("path"))
vmess["grpc-opts"] = grpcOpts
proxies.append(vmess)
# ss and ssr still WIP
elif scheme == "ss":
try:
urlSS = urlparse.urlparse(line)
except:
continue
name = uniqueName(names, urlparse.unquote(urlSS.fragment))
port = urlSS.port
if port == "":
try:
|
async def ConvertsV2Ray(buf):
try:
data = base64.b64decode(buf).decode("utf-8")
except:
try:
data = buf.decode("utf-8")
except:
data = buf
arr = data.splitlines()
proxies = []
names = {}
for line in arr:
if line == "":
continue
if -1 == line.find("://"):
continue
else:
scheme, body = line.split("://", 1)
scheme = scheme.lower()
if scheme == "hysteria":
try:
urlHysteria = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlHysteria.query))
name = uniqueName(names, urlparse.unquote(urlHysteria.fragment))
hysteria = {}
hysteria["name"] = name
hysteria["type"] = scheme
hysteria["server"] = urlHysteria.hostname
hysteria["port"] = urlHysteria.port
hysteria["sni"] = query.get("peer")
hysteria["obfs"] = query.get("obfs")
alpn = get(query.get("alpn"))
if alpn != "":
hysteria["alpn"] = alpn.split(",")
hysteria["auth_str"] = query.get("auth")
hysteria["protocol"] = query.get("protocol")
up = get(query.get("up"))
down = get(query.get("down"))
if up == "":
up = query.get("upmbps")
if down == "":
down = query.get("downmbps")
hysteria["up"] = up
hysteria["down"] = down
hysteria["skip-cert-verify"] = bool(
distutils.util.strtobool(query.get("insecure")))
proxies.append(hysteria)
elif scheme == "hysteria2" or scheme == "hy2":
# apply f6bf9c08577060bb199c2f746c7d91dd3c0ca7b9 from mihomo
try:
urlHysteria2 = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlHysteria2.query))
name = uniqueName(names, urlparse.unquote(urlHysteria2.fragment))
hysteria2 = {}
hysteria2["name"] = name
hysteria2["type"] = scheme
hysteria2["server"] = urlHysteria2.hostname
port = get(urlHysteria2.port)
if port != "":
hysteria2["port"] = int(port)
else:
hysteria2["port"] = 443
obfs = get(query.get("obfs"))
if obfs != "" and obfs not in ["none", "None"]:
hysteria2["obfs"] = query.get("obfs")
hysteria2["obfs-password"] = get(query.get("obfs-password"))
sni = get(query.get("sni"))
if sni == "":
sni = get(query.get("peer"))
if sni != "":
hysteria2["sni"] = sni
hysteria2["skip-cert-verify"] = bool(
distutils.util.strtobool(query.get("insecure")))
alpn = get(query.get("alpn"))
if alpn != "":
hysteria2["alpn"] = alpn.split(",")
auth = get(urlHysteria2.username)
if auth != "":
hysteria2["password"] = auth
hysteria2["fingerprint"] = get(query.get("pinSHA256"))
hysteria2["down"] = get(query.get("down"))
hysteria2["up"] = get(query.get("up"))
proxies.append(hysteria2)
elif scheme == "tuic":
# A temporary unofficial TUIC share link standard
# Modified from https://github.com/daeuniverse/dae/discussions/182
# Changes:
# 1. Support TUICv4, just replace uuid:password with token
# 2. Remove `allow_insecure` field
try:
urlTUIC = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlTUIC.query))
tuic = {}
tuic["name"] = uniqueName(
names, urlparse.unquote(urlTUIC.fragment))
tuic["type"] = scheme
tuic["server"] = urlTUIC.hostname
tuic["port"] = urlTUIC.port
tuic["udp"] = True
password = urlTUIC.password
if password is not None:
tuic["uuid"] = urlTUIC.username
tuic["password"] = password
else:
tuic["token"] = urlTUIC.username
cc = get(query.get("congestion_control"))
if cc != "":
tuic["congestion-control"] = cc
alpn = get(query.get("alpn"))
if alpn != "":
tuic["alpn"] = alpn.split(",")
sni = get(query.get("sni"))
if sni != "":
tuic["sni"] = sni
if query.get("disable_sni") == "1":
tuic["disable-sni"] = True
udpRelayMode = get(query.get("udp_relay_mode"))
if udpRelayMode != "":
tuic["udp-relay-mode"] = udpRelayMode
proxies.append(tuic)
elif scheme == "trojan":
try:
urlTrojan = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlTrojan.query))
name = uniqueName(names, urlparse.unquote(urlTrojan.fragment))
trojan = {}
trojan["name"] = name
trojan["type"] = scheme
trojan["server"] = urlTrojan.hostname
trojan["port"] = urlTrojan.port
trojan["password"] = urlTrojan.password
trojan["udp"] = True
trojan["skip-cert-verify"] = bool(
distutils.util.strtobool(query.get("allowInsecure")))
sni = get(query.get("sni"))
if sni != "":
trojan["sni"] = sni
alpn = get(query.get("alpn"))
if alpn != "":
trojan["alpn"] = alpn.split(",")
network = get(query.get("type"))
if network != "":
network = network.lower()
trojan["network"] = network
if network == "ws":
headers = {}
wsOpts = {}
headers["User-Agent"] = RandUserAgent()
wsOpts["path"] = query.get("path")
wsOpts["headers"] = headers
trojan["ws-opts"] = wsOpts
elif network == "grpc":
grpcOpts = {}
grpcOpts["serviceName"] = query.get("serviceName")
trojan["grpc-opts"] = grpcOpts
fingerprint = get(query.get("fp"))
if fingerprint == "":
trojan["client-fingerprint"] = "chrome"
else:
trojan["client-fingerprint"] = fingerprint
proxies.append(trojan)
elif scheme == "vless":
try:
urlVless = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlVless.query))
vless = {}
try:
handleVShareLink(names, urlVless, scheme, vless)
except:
continue
flow = get(query.get("flow"))
if flow != "":
vless["flow"] = str(flow).lower()
proxies.append(vless)
elif scheme == "vmess":
try:
dcBuf = base64.b64decode(body)
except:
# Xray VMessAEAD share link
try:
urlVMess = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlVMess.query))
vmess = {}
try:
handleVShareLink(names, urlVMess, scheme, vmess)
except:
continue
vmess["alterId"] = 0
vmess["cipher"] = "auto"
encryption = get(query.get("encryption"))
if encryption != "":
vmess["cipher"] = encryption
proxies.append(vmess)
continue
values = {}
try:
values = json.loads(dcBuf)
except:
continue
try:
tempName = values["ps"]
except:
continue
name = uniqueName(names, tempName)
vmess = {}
vmess["name"] = name
vmess["type"] = scheme
vmess["server"] = values["add"]
vmess["port"] = values["port"]
vmess["uuid"] = values["id"]
alterId = values.get("aid")
if alterId is not None:
vmess["alterId"] = alterId
else:
vmess["alterId"] = 0
vmess["udp"] = True
vmess["xudp"] = True
vmess["tls"] = False
vmess["skip-cert-verify"] = False
vmess["cipher"] = "auto"
cipher = get(values.get("scy"))
if cipher != "":
vmess["cipher"] = cipher
sni = get(values.get("sni"))
if sni != "":
vmess["servername"] = sni
network = get(values.get("net")).lower()
if values.get("type") == "http":
network = "http"
elif network == "http":
network = "h2"
vmess["network"] = network
tls = values.get("tls")
if tls is not None:
tls = str(tls).lower()
if tls.endswith("tls"):
vmess["tls"] = True
alpn = values.get("alpn")
if alpn is not None and alpn != "":
vmess["alpn"] = alpn.split(",")
if network == "http":
headers = {}
httpOpts = {}
host = get(values.get("host"))
if host != "":
headers["Host"] = host
httpOpts["path"] = "/"
path = get(values.get("path"))
if path != "":
httpOpts["path"] = path
httpOpts["headers"] = headers
vmess["http-opts"] = httpOpts
elif network == "h2":
headers = {}
h2Opts = {}
host = get(values.get("host"))
if host != "":
headers["Host"] = host
h2Opts["path"] = get(values.get("path"))
h2Opts["headers"] = headers
vmess["h2-opts"] = h2Opts
elif network == "ws":
headers = {}
wsOpts = {}
wsOpts["path"] = "/"
host = get(values.get("host"))
if host != "":
headers["Host"] = host
path = get(values.get("path"))
if path != "":
wsOpts["path"] = path
wsOpts["headers"] = headers
vmess["ws-opts"] = wsOpts
elif network == "grpc":
grpcOpts = {}
grpcOpts["grpc-service-name"] = get(values.get("path"))
vmess["grpc-opts"] = grpcOpts
proxies.append(vmess)
# ss and ssr still WIP
elif scheme == "ss":
try:
urlSS = urlparse.urlparse(line)
except:
continue
name = uniqueName(names, urlparse.unquote(urlSS.fragment))
port = urlSS.port
if port == "":
try: | dcBuf = base64RawStdDecode(urlSS.hostname) | 4 | 2023-12-06 12:57:11+00:00 | 4k |
Opt-Mucca/PySCIPOpt-ML | src/pyscipopt_ml/lightgbm/lgbgetter.py | [
{
"identifier": "NoModel",
"path": "src/pyscipopt_ml/exceptions.py",
"snippet": "class NoModel(Exception):\n \"\"\"No model is known for some structure.\"\"\"\n\n def __init__(self, predictor, reason):\n if not isinstance(predictor, str):\n predictor = type(predictor).__name__\n ... | import numpy as np
from sklearn.base import is_classifier
from ..exceptions import NoModel, NoSolution, ParameterError
from ..modelling import AbstractPredictorConstr
from ..modelling.var_utils import create_vars | 3,249 | """Implements some utility tools for all lightgbm objects."""
class LGBgetter(AbstractPredictorConstr):
"""Utility class for lightgbm models convertors.
Implement some common functionalities: check predictor is fitted, output dimension, get error
Attributes
----------
predictor
Lightgbm predictor embedded into SCIP model.
"""
def __init__(self, predictor, input_vars, output_type="regular", **kwargs):
if not hasattr(predictor, "booster_"):
| """Implements some utility tools for all lightgbm objects."""
class LGBgetter(AbstractPredictorConstr):
"""Utility class for lightgbm models convertors.
Implement some common functionalities: check predictor is fitted, output dimension, get error
Attributes
----------
predictor
Lightgbm predictor embedded into SCIP model.
"""
def __init__(self, predictor, input_vars, output_type="regular", **kwargs):
if not hasattr(predictor, "booster_"): | raise ParameterError( | 2 | 2023-12-10 20:28:22+00:00 | 4k |
DongqiShen/qwen-fast | eval.py | [
{
"identifier": "LLaMA",
"path": "model.py",
"snippet": "def find_multiple(n: int, k: int) -> int:\n def __post_init__(self):\n def from_name(cls, name: str):\n def __init__(self, max_batch_size, max_seq_length, n_heads, head_dim, dtype=torch.bfloat16):\n def update(self, input_pos, k_val, v... | import sys
import time
import torch
import torch._inductor.config
import torch._dynamo.config
import os
import sys
import main as lm_evaluation_harness_main
import lm_eval
import argparse
from pathlib import Path
from typing import Optional
from model import LLaMA
from sentencepiece import SentencePieceProcessor
from generate import (
_load_model,
encode_tokens,
model_forward,
) | 1,860 |
torch._dynamo.config.automatic_dynamic_shapes = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.triton.cudagraphs = True
torch._dynamo.config.cache_size_limit = 100000
# support running without installing as a package
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
# hacky path setup for lm-evaluation-harness
lm_evaluation_harness_path = '/'.join(
os.getcwd().split('/')[:-1] + ['lm-evaluation-harness'])
sys.path.insert(0, lm_evaluation_harness_path)
def setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(
model: LLaMA,
prompt: torch.Tensor,
max_new_tokens: int,
max_seq_length: Optional[int] = None,
):
"""
Sets up model cache and does some bookkeeping calculations for prompt, input_pos and max_seq_length
that are needed for prefill or model_forward
Args:
model (LLaMA): The model whose cache gets set up
prompt (torch.Tensor): Tensor of shape (T) with indices of the prompt sequence.
max_new_tokens (int): The desired maximum number of new tokens that can be generated.
max_seq_length (Optional[int], optional): The maximum sequence length allowed.
Returns:
seq (torch.Tensor): prompt but padded with zeros to size max_seq_length
input_pos (torch.Tensor): tensor of integers in increasing order
max_seq_length (int): The maximum sequence length allowed, updated based on other numbers
"""
T = prompt.size(0)
T_new = T + max_new_tokens
if max_seq_length is None:
max_seq_length = min(T_new, model.config.block_size)
device, dtype = prompt.device, prompt.dtype
# create an empty tensor of the expected final shape and fill in the current tokens
empty = torch.empty(T_new, dtype=dtype, device=device)
empty[:T] = prompt
seq = empty
input_pos = torch.arange(0, T, device=device)
with torch.device(device):
model.setup_caches(max_batch_size=1, max_seq_length=max_seq_length)
return seq, input_pos, max_seq_length
class SimpleGPTEvalWrapper(lm_eval.base.BaseLM):
"""
A wrapper class for SimpleGPT, providing integration with the lm-evaluation-harness library.
"""
def __init__(
self,
model: LLaMA,
tokenizer,
max_seq_length: Optional[int]=None,
):
super().__init__()
self._model = model
self._tokenizer = tokenizer
self._device = torch.device('cuda')
self._max_seq_length = 2048 if max_seq_length is None else max_seq_length
@property
def eot_token_id(self):
return self._tokenizer.eos_id()
@property
def max_length(self):
return self._max_seq_length
@property
def max_gen_toks(self):
return 50
@property
def batch_size(self):
return 1
@property
def device(self):
return self._device
def tok_encode(self, string: str):
encoded = encode_tokens(self._tokenizer,
string, bos=True, eos=False, device=self._device)
# encoded is a pytorch tensor, but some internal logic in the
# eval harness expects it to be a list instead
# TODO: verify this for multi-batch as well
encoded = encoded.tolist()
return encoded
def tok_decode(self, tokens):
decoded = self._tokenizer.decode(tokens)
return decoded
def _model_call(self, inps):
# TODO: make batches work
inps = inps.squeeze(0)
max_new_tokens = 1
seq, input_pos, max_seq_length = \
setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(
self._model,
inps,
max_new_tokens,
self.max_length,
)
x = seq.index_select(0, input_pos).view(1, -1)
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
torch._dynamo.config.automatic_dynamic_shapes = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.epilogue_fusion = False
torch._inductor.config.triton.cudagraphs = True
torch._dynamo.config.cache_size_limit = 100000
# support running without installing as a package
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
# hacky path setup for lm-evaluation-harness
lm_evaluation_harness_path = '/'.join(
os.getcwd().split('/')[:-1] + ['lm-evaluation-harness'])
sys.path.insert(0, lm_evaluation_harness_path)
def setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(
model: LLaMA,
prompt: torch.Tensor,
max_new_tokens: int,
max_seq_length: Optional[int] = None,
):
"""
Sets up model cache and does some bookkeeping calculations for prompt, input_pos and max_seq_length
that are needed for prefill or model_forward
Args:
model (LLaMA): The model whose cache gets set up
prompt (torch.Tensor): Tensor of shape (T) with indices of the prompt sequence.
max_new_tokens (int): The desired maximum number of new tokens that can be generated.
max_seq_length (Optional[int], optional): The maximum sequence length allowed.
Returns:
seq (torch.Tensor): prompt but padded with zeros to size max_seq_length
input_pos (torch.Tensor): tensor of integers in increasing order
max_seq_length (int): The maximum sequence length allowed, updated based on other numbers
"""
T = prompt.size(0)
T_new = T + max_new_tokens
if max_seq_length is None:
max_seq_length = min(T_new, model.config.block_size)
device, dtype = prompt.device, prompt.dtype
# create an empty tensor of the expected final shape and fill in the current tokens
empty = torch.empty(T_new, dtype=dtype, device=device)
empty[:T] = prompt
seq = empty
input_pos = torch.arange(0, T, device=device)
with torch.device(device):
model.setup_caches(max_batch_size=1, max_seq_length=max_seq_length)
return seq, input_pos, max_seq_length
class SimpleGPTEvalWrapper(lm_eval.base.BaseLM):
"""
A wrapper class for SimpleGPT, providing integration with the lm-evaluation-harness library.
"""
def __init__(
self,
model: LLaMA,
tokenizer,
max_seq_length: Optional[int]=None,
):
super().__init__()
self._model = model
self._tokenizer = tokenizer
self._device = torch.device('cuda')
self._max_seq_length = 2048 if max_seq_length is None else max_seq_length
@property
def eot_token_id(self):
return self._tokenizer.eos_id()
@property
def max_length(self):
return self._max_seq_length
@property
def max_gen_toks(self):
return 50
@property
def batch_size(self):
return 1
@property
def device(self):
return self._device
def tok_encode(self, string: str):
encoded = encode_tokens(self._tokenizer,
string, bos=True, eos=False, device=self._device)
# encoded is a pytorch tensor, but some internal logic in the
# eval harness expects it to be a list instead
# TODO: verify this for multi-batch as well
encoded = encoded.tolist()
return encoded
def tok_decode(self, tokens):
decoded = self._tokenizer.decode(tokens)
return decoded
def _model_call(self, inps):
# TODO: make batches work
inps = inps.squeeze(0)
max_new_tokens = 1
seq, input_pos, max_seq_length = \
setup_cache_padded_seq_input_pos_max_seq_length_for_prefill(
self._model,
inps,
max_new_tokens,
self.max_length,
)
x = seq.index_select(0, input_pos).view(1, -1) | logits = model_forward(self._model, x, input_pos) | 3 | 2023-12-05 14:07:19+00:00 | 4k |
Yanyutin753/CowAndPandoraNext | bot/linkai/link_ai_bot.py | [
{
"identifier": "Bot",
"path": "bot/bot.py",
"snippet": "class Bot(object):\n def reply(self, query, context: Context = None) -> Reply:\n \"\"\"\n bot auto-reply content\n :param req: received message\n :return: reply content\n \"\"\"\n raise NotImplementedEr... | import time
import requests
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
from bot.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf | 2,223 | # access LinkAI knowledge base platform
# docs: https://link-ai.tech/platform/link-app/wechat
class LinkAIBot(Bot, OpenAIImage):
# authentication failed
AUTH_FAILED_CODE = 401
NO_QUOTA_CODE = 406
def __init__(self):
super().__init__()
| # access LinkAI knowledge base platform
# docs: https://link-ai.tech/platform/link-app/wechat
class LinkAIBot(Bot, OpenAIImage):
# authentication failed
AUTH_FAILED_CODE = 401
NO_QUOTA_CODE = 406
def __init__(self):
super().__init__() | self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo") | 9 | 2023-12-14 15:21:17+00:00 | 4k |
nerdslab/bams | fruit_flies.py | [
{
"identifier": "KeypointsDataset",
"path": "bams/data/dataset.py",
"snippet": "class KeypointsDataset(Dataset):\n r\"\"\"Simplified dataset for cases where the data is keypoints only (usually obtained\n using vision-based pose estimation and tracking methods). \n \n The state is defined... | import os
import numpy as np
import argparse
import torch
import torch.nn.functional as F
from datetime import datetime
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from bams.data import KeypointsDataset
from bams.models import BAMS
from bams import HoALoss | 2,900 | os.path.join(path, "fly_group_train.npy"), allow_pickle=True
).item()
sequence_ids_train, sequence_data_train = zip(*data_train["sequences"].items())
keypoints_train = np.stack([data["keypoints"] for data in sequence_data_train])
# load submission data (no annoations)
data_submission = np.load(
os.path.join(path, "fly_group_test.npy"), allow_pickle=True
).item()
sequence_ids_submission, sequence_data_submission = zip(
*data_submission["sequences"].items()
)
keypoints_submission = np.stack(
[data["keypoints"] for data in sequence_data_submission]
)
# concatenate train and submission data
sequence_ids = np.concatenate([sequence_ids_train, sequence_ids_submission], axis=0)
keypoints = np.concatenate([keypoints_train, keypoints_submission], axis=0)
split_mask = np.ones(len(sequence_ids), dtype=bool)
split_mask[-len(sequence_ids_submission) :] = False
# treat each fly independently, keep track of which video each fly came from
num_samples, sequence_length, num_flies, num_keypoints, _ = keypoints.shape
keypoints = keypoints.transpose((0, 2, 1, 3, 4))
keypoints = keypoints.reshape((-1, sequence_length, num_keypoints * 2))
batch = np.repeat(np.arange(num_samples), num_flies)
return keypoints, split_mask, batch
def train(model, device, loader, optimizer, criterion, writer, step, log_every_step):
model.train()
for data in tqdm(loader, position=1, leave=False):
# todo convert to float
input = data["input"].float().to(device) # (B, N, L)
target = data["target_hist"].float().to(device)
ignore_weights = data["ignore_weights"].to(device)
# forward pass
optimizer.zero_grad()
embs, hoa_pred, byol_preds = model(input)
# prediction task
hoa_loss = criterion(target, hoa_pred, ignore_weights)
# contrastive loss: short term
batch_size, sequence_length, emb_dim = embs["short_term"].size()
skip_frames, delta = 60, 5
view_1_id = (
torch.randint(sequence_length - skip_frames - delta, (batch_size,))
+ skip_frames
)
view_2_id = torch.randint(delta + 1, (batch_size,)) + view_1_id
view_2_id = torch.clip(view_2_id, 0, sequence_length)
view_1 = byol_preds["short_term"][torch.arange(batch_size), view_1_id]
view_2 = embs["short_term"][torch.arange(batch_size), view_2_id]
byol_loss_short_term = (
1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean()
)
# contrastive loss: long term
batch_size, sequence_length, emb_dim = embs["long_term"].size()
skip_frames = 100
view_1_id = (
torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames
)
view_2_id = (
torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames
)
view_1 = byol_preds["long_term"][torch.arange(batch_size), view_1_id]
view_2 = embs["long_term"][torch.arange(batch_size), view_2_id]
byol_loss_long_term = (
1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean()
)
# backprop
loss = 5e2 * hoa_loss + 0.5 * byol_loss_short_term + 0.5 * byol_loss_long_term
loss.backward()
optimizer.step()
step += 1
if step % log_every_step == 0:
writer.add_scalar("train/hoa_loss", hoa_loss.item(), step)
writer.add_scalar(
"train/byol_loss_short_term", byol_loss_short_term.item(), step
)
writer.add_scalar(
"train/byol_loss_long_term", byol_loss_long_term.item(), step
)
writer.add_scalar("train/total_loss", loss.item(), step)
return step
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data_root", type=str, default="./data/mabe")
parser.add_argument("--cache_path", type=str, default="./data/mabe/fruit_flies")
parser.add_argument("--hoa_bins", type=int, default=32)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--num_workers", type=int, default=16)
parser.add_argument("--epochs", type=int, default=500)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--weight_decay", type=float, default=4e-5)
parser.add_argument("--log_every_step", type=int, default=50)
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# dataset
keypoints, split_mask, batch = load_fruit_flies(args.data_root)
|
def load_fruit_flies(path):
# load raw train data (with annotations for 2 tasks)
data_train = np.load(
os.path.join(path, "fly_group_train.npy"), allow_pickle=True
).item()
sequence_ids_train, sequence_data_train = zip(*data_train["sequences"].items())
keypoints_train = np.stack([data["keypoints"] for data in sequence_data_train])
# load submission data (no annoations)
data_submission = np.load(
os.path.join(path, "fly_group_test.npy"), allow_pickle=True
).item()
sequence_ids_submission, sequence_data_submission = zip(
*data_submission["sequences"].items()
)
keypoints_submission = np.stack(
[data["keypoints"] for data in sequence_data_submission]
)
# concatenate train and submission data
sequence_ids = np.concatenate([sequence_ids_train, sequence_ids_submission], axis=0)
keypoints = np.concatenate([keypoints_train, keypoints_submission], axis=0)
split_mask = np.ones(len(sequence_ids), dtype=bool)
split_mask[-len(sequence_ids_submission) :] = False
# treat each fly independently, keep track of which video each fly came from
num_samples, sequence_length, num_flies, num_keypoints, _ = keypoints.shape
keypoints = keypoints.transpose((0, 2, 1, 3, 4))
keypoints = keypoints.reshape((-1, sequence_length, num_keypoints * 2))
batch = np.repeat(np.arange(num_samples), num_flies)
return keypoints, split_mask, batch
def train(model, device, loader, optimizer, criterion, writer, step, log_every_step):
model.train()
for data in tqdm(loader, position=1, leave=False):
# todo convert to float
input = data["input"].float().to(device) # (B, N, L)
target = data["target_hist"].float().to(device)
ignore_weights = data["ignore_weights"].to(device)
# forward pass
optimizer.zero_grad()
embs, hoa_pred, byol_preds = model(input)
# prediction task
hoa_loss = criterion(target, hoa_pred, ignore_weights)
# contrastive loss: short term
batch_size, sequence_length, emb_dim = embs["short_term"].size()
skip_frames, delta = 60, 5
view_1_id = (
torch.randint(sequence_length - skip_frames - delta, (batch_size,))
+ skip_frames
)
view_2_id = torch.randint(delta + 1, (batch_size,)) + view_1_id
view_2_id = torch.clip(view_2_id, 0, sequence_length)
view_1 = byol_preds["short_term"][torch.arange(batch_size), view_1_id]
view_2 = embs["short_term"][torch.arange(batch_size), view_2_id]
byol_loss_short_term = (
1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean()
)
# contrastive loss: long term
batch_size, sequence_length, emb_dim = embs["long_term"].size()
skip_frames = 100
view_1_id = (
torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames
)
view_2_id = (
torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames
)
view_1 = byol_preds["long_term"][torch.arange(batch_size), view_1_id]
view_2 = embs["long_term"][torch.arange(batch_size), view_2_id]
byol_loss_long_term = (
1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean()
)
# backprop
loss = 5e2 * hoa_loss + 0.5 * byol_loss_short_term + 0.5 * byol_loss_long_term
loss.backward()
optimizer.step()
step += 1
if step % log_every_step == 0:
writer.add_scalar("train/hoa_loss", hoa_loss.item(), step)
writer.add_scalar(
"train/byol_loss_short_term", byol_loss_short_term.item(), step
)
writer.add_scalar(
"train/byol_loss_long_term", byol_loss_long_term.item(), step
)
writer.add_scalar("train/total_loss", loss.item(), step)
return step
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data_root", type=str, default="./data/mabe")
parser.add_argument("--cache_path", type=str, default="./data/mabe/fruit_flies")
parser.add_argument("--hoa_bins", type=int, default=32)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--num_workers", type=int, default=16)
parser.add_argument("--epochs", type=int, default=500)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--weight_decay", type=float, default=4e-5)
parser.add_argument("--log_every_step", type=int, default=50)
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# dataset
keypoints, split_mask, batch = load_fruit_flies(args.data_root)
| dataset = KeypointsDataset( | 0 | 2023-12-05 16:26:57+00:00 | 4k |
FF14CN/Sarean-arsenal | Utility/sqMall/sqMallDoSign.py | [
{
"identifier": "Daoyu",
"path": "Utility/sdoLogin/Daoyu.py",
"snippet": "def dykey_encrypt(self):\ndef config_handler():\ndef initialize():\ndef get_guid(device_id, manuid):\ndef get_flowid(manuid, deviceid, sessionid, show_username):\ndef get_account_id_list(flowid, deviceid, manuid, sessionid, show_u... | from Utility.sdoLogin import Daoyu
from Utility.sqMall.daoyuBuildinMallSign import daoyumall_sign
from Utility.sqMall.daoyuBuildinMallBalance import daoyu_mall_balance
import Utility.Notifications.push as pusher | 1,786 | """
Author: KuliPoi
Contact: me@pipirapira.com
Created: 2023-12-21
File: sqMailDoSign.py
Version: 2.5.0
Description: Do SQMALL AUTO SIGN, FUCK SQ BY THE WAY
"""
def main():
if Daoyu.initialize():
device_id, manuid, main_key, show_username = Daoyu.config_handler()
Daoyu.logger_logs.info(f'Get Config File Success,'
f'show_username: {show_username}'
f'daoyu_key: {Daoyu.dykey_encrypt(main_key)}'
f'device_id: {device_id}, '
f'manuid: {manuid}')
if main_key != '' and show_username != '':
Daoyu.logger_stream.info('读取到了你手动设置的DaoyuKey和ShowUserName')
elif main_key == '' or show_username == '':
Daoyu.logger_stream.info('DaoyuKey 或者 showUsername 为空 看Github上的教程 求求你辣')
exit()
else:
Daoyu.logger_stream.info('config.ini可能存在问题,发个issue看看,注意不要直接将你的Config文件直接发在issue里')
exit()
flowid = Daoyu.get_flowid(manuid, device_id, main_key, show_username)
account_id_list = Daoyu.get_account_id_list(flowid, device_id, manuid, main_key, show_username)
temp_account_sessionid = Daoyu.get_temp_sessionid(main_key)
if account_id_list is not None:
results = []
for index, account_id in enumerate(account_id_list):
if Daoyu.make_confirm(account_id["accountId"], flowid, device_id, manuid, main_key, show_username):
sub_account_key = Daoyu.get_sub_account_key(flowid, manuid, device_id, main_key, show_username)
sub_account_session = Daoyu.get_sub_account_session(sub_account_key, temp_account_sessionid)
sign_msg = daoyumall_sign(sub_account_session, account_id["accountId"])
if sign_msg == 0:
Daoyu.logger_stream.info(
| """
Author: KuliPoi
Contact: me@pipirapira.com
Created: 2023-12-21
File: sqMailDoSign.py
Version: 2.5.0
Description: Do SQMALL AUTO SIGN, FUCK SQ BY THE WAY
"""
def main():
if Daoyu.initialize():
device_id, manuid, main_key, show_username = Daoyu.config_handler()
Daoyu.logger_logs.info(f'Get Config File Success,'
f'show_username: {show_username}'
f'daoyu_key: {Daoyu.dykey_encrypt(main_key)}'
f'device_id: {device_id}, '
f'manuid: {manuid}')
if main_key != '' and show_username != '':
Daoyu.logger_stream.info('读取到了你手动设置的DaoyuKey和ShowUserName')
elif main_key == '' or show_username == '':
Daoyu.logger_stream.info('DaoyuKey 或者 showUsername 为空 看Github上的教程 求求你辣')
exit()
else:
Daoyu.logger_stream.info('config.ini可能存在问题,发个issue看看,注意不要直接将你的Config文件直接发在issue里')
exit()
flowid = Daoyu.get_flowid(manuid, device_id, main_key, show_username)
account_id_list = Daoyu.get_account_id_list(flowid, device_id, manuid, main_key, show_username)
temp_account_sessionid = Daoyu.get_temp_sessionid(main_key)
if account_id_list is not None:
results = []
for index, account_id in enumerate(account_id_list):
if Daoyu.make_confirm(account_id["accountId"], flowid, device_id, manuid, main_key, show_username):
sub_account_key = Daoyu.get_sub_account_key(flowid, manuid, device_id, main_key, show_username)
sub_account_session = Daoyu.get_sub_account_session(sub_account_key, temp_account_sessionid)
sign_msg = daoyumall_sign(sub_account_session, account_id["accountId"])
if sign_msg == 0:
Daoyu.logger_stream.info( | f'账号{account_id["displayName"]}签到成功,当前积分余额{daoyu_mall_balance(sub_account_session)}') | 2 | 2023-12-06 08:48:02+00:00 | 4k |
janmartchouk/vidgen | src/audio_generator.py | [
{
"identifier": "tts",
"path": "utils/tiktok_tts.py",
"snippet": "def tts(text: str, voice: str = \"none\", filename: str = \"output.mp3\", play_sound: bool = False) -> None:\n # checking if the website is available\n global current_endpoint\n\n if get_api_response().status_code == 200:\n ... | import random
import os
import sys
import logging
import tempfile
from pydub import AudioSegment
from tqdm import tqdm
from tqdm.contrib.logging import logging_redirect_tqdm
from utils.tiktok_tts import tts as tiktok_tts
from utils.logger import setup_logger
from models.post import Post
from config.structure import AUDIO_DIR
from config.dicts import TIKTOK_VOICES
from utils.text import split_text_into_chunks, shorten_hash, shorten_string | 2,727 |
class AudioGenerator:
def __init__(self, loglevel = logging.INFO):
self.logger = setup_logger(__name__, loglevel, emoji='🎵')
self.output_dir = AUDIO_DIR
def from_post(self, post):
"""
Generate audio from a post.
Args:
post (Post): The post content to generate audio from.
Returns:
bool: True if audio generation is successful, False otherwise.
"""
|
class AudioGenerator:
def __init__(self, loglevel = logging.INFO):
self.logger = setup_logger(__name__, loglevel, emoji='🎵')
self.output_dir = AUDIO_DIR
def from_post(self, post):
"""
Generate audio from a post.
Args:
post (Post): The post content to generate audio from.
Returns:
bool: True if audio generation is successful, False otherwise.
"""
| voice = random.choice(TIKTOK_VOICES) | 4 | 2023-12-14 13:00:22+00:00 | 4k |
asdfghjil/XMUCourseCheckin | app.py | [
{
"identifier": "courseCheckin",
"path": "checkin.py",
"snippet": "def courseCheckin(session, http_header, userInfo):\n lesson = printCheckinList(session, http_header, userInfo, today=True)\n checkin(session, http_header, userInfo, lesson)"
},
{
"identifier": "autoCheckin",
"path": "ch... | import json
import requests
import sys
from checkin import courseCheckin, autoCheckin
from checkinScanCode import scanCheckin
from courseQuery import courseQuery
from attendanceQuery import attendanceQuery
from courseReportQuery import CourseReportQuery | 2,210 |
serverUrl = "https://tingke.xmu.edu.cn/app"
serverImg = "https://tingke.xmu.edu.cn/uploadFile"
serverIcon = "https://tingke.xmu.edu.cn/images/icon"
serverPhoto = "https://tingke.xmu.edu.cn/photo"
serverPdf = "https://tingke.xmu.edu.cn/pdf/"
userInfo = json.load(open("userInfo.json", "r", encoding="utf-8"))
# print(userInfo)
http_header = {
"Host": "tingke.xmu.edu.cn",
"Content-Type": "application/x-www-form-urlencoded",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Accept": "*/*",
"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac",
"Content-Length": "126",
"Accept-Language": "zh-CN,zh-Hans;q=0.9"
}
session = requests.Session()
while True:
print('')
print('------------------ 小鸾的智慧教务 ------------------')
print('1. 课程签到')
print('2. 扫码签到')
print('3. 课程自动签到')
print('4. 课程查询')
print('5. 学生出勤查询')
print('6. 课程举报查询')
print('0. 退出')
try:
choice = int(input('请选择:'))
if choice < 0 or choice > 6:
raise Exception
except:
print('输入错误,请重新输入')
continue
try:
if choice == 0:
break
if choice == 1:
courseCheckin(session, http_header, userInfo)
elif choice == 2:
|
serverUrl = "https://tingke.xmu.edu.cn/app"
serverImg = "https://tingke.xmu.edu.cn/uploadFile"
serverIcon = "https://tingke.xmu.edu.cn/images/icon"
serverPhoto = "https://tingke.xmu.edu.cn/photo"
serverPdf = "https://tingke.xmu.edu.cn/pdf/"
userInfo = json.load(open("userInfo.json", "r", encoding="utf-8"))
# print(userInfo)
http_header = {
"Host": "tingke.xmu.edu.cn",
"Content-Type": "application/x-www-form-urlencoded",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
"Accept": "*/*",
"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E217 MicroMessenger/6.8.0(0x16080000) NetType/WIFI Language/en Branch/Br_trunk MiniProgramEnv/Mac",
"Content-Length": "126",
"Accept-Language": "zh-CN,zh-Hans;q=0.9"
}
session = requests.Session()
while True:
print('')
print('------------------ 小鸾的智慧教务 ------------------')
print('1. 课程签到')
print('2. 扫码签到')
print('3. 课程自动签到')
print('4. 课程查询')
print('5. 学生出勤查询')
print('6. 课程举报查询')
print('0. 退出')
try:
choice = int(input('请选择:'))
if choice < 0 or choice > 6:
raise Exception
except:
print('输入错误,请重新输入')
continue
try:
if choice == 0:
break
if choice == 1:
courseCheckin(session, http_header, userInfo)
elif choice == 2: | scanCheckin(session, http_header, userInfo) | 2 | 2023-12-13 10:42:20+00:00 | 4k |
kurtnettle/wttrbarpy | wttrbarpy/__main__.py | [
{
"identifier": "Config",
"path": "wttrbarpy/config.py",
"snippet": "class Config:\n data: dict\n unit: str\n ampm: bool\n main_indicator: str\n custom_indicator: str\n format_type: int\n hour_text_only: bool\n plain_text: bool\n hide_wind_details: bool\n hide_conditions: b... | from argparse import ArgumentParser
from json import dumps, loads
from urllib.error import HTTPError
from urllib.parse import urlparse
from urllib.request import urlopen
from wttrbarpy.config import Config, build_config
from wttrbarpy.formats import format_text, format_tooltip | 2,207 | type=str,
default="temp_C",
help="decide which current_conditions key will be shown on waybar. defaults to temp_C",
)
parser.add_argument(
"--custom-indicator",
dest="custom_indicator",
type=str,
default=None,
help="customize the indicator. example: $temp_C",
)
parser.add_argument(
"--date-format",
dest="date_format",
type=str,
default="%A %b %d",
help="formats the date next to the days. defaults to %%A-%%b-%%d",
)
parser.add_argument(
"--hide-conditions",
action="store_true",
dest="hide_conditions",
help='hide extra conditions next to each hour description. like "20° Cloudy" instead of "20° Cloudy, Overcast 81%%, Sunshine 13%%". defaults to False',
)
parser.add_argument(
"--hide-wind-details",
action="store_true",
dest="hide_wind_details",
help="removes extra wind details (wind direction and degree). defaults to False",
)
parser.add_argument(
"--max-conditions",
dest="max_conditions",
type=int,
default=0,
help="limit the number of conditions to show next to each hour description. defaults to 0 (shows all available)",
)
parser.add_argument(
"--fahrenheit",
"-f",
action="store_true",
dest="fahrenheit",
help="use fahrenheit instead of celsius. defaults to False",
)
parser.add_argument(
"--vertical-view",
action="store_true",
dest="vertical_view",
help="shows the icon on the first line and temperature in a new line (doesn't work for custom-indicator). defaults to False",
)
parser.add_argument(
"--format-type",
dest="format_type",
type=int,
default=2,
help="specify the global output format type (1 only text, 2 only icon/emoji, 3 text with icon/emoji). defaults to 2",
)
parser.add_argument(
"--hour-text-only",
action="store_true",
dest="hour_text_only",
help="show hour as text only. defaults to False",
)
parser.add_argument(
"--emoji",
action="store_true",
dest="emoji",
help="replace icons with emojis. defaults to False",
)
parser.add_argument(
"--neutral-icon",
action="store_true",
dest="neutral_icon",
help="show neutral icon instead of daytime/nighttime icons. defaults to False",
)
parser.add_argument(
"--plain-text",
action="store_true",
dest="plain_text",
help="shows the plain text removing all pango markup tags and json output. defaults to False",
)
parser.add_argument(
"--show-temp-unit",
action="store_true",
dest="show_temp_unit",
help="show temperature value with unit like 20°C or 20°F. defaults to False",
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s 1.0.0",
help="show wttrbarpy version.",
)
parser.add_argument(
"--debug",
action="store_true",
dest="debug_mode",
help="lets not spam wttr.in :)",
)
args = parser.parse_args()
api_url = "https://wttr.in/{}?format=j1".format(args.location)
if args.debug_mode:
api_url = "http://0.0.0.0:8000/{}.json?format=j1".format(args.location)
try:
with urlopen(api_url, timeout=60) as response:
resp = response.read()
data = loads(resp.decode())
except HTTPError as e:
output = {"text": "⚠️", "tooltip": str(e)}
print_json(output)
return
config = build_config(data, args)
output = {
"text": format_text(config=config),
|
def print_json(data: dict) -> None:
print(dumps(data, ensure_ascii=False))
def main() -> None:
parser = ArgumentParser(
prog="wttrbarpy",
description="a highly customizable weather module for Waybar",
allow_abbrev=False,
)
parser.add_argument(
"--ampm",
action="store_true",
dest="ampm",
help="show time in AM/PM format. defaults to False",
)
parser.add_argument(
"--location",
"-l",
dest="location",
type=str,
default="",
help="specify a location. defaults to None (i.e your current location)",
)
parser.add_argument(
"--main-indicator",
dest="main_indicator",
type=str,
default="temp_C",
help="decide which current_conditions key will be shown on waybar. defaults to temp_C",
)
parser.add_argument(
"--custom-indicator",
dest="custom_indicator",
type=str,
default=None,
help="customize the indicator. example: $temp_C",
)
parser.add_argument(
"--date-format",
dest="date_format",
type=str,
default="%A %b %d",
help="formats the date next to the days. defaults to %%A-%%b-%%d",
)
parser.add_argument(
"--hide-conditions",
action="store_true",
dest="hide_conditions",
help='hide extra conditions next to each hour description. like "20° Cloudy" instead of "20° Cloudy, Overcast 81%%, Sunshine 13%%". defaults to False',
)
parser.add_argument(
"--hide-wind-details",
action="store_true",
dest="hide_wind_details",
help="removes extra wind details (wind direction and degree). defaults to False",
)
parser.add_argument(
"--max-conditions",
dest="max_conditions",
type=int,
default=0,
help="limit the number of conditions to show next to each hour description. defaults to 0 (shows all available)",
)
parser.add_argument(
"--fahrenheit",
"-f",
action="store_true",
dest="fahrenheit",
help="use fahrenheit instead of celsius. defaults to False",
)
parser.add_argument(
"--vertical-view",
action="store_true",
dest="vertical_view",
help="shows the icon on the first line and temperature in a new line (doesn't work for custom-indicator). defaults to False",
)
parser.add_argument(
"--format-type",
dest="format_type",
type=int,
default=2,
help="specify the global output format type (1 only text, 2 only icon/emoji, 3 text with icon/emoji). defaults to 2",
)
parser.add_argument(
"--hour-text-only",
action="store_true",
dest="hour_text_only",
help="show hour as text only. defaults to False",
)
parser.add_argument(
"--emoji",
action="store_true",
dest="emoji",
help="replace icons with emojis. defaults to False",
)
parser.add_argument(
"--neutral-icon",
action="store_true",
dest="neutral_icon",
help="show neutral icon instead of daytime/nighttime icons. defaults to False",
)
parser.add_argument(
"--plain-text",
action="store_true",
dest="plain_text",
help="shows the plain text removing all pango markup tags and json output. defaults to False",
)
parser.add_argument(
"--show-temp-unit",
action="store_true",
dest="show_temp_unit",
help="show temperature value with unit like 20°C or 20°F. defaults to False",
)
parser.add_argument(
"--version",
action="version",
version="%(prog)s 1.0.0",
help="show wttrbarpy version.",
)
parser.add_argument(
"--debug",
action="store_true",
dest="debug_mode",
help="lets not spam wttr.in :)",
)
args = parser.parse_args()
api_url = "https://wttr.in/{}?format=j1".format(args.location)
if args.debug_mode:
api_url = "http://0.0.0.0:8000/{}.json?format=j1".format(args.location)
try:
with urlopen(api_url, timeout=60) as response:
resp = response.read()
data = loads(resp.decode())
except HTTPError as e:
output = {"text": "⚠️", "tooltip": str(e)}
print_json(output)
return
config = build_config(data, args)
output = {
"text": format_text(config=config), | "tooltip": format_tooltip(config=config), | 3 | 2023-12-08 19:59:06+00:00 | 4k |
camenduru/MotionDirector-hf | MotionDirector_inference_batch.py | [
{
"identifier": "export_to_video",
"path": "MotionDirector_train.py",
"snippet": "def export_to_video(video_frames, output_video_path, fps):\n video_writer = imageio.get_writer(output_video_path, fps=fps)\n for img in video_frames:\n video_writer.append_data(np.array(img))\n video_writer... | import argparse
import os
import platform
import re
import warnings
import torch
import random
import imageio
import decord
from typing import Optional
from diffusers import DDIMScheduler, TextToVideoSDPipeline
from einops import rearrange
from torch import Tensor
from torch.nn.functional import interpolate
from tqdm import trange
from MotionDirector_train import export_to_video, handle_memory_attention, load_primary_models, unet_and_text_g_c, freeze_models
from utils.lora_handler import LoraHandler
from utils.ddim_utils import ddim_inversion | 2,932 |
def initialize_pipeline(
model: str,
device: str = "cuda",
xformers: bool = False,
sdp: bool = False,
lora_path: str = "",
lora_rank: int = 64,
lora_scale: float = 1.0,
):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
scheduler, tokenizer, text_encoder, vae, unet = load_primary_models(model)
# Freeze any necessary models
freeze_models([vae, text_encoder, unet])
# Enable xformers if available
handle_memory_attention(xformers, sdp, unet)
lora_manager_temporal = LoraHandler(
version="cloneofsimo",
use_unet_lora=True,
use_text_lora=False,
save_for_webui=False,
only_for_webui=False,
unet_replace_modules=["TransformerTemporalModel"],
text_encoder_replace_modules=None,
lora_bias=None
)
unet_lora_params, unet_negation = lora_manager_temporal.add_lora_to_model(
True, unet, lora_manager_temporal.unet_replace_modules, 0, lora_path, r=lora_rank, scale=lora_scale)
unet.eval()
text_encoder.eval()
|
def initialize_pipeline(
model: str,
device: str = "cuda",
xformers: bool = False,
sdp: bool = False,
lora_path: str = "",
lora_rank: int = 64,
lora_scale: float = 1.0,
):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
scheduler, tokenizer, text_encoder, vae, unet = load_primary_models(model)
# Freeze any necessary models
freeze_models([vae, text_encoder, unet])
# Enable xformers if available
handle_memory_attention(xformers, sdp, unet)
lora_manager_temporal = LoraHandler(
version="cloneofsimo",
use_unet_lora=True,
use_text_lora=False,
save_for_webui=False,
only_for_webui=False,
unet_replace_modules=["TransformerTemporalModel"],
text_encoder_replace_modules=None,
lora_bias=None
)
unet_lora_params, unet_negation = lora_manager_temporal.add_lora_to_model(
True, unet, lora_manager_temporal.unet_replace_modules, 0, lora_path, r=lora_rank, scale=lora_scale)
unet.eval()
text_encoder.eval() | unet_and_text_g_c(unet, text_encoder, False, False) | 3 | 2023-12-11 04:51:39+00:00 | 4k |
Yingyue-L/Mamba-LLaVA | llava/model/llava_arch.py | [
{
"identifier": "build_vision_tower",
"path": "llava/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.p... | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn | 3,064 | # it is a headache to deal with None all the time.
# But it is not ideal, and if you have a better idea,
# please open an issue / submit a PR, thanks.
_labels = labels
_position_ids = position_ids
_attention_mask = attention_mask
if attention_mask is None:
attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
else:
attention_mask = attention_mask.bool()
if position_ids is None:
position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
if labels is None:
labels = torch.full_like(input_ids, IGNORE_INDEX)
# remove the padding using attention_mask -- TODO: double check
input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]
new_input_embeds = []
new_labels = []
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
if num_images == 0:
cur_image_features = image_features[cur_image_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
new_input_embeds.append(cur_input_embeds)
new_labels.append(labels[batch_idx])
cur_image_idx += 1
continue
image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
cur_input_ids_noim = []
cur_labels = labels[batch_idx]
cur_labels_noim = []
for i in range(len(image_token_indices) - 1):
cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]])
cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]])
split_sizes = [x.shape[0] for x in cur_labels_noim]
cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
cur_new_input_embeds = []
cur_new_labels = []
for i in range(num_images + 1):
cur_new_input_embeds.append(cur_input_embeds_no_im[i])
cur_new_labels.append(cur_labels_noim[i])
if i < num_images:
cur_image_features = image_features[cur_image_idx]
cur_image_idx += 1
cur_new_input_embeds.append(cur_image_features)
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))
cur_new_input_embeds = torch.cat(cur_new_input_embeds)
cur_new_labels = torch.cat(cur_new_labels)
new_input_embeds.append(cur_new_input_embeds)
new_labels.append(cur_new_labels)
# Truncate sequences to max length as image embeddings can make the sequence longer
tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None)
if tokenizer_model_max_length is not None:
new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds]
new_labels = [x[:tokenizer_model_max_length] for x in new_labels]
# Combine them
max_len = max(x.shape[0] for x in new_input_embeds)
batch_size = len(new_input_embeds)
new_input_embeds_padded = []
new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)
for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
cur_len = cur_new_embed.shape[0]
if getattr(self.config, 'tokenizer_padding_side', 'right') == "left":
new_input_embeds_padded.append(torch.cat((
torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device),
cur_new_embed
), dim=0))
if cur_len > 0:
new_labels_padded[i, -cur_len:] = cur_new_labels
attention_mask[i, -cur_len:] = True
position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
else:
new_input_embeds_padded.append(torch.cat((
cur_new_embed,
torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)
), dim=0))
if cur_len > 0:
new_labels_padded[i, :cur_len] = cur_new_labels
attention_mask[i, :cur_len] = True
position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)
if _labels is None:
new_labels = None
else:
new_labels = new_labels_padded
if _attention_mask is None:
attention_mask = None
else:
attention_mask = attention_mask.to(dtype=_attention_mask.dtype)
if _position_ids is None:
position_ids = None
return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels
def initialize_vision_tokenizer(self, model_args, tokenizer):
if model_args.mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
self.resize_token_embeddings(len(tokenizer))
if model_args.mm_use_im_start_end:
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = build_vision_projector(config)
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None):
vision_tower = model_args.vision_tower
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
self.config.mm_vision_tower = vision_tower
if self.get_vision_tower() is None:
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
else:
if fsdp is not None and len(fsdp) > 0:
vision_tower = self.vision_tower[0]
else:
vision_tower = self.vision_tower
vision_tower.load_model()
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.config.mm_hidden_size = vision_tower.hidden_size
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
if getattr(self, 'mm_projector', None) is None:
self.mm_projector = build_vision_projector(self.config)
else:
# In case it is frozen by LoRA
for p in self.mm_projector.parameters():
p.requires_grad = True
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
class LlavaMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_images(self, images):
image_features = self.get_model().get_vision_tower()(images)
image_features = self.get_model().mm_projector(image_features)
return image_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, position_ids, attention_mask, past_key_values, labels, images
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
target_shape = past_key_values[-1][-1].shape[-2] + 1
attention_mask = torch.cat((attention_mask, torch.ones(
(attention_mask.shape[0], target_shape - attention_mask.shape[1]),
dtype=attention_mask.dtype,
device=attention_mask.device
)), dim=1)
position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1
return input_ids, position_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1).to(self.device) for x in image_features]
else:
image_features = self.encode_images(images).to(self.device)
# TODO: image start / end is not implemented here to support pretraining.
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
raise NotImplementedError
# Let's just add dummy tensors if they do not exist,
# it is a headache to deal with None all the time.
# But it is not ideal, and if you have a better idea,
# please open an issue / submit a PR, thanks.
_labels = labels
_position_ids = position_ids
_attention_mask = attention_mask
if attention_mask is None:
attention_mask = torch.ones_like(input_ids, dtype=torch.bool)
else:
attention_mask = attention_mask.bool()
if position_ids is None:
position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device)
if labels is None:
labels = torch.full_like(input_ids, IGNORE_INDEX)
# remove the padding using attention_mask -- TODO: double check
input_ids = [cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask)]
labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)]
new_input_embeds = []
new_labels = []
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum()
if num_images == 0:
cur_image_features = image_features[cur_image_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids)
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0)
new_input_embeds.append(cur_input_embeds)
new_labels.append(labels[batch_idx])
cur_image_idx += 1
continue
image_token_indices = [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]]
cur_input_ids_noim = []
cur_labels = labels[batch_idx]
cur_labels_noim = []
for i in range(len(image_token_indices) - 1):
cur_input_ids_noim.append(cur_input_ids[image_token_indices[i]+1:image_token_indices[i+1]])
cur_labels_noim.append(cur_labels[image_token_indices[i]+1:image_token_indices[i+1]])
split_sizes = [x.shape[0] for x in cur_labels_noim]
cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim))
cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0)
cur_new_input_embeds = []
cur_new_labels = []
for i in range(num_images + 1):
cur_new_input_embeds.append(cur_input_embeds_no_im[i])
cur_new_labels.append(cur_labels_noim[i])
if i < num_images:
cur_image_features = image_features[cur_image_idx]
cur_image_idx += 1
cur_new_input_embeds.append(cur_image_features)
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=cur_labels.device, dtype=cur_labels.dtype))
cur_new_input_embeds = torch.cat(cur_new_input_embeds)
cur_new_labels = torch.cat(cur_new_labels)
new_input_embeds.append(cur_new_input_embeds)
new_labels.append(cur_new_labels)
# Truncate sequences to max length as image embeddings can make the sequence longer
tokenizer_model_max_length = getattr(self.config, 'tokenizer_model_max_length', None)
if tokenizer_model_max_length is not None:
new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds]
new_labels = [x[:tokenizer_model_max_length] for x in new_labels]
# Combine them
max_len = max(x.shape[0] for x in new_input_embeds)
batch_size = len(new_input_embeds)
new_input_embeds_padded = []
new_labels_padded = torch.full((batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device)
attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device)
position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device)
for i, (cur_new_embed, cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)):
cur_len = cur_new_embed.shape[0]
if getattr(self.config, 'tokenizer_padding_side', 'right') == "left":
new_input_embeds_padded.append(torch.cat((
torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device),
cur_new_embed
), dim=0))
if cur_len > 0:
new_labels_padded[i, -cur_len:] = cur_new_labels
attention_mask[i, -cur_len:] = True
position_ids[i, -cur_len:] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
else:
new_input_embeds_padded.append(torch.cat((
cur_new_embed,
torch.zeros((max_len - cur_len, cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)
), dim=0))
if cur_len > 0:
new_labels_padded[i, :cur_len] = cur_new_labels
attention_mask[i, :cur_len] = True
position_ids[i, :cur_len] = torch.arange(0, cur_len, dtype=position_ids.dtype, device=position_ids.device)
new_input_embeds = torch.stack(new_input_embeds_padded, dim=0)
if _labels is None:
new_labels = None
else:
new_labels = new_labels_padded
if _attention_mask is None:
attention_mask = None
else:
attention_mask = attention_mask.to(dtype=_attention_mask.dtype)
if _position_ids is None:
position_ids = None
return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels
def initialize_vision_tokenizer(self, model_args, tokenizer):
if model_args.mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
self.resize_token_embeddings(len(tokenizer))
if model_args.mm_use_im_start_end: | num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True) | 5 | 2023-12-09 09:39:13+00:00 | 4k |
Theia-4869/MoSA | src/engine/trainer.py | [
{
"identifier": "Evaluator",
"path": "src/engine/evaluator.py",
"snippet": "class Evaluator():\n \"\"\"\n An evaluator with below logics:\n\n 1. find which eval module to use.\n 2. store the eval results, pretty print it in log file as well.\n \"\"\"\n\n def __init__(\n self,\n ... | import datetime
import time
import torch
import torch.nn as nn
import os
import shutil
import random
import wandb
from fvcore.common.config import CfgNode
from fvcore.common.checkpoint import Checkpointer
from ..engine.evaluator import Evaluator
from ..solver.lr_scheduler import make_scheduler
from ..solver.optimizer import make_optimizer
from ..solver.losses import build_loss, symmetric_KL_loss, deepreg_MSE_loss
from ..utils import logging
from ..utils.train_utils import AverageMeter, gpu_mem_usage | 3,228 | #!/usr/bin/env python3
"""
a trainer class
"""
logger = logging.get_logger("MOSA")
class Trainer():
"""
a trainer with below logics:
1. Build optimizer, scheduler
2. Load checkpoints if provided
3. Train and eval at each epoch
"""
def __init__(
self,
cfg: CfgNode,
args,
model: nn.Module,
evaluator: Evaluator,
device: torch.device,
) -> None:
self.cfg = cfg
self.args = args
self.model = model
self.device = device
# solver related
logger.info("Setting up the optimizer...")
self.optimizer = make_optimizer([self.model], cfg.SOLVER)
| #!/usr/bin/env python3
"""
a trainer class
"""
logger = logging.get_logger("MOSA")
class Trainer():
"""
a trainer with below logics:
1. Build optimizer, scheduler
2. Load checkpoints if provided
3. Train and eval at each epoch
"""
def __init__(
self,
cfg: CfgNode,
args,
model: nn.Module,
evaluator: Evaluator,
device: torch.device,
) -> None:
self.cfg = cfg
self.args = args
self.model = model
self.device = device
# solver related
logger.info("Setting up the optimizer...")
self.optimizer = make_optimizer([self.model], cfg.SOLVER) | self.scheduler = make_scheduler(self.optimizer, cfg.SOLVER) | 1 | 2023-12-06 07:50:16+00:00 | 4k |
khwong-c/syn-magia | magia/std/bundles.py | [
{
"identifier": "IOBundle",
"path": "magia/bundle.py",
"snippet": "class IOBundle:\n \"\"\"\n Define a bundle of I/O, which can be used as the input or output of a module.\n An IOBundle can be added with Input and Output.\n However, the bundle cannot be used as normal signals.\n The actua... | from typing import Union
from magia import Input, IOBundle, Output | 2,399 |
class StdIO:
@classmethod
def valid_multi(cls, bundle_name: str, data_spec: dict[str, Union[tuple[int, bool], int]], sep="_") -> IOBundle:
if not isinstance(data_spec, dict):
raise TypeError("data_spec must be a dict")
if len(data_spec) == 0:
raise ValueError("data_spec must not be empty")
new_bundle = IOBundle()
for name, spec in data_spec.items():
if isinstance(spec, tuple):
width, signed = spec
else:
width, signed = spec, False
|
class StdIO:
@classmethod
def valid_multi(cls, bundle_name: str, data_spec: dict[str, Union[tuple[int, bool], int]], sep="_") -> IOBundle:
if not isinstance(data_spec, dict):
raise TypeError("data_spec must be a dict")
if len(data_spec) == 0:
raise ValueError("data_spec must not be empty")
new_bundle = IOBundle()
for name, spec in data_spec.items():
if isinstance(spec, tuple):
width, signed = spec
else:
width, signed = spec, False | new_bundle += Output(f"{name}", width, signed) | 2 | 2023-12-12 22:50:43+00:00 | 4k |
IBM/AI-assisted-chemical-sensing | src/chemsense/vision/cli/few_shot_analysis.py | [
{
"identifier": "setup_basic_logging_for_scripts",
"path": "src/chemsense/vision/logging_configuration.py",
"snippet": "def setup_basic_logging_for_scripts() -> None:\n \"\"\"Setup basic stdout logging for scripts.\"\"\"\n logging.basicConfig(\n stream=sys.stdout,\n level=logging.INF... | import os
import random
import click
import numpy as np
import pandas as pd
import torch.utils.data
from copy import deepcopy
from pathlib import Path
from typing import Dict, List
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score
from torchvision import datasets, transforms
from ..logging_configuration import setup_basic_logging_for_scripts
from ..modeling.classification import CLASSIFICATION_HEADS
from ..modeling.encoders import ENCODERS_REGISTRY
| 1,614 | """Train and test models with few shots and image augmentation."""
__copyright__ = """
LICENSED INTERNAL CODE. PROPERTY OF IBM.
IBM Research Licensed Internal Code
(C) Copyright IBM Corp. 2023
ALL RIGHTS RESERVED
"""
num_images = int(os.getenv("NUMBER_OF_IMAGES", 50))
num_rep = int(os.getenv("NUMBER_OF_REPEATS", 50))
@click.command()
@click.option("--task", type=str, default="red_wines", help="Dataset name identifier.")
@click.option(
"--n_comp",
type=int,
default=10,
help="Number of principal components to be used as predictors.",
)
@click.option(
"--mix_ratio",
type=float,
default=0.95,
help="Fraction of pixel intensity for image mixing and data augmentation. Needs to be between 0 and 1.",
)
@click.option(
"--batch_size",
type=int,
default=10,
help="Batch size for image loading and processing.",
)
@click.option(
"--data_path",
required=True,
type=click.Path(path_type=Path, exists=True),
help="Path to image directory.",
)
@click.option(
"--output_path",
required=True,
type=click.Path(path_type=Path),
help="Path to save classification model validation results.",
)
def main(
task: str,
n_comp: int,
mix_ratio: float,
batch_size: int,
data_path: Path,
output_path: Path,
) -> None:
setup_basic_logging_for_scripts()
w_class = mix_ratio
w_other = 1 - w_class
data_path = Path.joinpath(data_path, task)
data_transforms = transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.ToTensor(),
]
)
dataset = datasets.ImageFolder(data_path, transform=data_transforms)
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False
)
class_names = np.array(dataset.classes)
Path(output_path).mkdir(exist_ok=True)
result_path = Path.joinpath(output_path, task)
| """Train and test models with few shots and image augmentation."""
__copyright__ = """
LICENSED INTERNAL CODE. PROPERTY OF IBM.
IBM Research Licensed Internal Code
(C) Copyright IBM Corp. 2023
ALL RIGHTS RESERVED
"""
num_images = int(os.getenv("NUMBER_OF_IMAGES", 50))
num_rep = int(os.getenv("NUMBER_OF_REPEATS", 50))
@click.command()
@click.option("--task", type=str, default="red_wines", help="Dataset name identifier.")
@click.option(
"--n_comp",
type=int,
default=10,
help="Number of principal components to be used as predictors.",
)
@click.option(
"--mix_ratio",
type=float,
default=0.95,
help="Fraction of pixel intensity for image mixing and data augmentation. Needs to be between 0 and 1.",
)
@click.option(
"--batch_size",
type=int,
default=10,
help="Batch size for image loading and processing.",
)
@click.option(
"--data_path",
required=True,
type=click.Path(path_type=Path, exists=True),
help="Path to image directory.",
)
@click.option(
"--output_path",
required=True,
type=click.Path(path_type=Path),
help="Path to save classification model validation results.",
)
def main(
task: str,
n_comp: int,
mix_ratio: float,
batch_size: int,
data_path: Path,
output_path: Path,
) -> None:
setup_basic_logging_for_scripts()
w_class = mix_ratio
w_other = 1 - w_class
data_path = Path.joinpath(data_path, task)
data_transforms = transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.ToTensor(),
]
)
dataset = datasets.ImageFolder(data_path, transform=data_transforms)
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False
)
class_names = np.array(dataset.classes)
Path(output_path).mkdir(exist_ok=True)
result_path = Path.joinpath(output_path, task)
| model_heads = CLASSIFICATION_HEADS.keys()
| 1 | 2023-12-05 15:56:12+00:00 | 4k |
batmanlab/DrasCLR | extract_feature.py | [
{
"identifier": "Encoder",
"path": "models/cnn3d.py",
"snippet": "class Encoder(nn.Module):\n\n def __init__(self, rep_dim, moco_dim, num_experts, num_coordinates):\n super(Encoder, self).__init__()\n self.rep_dim = rep_dim\n self.moco_dim = moco_dim\n self.num_experts = n... | import os
import argparse
import json
import random
import numpy as np
import torch
from easydict import EasyDict as edict
from tqdm import tqdm
from models.cnn3d import Encoder
from data.copd_patch import COPD_dataset | 3,143 |
parser = argparse.ArgumentParser(description='Extract 3D Images Representations')
parser.add_argument('--exp-name', default='./ssl_exp/exp_neighbor_0_128')
parser.add_argument('--checkpoint-patch', default='checkpoint_patch_0001.pth.tar')
parser.add_argument('--batch-size', type=int, default=1)
def main():
# read configurations
p = parser.parse_args()
patch_epoch = p.checkpoint_patch.split('.')[0][-4:]
with open(os.path.join(p.exp_name, 'configs.json')) as f:
args = edict(json.load(f))
args.checkpoint = os.path.join(p.exp_name, p.checkpoint_patch)
args.batch_size = p.batch_size
args.patch_rep_dir = os.path.join(p.exp_name, 'patch_rep', patch_epoch)
os.makedirs(args.patch_rep_dir, exist_ok=True)
# Set random seed
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
main_worker(args)
def main_worker(args):
#args.gpu = 0
#torch.cuda.set_device(args.gpu)
# create patch-level encoder
|
parser = argparse.ArgumentParser(description='Extract 3D Images Representations')
parser.add_argument('--exp-name', default='./ssl_exp/exp_neighbor_0_128')
parser.add_argument('--checkpoint-patch', default='checkpoint_patch_0001.pth.tar')
parser.add_argument('--batch-size', type=int, default=1)
def main():
# read configurations
p = parser.parse_args()
patch_epoch = p.checkpoint_patch.split('.')[0][-4:]
with open(os.path.join(p.exp_name, 'configs.json')) as f:
args = edict(json.load(f))
args.checkpoint = os.path.join(p.exp_name, p.checkpoint_patch)
args.batch_size = p.batch_size
args.patch_rep_dir = os.path.join(p.exp_name, 'patch_rep', patch_epoch)
os.makedirs(args.patch_rep_dir, exist_ok=True)
# Set random seed
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True
main_worker(args)
def main_worker(args):
#args.gpu = 0
#torch.cuda.set_device(args.gpu)
# create patch-level encoder | model_patch = Encoder(rep_dim=args.rep_dim_patch, moco_dim=args.moco_dim_patch, num_experts=args.num_experts, num_coordinates=args.num_coordinates) | 0 | 2023-12-09 02:33:53+00:00 | 4k |
casiatao/PAD | detection/detectron2/modeling/backbone/vit_adapt.py | [
{
"identifier": "Backbone",
"path": "detection/detectron2/modeling/backbone/backbone.py",
"snippet": "class Backbone(nn.Module, metaclass=ABCMeta):\n \"\"\"\n Abstract base class for network backbones.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The `__init__` method of any sub... | import logging
import math
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.layers import CNNBlockBase, Conv2d, get_norm
from detectron2.modeling.backbone.fpn import _assert_strides_are_log2_contiguous
from .backbone import Backbone
from .utils import (
PatchEmbed,
add_decomposed_rel_pos,
get_abs_pos,
window_partition,
window_unpartition,
)
from timm.models.layers import DropPath, Mlp
from fairscale.nn.checkpoint import checkpoint_wrapper | 3,319 |
logger = logging.getLogger(__name__)
__all__ = ["ViT_adapt", "SimpleFeaturePyramid", "get_vit_lr_decay_rate"]
class Adapter(nn.Module):
def __init__(self,
d_model,
down_size = 64,
dropout=0.0,
adapter_scalar="frozen",
init_value="0.0",
adapter_layernorm_option="in",
patch_wise_scalar=False):
super().__init__()
self.n_embd = d_model
self.down_size = down_size
#_before
self.adapter_layernorm_option = adapter_layernorm_option
self.adapter_layer_norm_before = None
if adapter_layernorm_option == "in" or adapter_layernorm_option == "out":
self.adapter_layer_norm_before = nn.LayerNorm(self.n_embd)
self.patch_wise_scalar = patch_wise_scalar
if patch_wise_scalar:
self.scale = None
else:
if adapter_scalar == "learnable_scalar":
self.scale = nn.Parameter(torch.ones(1) * 0.5)
else:
if init_value != "0.0":
self.scale = float(init_value)
else:
self.register_buffer('scale', torch.ones(1) * 0.5)
self.down_proj = nn.Linear(self.n_embd, self.down_size)
self.non_linear_func = nn.ReLU()
self.up_proj = nn.Linear(self.down_size, self.n_embd)
self.dropout = dropout
def forward(self, x, add_residual=False, residual=None):
residual = x if residual is None else residual
if self.adapter_layernorm_option == 'in':
x = self.adapter_layer_norm_before(x)
down = self.down_proj(x)
down = self.non_linear_func(down)
down = nn.functional.dropout(down, p=self.dropout, training=self.training)
up = self.up_proj(down)
if add_residual:
output = up + residual
else:
output = up
return output, self.scale
class Attention(nn.Module):
"""Multi-head Attention block with relative position embeddings."""
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
use_rel_pos=False,
rel_pos_zero_init=True,
input_size=None,
):
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
qkv_bias (bool: If True, add a learnable bias to query, key, value.
rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
input_size (int or None): Input resolution for calculating the relative positional
parameter size.
"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
if not rel_pos_zero_init:
nn.init.trunc_normal_(self.rel_pos_h, std=0.02)
nn.init.trunc_normal_(self.rel_pos_w, std=0.02)
def forward(self, x):
B, H, W, _ = x.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# q, k, v with shape (B * nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
attn = (q * self.scale) @ k.transpose(-2, -1)
if self.use_rel_pos:
|
logger = logging.getLogger(__name__)
__all__ = ["ViT_adapt", "SimpleFeaturePyramid", "get_vit_lr_decay_rate"]
class Adapter(nn.Module):
def __init__(self,
d_model,
down_size = 64,
dropout=0.0,
adapter_scalar="frozen",
init_value="0.0",
adapter_layernorm_option="in",
patch_wise_scalar=False):
super().__init__()
self.n_embd = d_model
self.down_size = down_size
#_before
self.adapter_layernorm_option = adapter_layernorm_option
self.adapter_layer_norm_before = None
if adapter_layernorm_option == "in" or adapter_layernorm_option == "out":
self.adapter_layer_norm_before = nn.LayerNorm(self.n_embd)
self.patch_wise_scalar = patch_wise_scalar
if patch_wise_scalar:
self.scale = None
else:
if adapter_scalar == "learnable_scalar":
self.scale = nn.Parameter(torch.ones(1) * 0.5)
else:
if init_value != "0.0":
self.scale = float(init_value)
else:
self.register_buffer('scale', torch.ones(1) * 0.5)
self.down_proj = nn.Linear(self.n_embd, self.down_size)
self.non_linear_func = nn.ReLU()
self.up_proj = nn.Linear(self.down_size, self.n_embd)
self.dropout = dropout
def forward(self, x, add_residual=False, residual=None):
residual = x if residual is None else residual
if self.adapter_layernorm_option == 'in':
x = self.adapter_layer_norm_before(x)
down = self.down_proj(x)
down = self.non_linear_func(down)
down = nn.functional.dropout(down, p=self.dropout, training=self.training)
up = self.up_proj(down)
if add_residual:
output = up + residual
else:
output = up
return output, self.scale
class Attention(nn.Module):
"""Multi-head Attention block with relative position embeddings."""
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
use_rel_pos=False,
rel_pos_zero_init=True,
input_size=None,
):
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
qkv_bias (bool: If True, add a learnable bias to query, key, value.
rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
input_size (int or None): Input resolution for calculating the relative positional
parameter size.
"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
if not rel_pos_zero_init:
nn.init.trunc_normal_(self.rel_pos_h, std=0.02)
nn.init.trunc_normal_(self.rel_pos_w, std=0.02)
def forward(self, x):
B, H, W, _ = x.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# q, k, v with shape (B * nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
attn = (q * self.scale) @ k.transpose(-2, -1)
if self.use_rel_pos: | attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W)) | 2 | 2023-12-13 13:14:36+00:00 | 4k |
pymike00/tinychat | tinychat/ui/chat.py | [
{
"identifier": "FONT_FAMILY",
"path": "tinychat/settings.py",
"snippet": "FONT_FAMILY = \"Verdana\" # Consolas"
},
{
"identifier": "MAIN_WINDOW_RESOLUTION",
"path": "tinychat/settings.py",
"snippet": "MAIN_WINDOW_RESOLUTION = \"1200x700\""
},
{
"identifier": "MAIN_WINDOW_TITLE"... | import os
import threading
import tkinter as tk
import customtkinter as ctk
from tkinter import PhotoImage
from tinychat.settings import FONT_FAMILY, MAIN_WINDOW_RESOLUTION, MAIN_WINDOW_TITLE
from tinychat.settings import get_icon_path
from tinychat.ui.frames import SettingsFrame | 2,009 |
class ChatApp(ctk.CTk):
def __init__(self, backend) -> None:
super().__init__()
self.set_icon()
self.model_name = ""
# Initialize font object to use with the chat text areas
|
class ChatApp(ctk.CTk):
def __init__(self, backend) -> None:
super().__init__()
self.set_icon()
self.model_name = ""
# Initialize font object to use with the chat text areas | chat_font = ctk.CTkFont(family=FONT_FAMILY, size=14) | 0 | 2023-12-11 20:40:02+00:00 | 4k |
nickruggeri/hypergraph-message-passing | test/model/test_numerical.py | [
{
"identifier": "approx_log_factorial",
"path": "src/model/numerical.py",
"snippet": "def approx_log_factorial(a: int | float) -> float:\n \"\"\"Compute :math::`\\log(a!)` utilizing a Ramanujan approximation, see\n https://math.stackexchange.com/questions/152342/ramanujans-approximation-to-factori... | import itertools
import numpy as np
import pytest
from scipy import sparse, special
from src.model.numerical import (
approx_log_factorial,
log_binomial_coefficient,
log_factorial,
sparse_reduce_lse,
) | 1,938 |
########################################################################################
# Test sparse_reduce_lse
# Some arbitrary matrices created by hand.
matrix_list = [
[
np.array([[1, 1, 0], [-1, 0, 0], [2, 0, 2]]),
np.array([[10, 1, 0], [-1, 0, 0], [3, 0, 1]]),
],
[
np.array([[-5, 0, 0, 0], [2, 3, 4, 5], [1.3, -5.1, 0, 1]]),
np.array([[2, 0, 0, 0], [1, 2, -1.1, 1.3], [1.3, -5.1, 0, 1]]),
np.array([[-1.1, 0, 0, 0], [3, 3, 5, 0.3], [-2.1, 2.0, 0, 1]]),
],
[
np.array([[10, 0, 100, 400], [-1, 0.3, 0, 1000], [0, 0, 0, 0]]),
np.array([[100, 0, -100, 123], [-40, 10, 0, 1100], [0, 0, 0, 0]]),
np.array([[102, 0, -97, 133], [-33, 11, 0, 900], [0, 0, 0, 0]]),
],
[
np.array([[10, 0, 100, 400], [-1, 0.3, 0, 1000], [2, 3, 4, 5]]),
np.array([[100, 0, -100, 123], [-40, 10, 0, 1100], [-1, 2, 3, 5]]),
np.array([[102, 0, -97, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]),
np.array([[-2.7, 0, 33, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]),
np.array([[-2.7, 0, 33, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]),
],
]
# Some additional random matrices.
def generate_random_matrices_with_same_non_zeros(rng, shape, n, scale, sparsity):
zero_idx = rng.random(shape) > sparsity
matrices = [rng.random(shape) * scale for _ in range(n)]
for mat in matrices:
mat[zero_idx] = 0
return matrices
rng = np.random.default_rng(seed=123)
shapes = [
(10, 3),
(100, 4),
(1000, 50),
]
scales = [1, 10, 100]
n_matrices = [5, 10, 20]
sparsity_vals = [0.1, 0.5, 0.9]
matrix_list += [
generate_random_matrices_with_same_non_zeros(rng, shape, n, scale, sparsity)
for scale in scales
for shape in shapes
for n in n_matrices
for sparsity in sparsity_vals
]
@pytest.fixture(params=[sparse.csc_matrix, sparse.csr_matrix])
def sparsity_type(request):
return request.param
@pytest.fixture(params=range(len(matrix_list)))
def sparse_and_dense_matrices(sparsity_type, request):
matrices = matrix_list[request.param]
sparse_mat = [sparsity_type(mat) for mat in matrices]
return matrices, sparse_mat, sparsity_type
@pytest.fixture
def sparse_and_dense_matrices_and_lse(sparse_and_dense_matrices):
matrices, sparse_mat, sparsity_type = sparse_and_dense_matrices
lse = sparse_reduce_lse(*sparse_mat)
return matrices, sparse_mat, sparsity_type, lse
def test_reduce_sparse_lse_type(sparse_and_dense_matrices_and_lse):
_, _, sparsity_type, lse = sparse_and_dense_matrices_and_lse
assert isinstance(lse, sparsity_type)
def test_reduce_sparse_lse_with_dense(sparse_and_dense_matrices_and_lse):
matrices, sparse_mat, sparsity_type, lse = sparse_and_dense_matrices_and_lse
dense_lse = special.logsumexp(np.stack(matrices, axis=2), axis=2)
dense_lse[matrices[0] == 0] = 0
assert np.all(dense_lse == lse)
########################################################################################
# Test log_factorial, approx_log_factorial and log_binomial_coefficient
@pytest.mark.parametrize("a", range(100))
def test_stirling_approx_against_log_factorial(a):
|
########################################################################################
# Test sparse_reduce_lse
# Some arbitrary matrices created by hand.
matrix_list = [
[
np.array([[1, 1, 0], [-1, 0, 0], [2, 0, 2]]),
np.array([[10, 1, 0], [-1, 0, 0], [3, 0, 1]]),
],
[
np.array([[-5, 0, 0, 0], [2, 3, 4, 5], [1.3, -5.1, 0, 1]]),
np.array([[2, 0, 0, 0], [1, 2, -1.1, 1.3], [1.3, -5.1, 0, 1]]),
np.array([[-1.1, 0, 0, 0], [3, 3, 5, 0.3], [-2.1, 2.0, 0, 1]]),
],
[
np.array([[10, 0, 100, 400], [-1, 0.3, 0, 1000], [0, 0, 0, 0]]),
np.array([[100, 0, -100, 123], [-40, 10, 0, 1100], [0, 0, 0, 0]]),
np.array([[102, 0, -97, 133], [-33, 11, 0, 900], [0, 0, 0, 0]]),
],
[
np.array([[10, 0, 100, 400], [-1, 0.3, 0, 1000], [2, 3, 4, 5]]),
np.array([[100, 0, -100, 123], [-40, 10, 0, 1100], [-1, 2, 3, 5]]),
np.array([[102, 0, -97, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]),
np.array([[-2.7, 0, 33, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]),
np.array([[-2.7, 0, 33, 133], [-33, 11, 0, 900], [1, 1, 1, 1]]),
],
]
# Some additional random matrices.
def generate_random_matrices_with_same_non_zeros(rng, shape, n, scale, sparsity):
zero_idx = rng.random(shape) > sparsity
matrices = [rng.random(shape) * scale for _ in range(n)]
for mat in matrices:
mat[zero_idx] = 0
return matrices
rng = np.random.default_rng(seed=123)
shapes = [
(10, 3),
(100, 4),
(1000, 50),
]
scales = [1, 10, 100]
n_matrices = [5, 10, 20]
sparsity_vals = [0.1, 0.5, 0.9]
matrix_list += [
generate_random_matrices_with_same_non_zeros(rng, shape, n, scale, sparsity)
for scale in scales
for shape in shapes
for n in n_matrices
for sparsity in sparsity_vals
]
@pytest.fixture(params=[sparse.csc_matrix, sparse.csr_matrix])
def sparsity_type(request):
return request.param
@pytest.fixture(params=range(len(matrix_list)))
def sparse_and_dense_matrices(sparsity_type, request):
matrices = matrix_list[request.param]
sparse_mat = [sparsity_type(mat) for mat in matrices]
return matrices, sparse_mat, sparsity_type
@pytest.fixture
def sparse_and_dense_matrices_and_lse(sparse_and_dense_matrices):
matrices, sparse_mat, sparsity_type = sparse_and_dense_matrices
lse = sparse_reduce_lse(*sparse_mat)
return matrices, sparse_mat, sparsity_type, lse
def test_reduce_sparse_lse_type(sparse_and_dense_matrices_and_lse):
_, _, sparsity_type, lse = sparse_and_dense_matrices_and_lse
assert isinstance(lse, sparsity_type)
def test_reduce_sparse_lse_with_dense(sparse_and_dense_matrices_and_lse):
matrices, sparse_mat, sparsity_type, lse = sparse_and_dense_matrices_and_lse
dense_lse = special.logsumexp(np.stack(matrices, axis=2), axis=2)
dense_lse[matrices[0] == 0] = 0
assert np.all(dense_lse == lse)
########################################################################################
# Test log_factorial, approx_log_factorial and log_binomial_coefficient
@pytest.mark.parametrize("a", range(100))
def test_stirling_approx_against_log_factorial(a): | assert np.allclose(approx_log_factorial(a), log_factorial(a)) | 2 | 2023-12-06 22:01:38+00:00 | 4k |
sailfishos-chum/sailfishos-chum.github.io | chumweb/atom_feed.py | [
{
"identifier": "CONFIG",
"path": "chumweb/config.py",
"snippet": "CONFIG = init_config()"
},
{
"identifier": "Package",
"path": "chumweb/package.py",
"snippet": "class Package:\n \"\"\"\n Metadata of a RPM package with associated Chum metadata\n \"\"\"\n name: str\n summa... | from datetime import datetime
from typing import List, Optional, Iterable
from xml.dom.minidom import Document, Element
from chumweb import CONFIG
from chumweb.package import Package | 2,745 | """
This package contains methods for writing Atom feeds
"""
# Reuse the namespace that the primary.xml.gz file uses
REPO_NS = "http://linux.duke.edu/metadata/common"
def create_atom_feed(public_url: str, title: str, updated: datetime) -> Document:
"""
Creates a basic Atom feed, with no entries
https://validator.w3.org/feed/docs/atom.html
:return: The created feed as an XML Document
"""
doc = Document()
feed = doc.createElementNS("http://www.w3.org/2005/Atom", "feed")
feed.setAttribute("xmlns", "http://www.w3.org/2005/Atom")
feed.setAttribute("xmlns:repo", "http://linux.duke.edu/metadata/common")
doc.appendChild(feed)
el_id = _create_simple_element(doc, "id", public_url)
feed.appendChild(el_id)
el_title = _create_simple_element(doc, "title", title)
feed.appendChild(el_title)
el_updated = _create_simple_element(doc, "updated", updated.isoformat())
feed.appendChild(el_updated)
| """
This package contains methods for writing Atom feeds
"""
# Reuse the namespace that the primary.xml.gz file uses
REPO_NS = "http://linux.duke.edu/metadata/common"
def create_atom_feed(public_url: str, title: str, updated: datetime) -> Document:
"""
Creates a basic Atom feed, with no entries
https://validator.w3.org/feed/docs/atom.html
:return: The created feed as an XML Document
"""
doc = Document()
feed = doc.createElementNS("http://www.w3.org/2005/Atom", "feed")
feed.setAttribute("xmlns", "http://www.w3.org/2005/Atom")
feed.setAttribute("xmlns:repo", "http://linux.duke.edu/metadata/common")
doc.appendChild(feed)
el_id = _create_simple_element(doc, "id", public_url)
feed.appendChild(el_id)
el_title = _create_simple_element(doc, "title", title)
feed.appendChild(el_title)
el_updated = _create_simple_element(doc, "updated", updated.isoformat())
feed.appendChild(el_updated)
| el_icon = _create_simple_element(doc, "icon", CONFIG.public_url + "static/img/sailfishos-chum.png") | 0 | 2023-12-14 19:25:31+00:00 | 4k |
oVo-HxBots/URLUploadBot | Uploader/echo.py | [
{
"identifier": "Translation",
"path": "Uploader/script.py",
"snippet": "class Translation(object):\n\n START_TEXT = \"\"\"\nHi {} \n\nI am Powerful Url Uploader Bot\n \n\"\"\"\n\n HELP_TEXT = \"\"\"\n\n# Send me the Google Drive | ytdl | direct links.\n\n# Select the desired option.\n\n# Then be ... | import os
import time
import json
import asyncio
import logging
from opencc import OpenCC
from pyrogram.types import Thumbnail
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from Uploader.config import Config
from sample_config import Config
from Uploader.script import Translation
from Uploader.functions.ran_text import random_char
from Uploader.functions.display_progress import humanbytes
from Uploader.functions.display_progress import humanbytes | 3,432 | l = entity.length
url = url[o:o + l]
if Config.HTTP_PROXY != "":
command_to_exec = [
"yt-dlp",
"--no-warnings",
"--allow-dynamic-mpd",
"-j",
url,
"--proxy", Config.HTTP_PROXY
]
else:
command_to_exec = [
"yt-dlp",
"--no-warnings",
"--allow-dynamic-mpd",
"-j",
url
]
if youtube_dl_username is not None:
command_to_exec.append("--username")
command_to_exec.append(youtube_dl_username)
if youtube_dl_password is not None:
command_to_exec.append("--password")
command_to_exec.append(youtube_dl_password)
logger.info(command_to_exec)
chk = await bot.send_message(
chat_id=update.chat.id,
text='Proccesing your ⌛',
disable_web_page_preview=True,
reply_to_message_id=update.id
)
if update.from_user.id not in Config.AUTH_USERS:
if str(update.from_user.id) in Config.ADL_BOT_RQ:
current_time = time.time()
previous_time = Config.ADL_BOT_RQ[str(update.from_user.id)]
process_max_timeout = round(Config.PROCESS_MAX_TIMEOUT/60)
present_time = round(Config.PROCESS_MAX_TIMEOUT -
(current_time - previous_time))
Config.ADL_BOT_RQ[str(update.from_user.id)] = time.time()
if round(current_time - previous_time) < Config.PROCESS_MAX_TIMEOUT:
await bot.edit_message_text(chat_id=update.chat.id, text=Translation.FREE_USER_LIMIT_Q_SZE.format(process_max_timeout, present_time), disable_web_page_preview=True, message_id=chk.id)
return
else:
Config.ADL_BOT_RQ[str(update.from_user.id)] = time.time()
process = await asyncio.create_subprocess_exec(
*command_to_exec,
# stdout must a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
logger.info(e_response)
t_response = stdout.decode().strip()
# logger.info(t_response)
# https://github.com/rg3/youtube-dl/issues/2630#issuecomment-38635239
if e_response and "nonnumeric port" not in e_response:
# logger.warn("Status : FAIL", exc.returncode, exc.output)
error_message = e_response.replace(
"please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.", "")
if "This video is only available for registered users." in error_message:
error_message += Translation.SET_CUSTOM_USERNAME_PASSWORD
await chk.delete()
time.sleep(40.5)
await bot.send_message(
chat_id=update.chat.id,
text=Translation.NO_VOID_FORMAT_FOUND.format(str(error_message)),
reply_to_message_id=update.id,
disable_web_page_preview=True
)
return False
if t_response:
# logger.info(t_response)
x_reponse = t_response
if "\n" in x_reponse:
x_reponse, _ = x_reponse.split("\n")
response_json = json.loads(x_reponse)
randem = random_char(5)
save_ytdl_json_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + f'{randem}' + ".json"
with open(save_ytdl_json_path, "w", encoding="utf8") as outfile:
json.dump(response_json, outfile, ensure_ascii=False)
# logger.info(response_json)
inline_keyboard = []
duration = None
if "duration" in response_json:
duration = response_json["duration"]
if "formats" in response_json:
for formats in response_json["formats"]:
format_id = formats.get("format_id")
format_string = formats.get("format_note")
if format_string is None:
format_string = formats.get("format")
if "DASH" in format_string.upper():
continue
format_ext = formats.get("ext")
if formats.get('filesize'):
size = formats['filesize']
elif formats.get('filesize_approx'):
size = formats['filesize_approx']
else:
size = 0
cb_string_video = "{}|{}|{}|{}".format(
"video", format_id, format_ext, randem)
cb_string_file = "{}|{}|{}|{}".format(
"file", format_id, format_ext, randem)
if format_string is not None and not "audio only" in format_string:
ikeyboard = [
InlineKeyboardButton(
"🎬 " + format_string + " " + format_ext +
| # MIT License
# Copyright (c) 2022 Hash Minner
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
if bool(os.environ.get("WEBHOOK")):
else:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logging.getLogger("pyrogram").setLevel(logging.WARNING)
s2tw = OpenCC('s2tw.json').convert
@Client.on_message(filters.private & filters.regex(pattern=".*http.*"))
async def echo(bot, update):
logger.info(update.from_user)
url = update.text
youtube_dl_username = None
youtube_dl_password = None
file_name = None
if "youtu.be" in url:
return await update.reply_text(
"**Choose Download type**",
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"Audio 🎵", callback_data="ytdl_audio"),
InlineKeyboardButton(
"Video 🎬", callback_data="ytdl_video")
]
]
),
quote=True
)
if "|" in url:
url_parts = url.split("|")
if len(url_parts) == 2:
url = url_parts[0]
file_name = url_parts[1]
elif len(url_parts) == 4:
url = url_parts[0]
file_name = url_parts[1]
youtube_dl_username = url_parts[2]
youtube_dl_password = url_parts[3]
else:
for entity in update.entities:
if entity.type == "text_link":
url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
url = url[o:o + l]
if url is not None:
url = url.strip()
if file_name is not None:
file_name = file_name.strip()
# https://stackoverflow.com/a/761825/4723940
if youtube_dl_username is not None:
youtube_dl_username = youtube_dl_username.strip()
if youtube_dl_password is not None:
youtube_dl_password = youtube_dl_password.strip()
logger.info(url)
logger.info(file_name)
else:
for entity in update.entities:
if entity.type == "text_link":
url = entity.url
elif entity.type == "url":
o = entity.offset
l = entity.length
url = url[o:o + l]
if Config.HTTP_PROXY != "":
command_to_exec = [
"yt-dlp",
"--no-warnings",
"--allow-dynamic-mpd",
"-j",
url,
"--proxy", Config.HTTP_PROXY
]
else:
command_to_exec = [
"yt-dlp",
"--no-warnings",
"--allow-dynamic-mpd",
"-j",
url
]
if youtube_dl_username is not None:
command_to_exec.append("--username")
command_to_exec.append(youtube_dl_username)
if youtube_dl_password is not None:
command_to_exec.append("--password")
command_to_exec.append(youtube_dl_password)
logger.info(command_to_exec)
chk = await bot.send_message(
chat_id=update.chat.id,
text='Proccesing your ⌛',
disable_web_page_preview=True,
reply_to_message_id=update.id
)
if update.from_user.id not in Config.AUTH_USERS:
if str(update.from_user.id) in Config.ADL_BOT_RQ:
current_time = time.time()
previous_time = Config.ADL_BOT_RQ[str(update.from_user.id)]
process_max_timeout = round(Config.PROCESS_MAX_TIMEOUT/60)
present_time = round(Config.PROCESS_MAX_TIMEOUT -
(current_time - previous_time))
Config.ADL_BOT_RQ[str(update.from_user.id)] = time.time()
if round(current_time - previous_time) < Config.PROCESS_MAX_TIMEOUT:
await bot.edit_message_text(chat_id=update.chat.id, text=Translation.FREE_USER_LIMIT_Q_SZE.format(process_max_timeout, present_time), disable_web_page_preview=True, message_id=chk.id)
return
else:
Config.ADL_BOT_RQ[str(update.from_user.id)] = time.time()
process = await asyncio.create_subprocess_exec(
*command_to_exec,
# stdout must a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
e_response = stderr.decode().strip()
logger.info(e_response)
t_response = stdout.decode().strip()
# logger.info(t_response)
# https://github.com/rg3/youtube-dl/issues/2630#issuecomment-38635239
if e_response and "nonnumeric port" not in e_response:
# logger.warn("Status : FAIL", exc.returncode, exc.output)
error_message = e_response.replace(
"please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; see https://yt-dl.org/update on how to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.", "")
if "This video is only available for registered users." in error_message:
error_message += Translation.SET_CUSTOM_USERNAME_PASSWORD
await chk.delete()
time.sleep(40.5)
await bot.send_message(
chat_id=update.chat.id,
text=Translation.NO_VOID_FORMAT_FOUND.format(str(error_message)),
reply_to_message_id=update.id,
disable_web_page_preview=True
)
return False
if t_response:
# logger.info(t_response)
x_reponse = t_response
if "\n" in x_reponse:
x_reponse, _ = x_reponse.split("\n")
response_json = json.loads(x_reponse)
randem = random_char(5)
save_ytdl_json_path = Config.DOWNLOAD_LOCATION + \
"/" + str(update.from_user.id) + f'{randem}' + ".json"
with open(save_ytdl_json_path, "w", encoding="utf8") as outfile:
json.dump(response_json, outfile, ensure_ascii=False)
# logger.info(response_json)
inline_keyboard = []
duration = None
if "duration" in response_json:
duration = response_json["duration"]
if "formats" in response_json:
for formats in response_json["formats"]:
format_id = formats.get("format_id")
format_string = formats.get("format_note")
if format_string is None:
format_string = formats.get("format")
if "DASH" in format_string.upper():
continue
format_ext = formats.get("ext")
if formats.get('filesize'):
size = formats['filesize']
elif formats.get('filesize_approx'):
size = formats['filesize_approx']
else:
size = 0
cb_string_video = "{}|{}|{}|{}".format(
"video", format_id, format_ext, randem)
cb_string_file = "{}|{}|{}|{}".format(
"file", format_id, format_ext, randem)
if format_string is not None and not "audio only" in format_string:
ikeyboard = [
InlineKeyboardButton(
"🎬 " + format_string + " " + format_ext + | " " + humanbytes(size) + " ", | 3 | 2023-12-09 03:24:55+00:00 | 4k |
ZS-YANG/FemtoDet-v3 | mmdet/models/losses/iou_loss.py | [
{
"identifier": "MODELS",
"path": "mmdet/registry.py",
"snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])"
},
{
"identifier": "bbox_overlaps",
"path": "mmdet/structures/bbox/bbox_overlaps.py",
"snippet": "def bbox_overlaps(bboxes1, bboxes2, mode='i... | import math
import warnings
import torch
import torch.nn as nn
from typing import Optional
from torch import Tensor
from mmdet.registry import MODELS
from mmdet.structures.bbox import bbox_overlaps
from .utils import weighted_loss | 3,363 | # Copyright (c) OpenMMLab. All rights reserved.
@weighted_loss
def iou_loss(pred: Tensor,
target: Tensor,
linear: bool = False,
mode: str = 'log',
eps: float = 1e-6) -> Tensor:
"""IoU loss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
The loss is calculated as negative log of IoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
linear (bool, optional): If True, use linear scale of loss instead of
log scale. Default: False.
mode (str): Loss scaling mode, including "linear", "square", and "log".
Default: 'log'
eps (float): Epsilon to avoid log(0).
Return:
Tensor: Loss tensor.
"""
assert mode in ['linear', 'square', 'log']
if linear:
mode = 'linear'
warnings.warn('DeprecationWarning: Setting "linear=True" in '
'iou_loss is deprecated, please use "mode=`linear`" '
'instead.')
# avoid fp16 overflow
if pred.dtype == torch.float16:
fp16 = True
pred = pred.to(torch.float32)
else:
fp16 = False
| # Copyright (c) OpenMMLab. All rights reserved.
@weighted_loss
def iou_loss(pred: Tensor,
target: Tensor,
linear: bool = False,
mode: str = 'log',
eps: float = 1e-6) -> Tensor:
"""IoU loss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
The loss is calculated as negative log of IoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
linear (bool, optional): If True, use linear scale of loss instead of
log scale. Default: False.
mode (str): Loss scaling mode, including "linear", "square", and "log".
Default: 'log'
eps (float): Epsilon to avoid log(0).
Return:
Tensor: Loss tensor.
"""
assert mode in ['linear', 'square', 'log']
if linear:
mode = 'linear'
warnings.warn('DeprecationWarning: Setting "linear=True" in '
'iou_loss is deprecated, please use "mode=`linear`" '
'instead.')
# avoid fp16 overflow
if pred.dtype == torch.float16:
fp16 = True
pred = pred.to(torch.float32)
else:
fp16 = False
| ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps) | 1 | 2023-12-11 15:23:03+00:00 | 4k |
mit-ll-ai-technology/maite | src/maite/_internals/interop/base_model.py | [
{
"identifier": "is_list_of_type",
"path": "src/maite/_internals/protocols/type_guards.py",
"snippet": "def is_list_of_type(d: Any, guard: Type[T] = Any) -> TypeGuard[List[T]]:\n \"\"\"\n Check if object is a list of dictionaries.\n\n Parameters\n ----------\n d : Any\n The object ... | from collections import UserDict
from dataclasses import dataclass
from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union
from torch import Tensor
from ..protocols.type_guards import is_list_of_type, is_typed_dict
from ..protocols.typing import ArrayLike, HasDataImage, SupportsArray
from .utils import is_pil_image
import torch as tr | 1,667 | # Copyright 2023, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
@dataclass
class InteropModelMetadata:
model_name: str = ""
provider: str = ""
task: str = ""
class BaseModel:
| # Copyright 2023, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
@dataclass
class InteropModelMetadata:
model_name: str = ""
provider: str = ""
task: str = ""
class BaseModel: | preprocessor: Optional[Callable[[Union[HasDataImage, SupportsArray]], HasDataImage]] | 2 | 2023-12-12 15:34:16+00:00 | 4k |
berglh/zen-focus | zf/ui/sidebar_pane.py | [
{
"identifier": "About",
"path": "zf/ui/about_pane.py",
"snippet": "class About():\n \"\"\"\n About class defines the About content pane\n\n It generates the structure in memory to apply to the navigation split view\n \"\"\"\n def __init__(self):\n self.scrolled_window = Gtk.Scroll... | from gi.repository import Adw, Gdk, Gio, GObject, Gtk
from .about_pane import About
from .power_pane import Power
from .processor_pane import Processor
from .settings_pane import Settings
from .temperature_pane import Temperature | 2,510 |
class ListItem():
"""
ListItem class defines the sidebar button widget
"""
def __init__(self, title: str, icon: str, pane: object) -> None:
self.title = title
self.icon = icon
self.pane = pane
class Sidebar(Adw.NavigationPage):
"""
Sidebar class defines the sidebar pane for Zen Focus
"""
def __init__(self):
super().__init__()
# Primary Settings for Sidebar
self.set_title("Zen Focus")
self.set_vexpand(True)
# Set menu bar min width
self.set_size_request(220, -1)
# Define sidebar header box
self.header_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
self.theme = Gtk.IconTheme.get_for_display(Gdk.Display.get_default())
self.theme.add_search_path(path='icons')
self.header_logo = Gtk.Image.new_from_icon_name("zen-focus-symbolic")
self.header_label = Gtk.Label(label="Zen Focus")
self.header_box.append(self.header_logo)
self.header_box.append(self.header_label)
# The sidebar show content button when collapsed
self.show_button = Gtk.ToggleButton(
icon_name="go-next-symbolic",
active=False, visible=False, margin_top=0, margin_bottom=0
)
# Bind to the parent Window split view show-content property
self.application = Gio.Application.get_default()
self.show_button.bind_property(
"active",
self.application.split_view,
"show-content",
GObject.BindingFlags.BIDIRECTIONAL
)
# Connect to the 'notify::folded' signal of the Adw.NavigationSplitView to show the button
self.application.split_view.connect("notify::collapsed", self.on_split_view_folded, self.show_button)
# Add the toolbar and header to the sidebar
self.toolbar = Adw.ToolbarView()
self.header = Adw.HeaderBar()
self.header.set_title_widget(self.header_box)
self.header.set_show_back_button(True)
self.header.set_can_focus(False)
self.header.set_decoration_layout('menu:close')
self.header.pack_end(self.show_button)
self.toolbar.set_content()
self.toolbar.add_top_bar(self.header)
self.set_child(self.toolbar)
self.list = Gtk.ListBox()
self.list.set_vexpand(False)
self.list.set_margin_top(12)
self.list.set_margin_start(6)
self.list.set_margin_end(6)
self.list.set_selection_mode(Gtk.SelectionMode.SINGLE)
# Connect the signal
self.list.connect("row-activated", self.on_row_activated)
# The sidebar list items to render as buttons
# These need to be defined in the sidebar class otherwise the
# the primary Adw.ApplicationWindow and settings is undefined
top_list_items = [
|
class ListItem():
"""
ListItem class defines the sidebar button widget
"""
def __init__(self, title: str, icon: str, pane: object) -> None:
self.title = title
self.icon = icon
self.pane = pane
class Sidebar(Adw.NavigationPage):
"""
Sidebar class defines the sidebar pane for Zen Focus
"""
def __init__(self):
super().__init__()
# Primary Settings for Sidebar
self.set_title("Zen Focus")
self.set_vexpand(True)
# Set menu bar min width
self.set_size_request(220, -1)
# Define sidebar header box
self.header_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=6)
self.theme = Gtk.IconTheme.get_for_display(Gdk.Display.get_default())
self.theme.add_search_path(path='icons')
self.header_logo = Gtk.Image.new_from_icon_name("zen-focus-symbolic")
self.header_label = Gtk.Label(label="Zen Focus")
self.header_box.append(self.header_logo)
self.header_box.append(self.header_label)
# The sidebar show content button when collapsed
self.show_button = Gtk.ToggleButton(
icon_name="go-next-symbolic",
active=False, visible=False, margin_top=0, margin_bottom=0
)
# Bind to the parent Window split view show-content property
self.application = Gio.Application.get_default()
self.show_button.bind_property(
"active",
self.application.split_view,
"show-content",
GObject.BindingFlags.BIDIRECTIONAL
)
# Connect to the 'notify::folded' signal of the Adw.NavigationSplitView to show the button
self.application.split_view.connect("notify::collapsed", self.on_split_view_folded, self.show_button)
# Add the toolbar and header to the sidebar
self.toolbar = Adw.ToolbarView()
self.header = Adw.HeaderBar()
self.header.set_title_widget(self.header_box)
self.header.set_show_back_button(True)
self.header.set_can_focus(False)
self.header.set_decoration_layout('menu:close')
self.header.pack_end(self.show_button)
self.toolbar.set_content()
self.toolbar.add_top_bar(self.header)
self.set_child(self.toolbar)
self.list = Gtk.ListBox()
self.list.set_vexpand(False)
self.list.set_margin_top(12)
self.list.set_margin_start(6)
self.list.set_margin_end(6)
self.list.set_selection_mode(Gtk.SelectionMode.SINGLE)
# Connect the signal
self.list.connect("row-activated", self.on_row_activated)
# The sidebar list items to render as buttons
# These need to be defined in the sidebar class otherwise the
# the primary Adw.ApplicationWindow and settings is undefined
top_list_items = [ | ListItem("Temperatures", 'temp-symbolic', Temperature()), | 4 | 2023-12-07 21:58:54+00:00 | 4k |
jupyter-server/pycrdt-websocket | tests/test_ystore.py | [
{
"identifier": "SQLiteYStore",
"path": "pycrdt_websocket/ystore.py",
"snippet": "class SQLiteYStore(BaseYStore):\n \"\"\"A YStore which uses an SQLite database.\n Unlike file-based YStores, the Y updates of all documents are stored in the same database.\n\n Subclass to point to your database f... | import os
import tempfile
import time
import aiosqlite
import pytest
from pathlib import Path
from unittest.mock import patch
from pycrdt_websocket.ystore import SQLiteYStore, TempFileYStore | 1,962 |
class MetadataCallback:
def __init__(self):
self.i = 0
async def __call__(self):
res = str(self.i).encode()
self.i += 1
return res
class MyTempFileYStore(TempFileYStore):
prefix_dir = "test_temp_"
MY_SQLITE_YSTORE_DB_PATH = str(Path(tempfile.mkdtemp(prefix="test_sql_")) / "ystore.db")
|
class MetadataCallback:
def __init__(self):
self.i = 0
async def __call__(self):
res = str(self.i).encode()
self.i += 1
return res
class MyTempFileYStore(TempFileYStore):
prefix_dir = "test_temp_"
MY_SQLITE_YSTORE_DB_PATH = str(Path(tempfile.mkdtemp(prefix="test_sql_")) / "ystore.db")
| class MySQLiteYStore(SQLiteYStore): | 0 | 2023-12-08 10:38:31+00:00 | 4k |
juniberry/PacketIRC | packetirc.py | [
{
"identifier": "LOG_FILE",
"path": "settings.py",
"snippet": "LOG_FILE = \"packetirc.log\""
},
{
"identifier": "LOG_LEVEL",
"path": "settings.py",
"snippet": "LOG_LEVEL = logging.INFO"
},
{
"identifier": "SERVER",
"path": "settings.py",
"snippet": "SERVER = \"\""
},
... | import socket
import threading
import random
import time
import logging
import re
import irc.client
import os
import sys
from settings import LOG_FILE, LOG_LEVEL, SERVER, PORT, PASS, CHANNEL, HIDE_SERVER, MAX_RETRIES, RETRY_DELAY, HELP_INFO, WELCOME_MESSAGE, BAD_WORDS_FILE, BAD_WORDS_FILTER | 2,606 | def on_namreply(self, connection, event):
"""
Triggered when joining a channel or requesting NAMES.
"""
channel = event.arguments[1]
names = event.arguments[2].split()
# Print the names directly
print(f"Users in {channel}: {', '.join(names)}")
def on_quit(self, connection, event):
"""
Triggered when a luser quits in a channel we are in.
"""
nickname = event.source.nick
reason = event.arguments[0] if event.arguments else ""
print(f"* {nickname} has quit ({reason})")
def on_privmsg(self, connection, event):
"""
Triggered when a user sends us a directed PRIVMSG.
"""
sender = event.source.nick
message = event.arguments[0]
print(f"** {sender}: {message}")
def on_pubmsg(self, connection, event):
"""
Triggered from a PRIVMSG sent to a channel we are in.
"""
# Handle public messages received in the channel
nickname = event.source.nick
message = event.arguments[0]
print(f"<{nickname}> {message}")
def on_action(self, connection, event):
"""
Triggered by emotive ACTIONs, be they on a channel or directed.
"""
nickname = event.source.nick
message = event.arguments[0]
channel = event.target
print(f"* {nickname} {message}")
def on_topicprotected(self, connection, event):
"""
Apparently this is supposed to trigger when we try to change the topic
but are not permitted to.
"""
print(f"** You don't have permission to change the topic.")
# TODO:
## User doesn't have perm to set topic.
## This seems to be broken?
def on_topic(self, connection, event):
"""
Triggered by the server to indicate that the topic has been changed.
"""
who = event.source.nick
#channel = event.target
new_topic = event.arguments[0]
print(f"* {who} changed the topic to: {new_topic}")
def on_currenttopic(self, connection, event):
"""
Triggered by the server to indicate the current topic of a channel, from our query request.
"""
channel = event.arguments[0]
topic = event.arguments[1]
print(f"** {channel}: {topic}")
def on_list(self, connection, event):
"""
Handles the event for LISTing channels. This method is called for each channel in the list.
This can be a firehose ...we might want to put a flood or limit on this eventually...soon..ish
"""
channel = event.arguments[0] if event.arguments else ''
user_count = event.arguments[1] if len(event.arguments) > 1 else ''
topic = event.arguments[2] if len(event.arguments) > 2 else ''
# Truncate topic to 60 characters if longer.
if len(topic) > 60:
topic = topic[:57] + '...'
print(f"{channel} [{user_count}] {topic}")
def handle_user_input(irc_client):
"""
Continuously handle user input and processes IRC style commands.
This handler is run within it's own thread aside from the PacketIRC client class.
"""
global is_running
# Threaded runloop. Toggle is_running to False to exit the user interface thread.
while is_running:
try:
# Fetch user input, strip whitespace and log it.
message = input().strip()
logging.info(f"{callsign} >>> {message}")
# Check to see if the user message is a command.
if message.startswith('/'):
# It likely is, try to process it.
#
# Split the message into command and command_args
parts = message.split(' ', 1)
command = parts[0].lower()
command_args = parts[1] if len(parts) > 1 else ""
# /QUIT - Disconnect and exit with optional quit message.
if command == '/quit':
# Set the running state flag off, to exit thread runloop.
is_running = False
# If the user specified a message, use it, otherwise plug in 73.
quit_message = command_args if command_args else "73"
# We checking for naughty words? If so clean them.
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
______ _ _____ ______ ______
(_____ \ | | _ (_____|_____ \ / _____)
_____) )___ ____| | _ ____| |_ _ _____) ) /
| ____/ _ |/ ___) | / ) _ ) _) | | (_____ (| |
| | ( ( | ( (___| |< ( (/ /| |__ _| |_ | | \_____
|_| \_||_|\____)_| \_)____)\___|_____) |_|\______)
PacketIRC is a bandwidth-conscious IRC client specifically designed for packet radio communication.
It includes a client-side implementation with simplified IRC functionalities.
File: client.py
Author: Daria Juniper @juniberry
Date: 10-Dec-2023
Changes:
12-Dec-2023 - Initial version 1.0 beta.
"""
# Import settings from an external configuration file.
# Globals
VERSION = 'v1.1b'
BAD_WORDS = []
HOME_PATH = os.path.dirname(os.path.abspath(__file__)) # Grab home path for use with logging et al.
# State
is_running = True
# Initialize logging.
logging.basicConfig(filename=os.path.join(HOME_PATH, LOG_FILE), filemode='w', level=LOG_LEVEL, format='%(asctime)s - %(levelname)s - %(message)s')
# PacketIRC Client Class
class PacketIRCClient(irc.client.SimpleIRCClient):
"""
PacketIRCClient class extends irc.client.SimpleIRCClient.
It includes methods for handling various IRC events and actions.
"""
def __init__(self, callsign):
"""
Initialize the IRC client with a callsign and current_channel property.
The callsign should be passed from the packet switch and the client is
designed to only operate on a single channel for sanity/bandwidth.
"""
super().__init__()
self.callsign = callsign
self.current_channel = None
def on_disconnect(self, connection, event):
global is_running
is_running = False
logging.info(f"{callsign} Disconnected from server.")
print("** Disconnected.")
def on_error(self, connection, event):
"""
Handle any errors encountered during the IRC session.
We will not inform the user since many of these errors can be unhelpful
or contain information you dont wish broadcast on the air. So we log it.
"""
logging.error(f"{callsign} on_error(): {event}")
def on_motdstart(self, connection, event):
"""
Triggered when the MOTD listing begins.
"""
print("** Message of the Day")
def on_motd(self, connection, event):
"""
Dump out lines of the MOTD
Apparently this is only fired once? But could be multiple times?
"""
for line in event.arguments:
print(line)
def on_notice(self, connection, event):
"""
Handle Notices
Notices can come from the server, users and sometimes seemingly out of the aether.
"""
source = event.source.nick if event.source else "SERVER"
text = event.arguments[0]
print(f"-{source}- {text}")
def on_welcome(self, connection, event):
"""
Triggered when initially connected to an IRC server.
We are going to use this to set up our initial channel if set in settings.
"""
server_name = connection.get_server_name()
print(f"** Connected to {server_name}")
# Request client send a keepalive message every 30 sec.
connection.set_keepalive(30)
# If CHANNEL is specified in settings.py then join it.
if CHANNEL:
connection.join(CHANNEL)
def on_whoisuser(self, connection, event):
"""
Triggered when the server returns query info for a WHOIS
"""
nick = event.arguments[0]
username = event.arguments[1]
hostname = event.arguments[2]
server = event.arguments[3]
real_name = event.arguments[4]
print(f"** WHOIS for {nick}")
print(f" {username}@{hostname}")
# Not all IRCd's will return the server, so this needs to be optional.
if not all(char in ' *' for char in server):
print(f" Server: {server}")
print(f" Name: {real_name}")
def on_nicknameinuse(self, connection, event):
"""
Nickname is in use!
Oh noes! Let's do something silly like randomly pick a number, tack that on
to the callsign and back away slowly............ >_>
"""
self.callsign += "_" + str(random.randint(0, 999))
connection.nick(self.callsign)
def on_join(self, connection, event):
"""
Triggered when the user joins a channel (including the user).
If this is us joining a channel, action it as such.
If this is a new user joining our channel, action it as....
"""
nickname = event.source.nick
channel = event.target
# If the target of the notice is us, we're the ones joining.
if nickname == self.connection.get_nickname():
# Reset current channel if we're joining a new one
self.current_channel = channel
print(f"** Joined {channel}")
# Request the topic for the new channel
connection.topic(channel)
else:
# Nope, just another luser joining the idle sesh.
print(f"* {nickname} has joined {channel}")
def on_part(self, connection, event):
"""
Triggered when a luser leaves a channel.
"""
nickname = event.source.nick
channel = event.target
reason = event.arguments[0] if event.arguments else ""
print(f"* {nickname} has left {channel} ({reason})")
def on_namreply(self, connection, event):
"""
Triggered when joining a channel or requesting NAMES.
"""
channel = event.arguments[1]
names = event.arguments[2].split()
# Print the names directly
print(f"Users in {channel}: {', '.join(names)}")
def on_quit(self, connection, event):
"""
Triggered when a luser quits in a channel we are in.
"""
nickname = event.source.nick
reason = event.arguments[0] if event.arguments else ""
print(f"* {nickname} has quit ({reason})")
def on_privmsg(self, connection, event):
"""
Triggered when a user sends us a directed PRIVMSG.
"""
sender = event.source.nick
message = event.arguments[0]
print(f"** {sender}: {message}")
def on_pubmsg(self, connection, event):
"""
Triggered from a PRIVMSG sent to a channel we are in.
"""
# Handle public messages received in the channel
nickname = event.source.nick
message = event.arguments[0]
print(f"<{nickname}> {message}")
def on_action(self, connection, event):
"""
Triggered by emotive ACTIONs, be they on a channel or directed.
"""
nickname = event.source.nick
message = event.arguments[0]
channel = event.target
print(f"* {nickname} {message}")
def on_topicprotected(self, connection, event):
"""
Apparently this is supposed to trigger when we try to change the topic
but are not permitted to.
"""
print(f"** You don't have permission to change the topic.")
# TODO:
## User doesn't have perm to set topic.
## This seems to be broken?
def on_topic(self, connection, event):
"""
Triggered by the server to indicate that the topic has been changed.
"""
who = event.source.nick
#channel = event.target
new_topic = event.arguments[0]
print(f"* {who} changed the topic to: {new_topic}")
def on_currenttopic(self, connection, event):
"""
Triggered by the server to indicate the current topic of a channel, from our query request.
"""
channel = event.arguments[0]
topic = event.arguments[1]
print(f"** {channel}: {topic}")
def on_list(self, connection, event):
"""
Handles the event for LISTing channels. This method is called for each channel in the list.
This can be a firehose ...we might want to put a flood or limit on this eventually...soon..ish
"""
channel = event.arguments[0] if event.arguments else ''
user_count = event.arguments[1] if len(event.arguments) > 1 else ''
topic = event.arguments[2] if len(event.arguments) > 2 else ''
# Truncate topic to 60 characters if longer.
if len(topic) > 60:
topic = topic[:57] + '...'
print(f"{channel} [{user_count}] {topic}")
def handle_user_input(irc_client):
"""
Continuously handle user input and processes IRC style commands.
This handler is run within it's own thread aside from the PacketIRC client class.
"""
global is_running
# Threaded runloop. Toggle is_running to False to exit the user interface thread.
while is_running:
try:
# Fetch user input, strip whitespace and log it.
message = input().strip()
logging.info(f"{callsign} >>> {message}")
# Check to see if the user message is a command.
if message.startswith('/'):
# It likely is, try to process it.
#
# Split the message into command and command_args
parts = message.split(' ', 1)
command = parts[0].lower()
command_args = parts[1] if len(parts) > 1 else ""
# /QUIT - Disconnect and exit with optional quit message.
if command == '/quit':
# Set the running state flag off, to exit thread runloop.
is_running = False
# If the user specified a message, use it, otherwise plug in 73.
quit_message = command_args if command_args else "73"
# We checking for naughty words? If so clean them. | if BAD_WORDS_FILTER: | 12 | 2023-12-13 19:08:48+00:00 | 4k |
Tps-F/rvc-onnx-test | onnxlib/modules.py | [
{
"identifier": "commons",
"path": "onnxlib/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\ndef get_padding(kernel_size, dilation=1):\ndef kl_divergence(m_p, logs_p, m_q, logs_q):\ndef rand_gumbel(shape):\ndef rand_gumbel_like(x):\ndef slice_segments(x, ids_str, segment_size=4):\ndef ... | import math
import torch
from typing import Optional, Tuple
from torch import nn
from torch.nn import Conv1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, weight_norm
from onnxlib import commons
from onnxlib.commons import get_padding, init_weights
from onnxlib.transforms import piecewise_rational_quadratic_transform | 2,163 | class DDSConv(nn.Module):
"""
Dialted and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
super(DDSConv, self).__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = float(p_dropout)
self.drop = nn.Dropout(float(p_dropout))
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size**i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(
nn.Conv1d(
channels,
channels,
kernel_size,
groups=channels,
dilation=dilation,
padding=padding,
)
)
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g: Optional[torch.Tensor] = None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = float(p_dropout)
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(float(p_dropout))
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(
self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None
):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i, (in_layer, res_skip_layer) in enumerate(
zip(self.in_layers, self.res_skip_layers)
):
x_in = in_layer(x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
|
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super(LayerNorm, self).__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(
self,
in_channels,
hidden_channels,
out_channels,
kernel_size,
n_layers,
p_dropout,
):
super(ConvReluNorm, self).__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = float(p_dropout)
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(
nn.Conv1d(
in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(float(p_dropout)))
for _ in range(n_layers - 1):
self.conv_layers.append(
nn.Conv1d(
hidden_channels,
hidden_channels,
kernel_size,
padding=kernel_size // 2,
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
Dialted and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
super(DDSConv, self).__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = float(p_dropout)
self.drop = nn.Dropout(float(p_dropout))
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size**i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(
nn.Conv1d(
channels,
channels,
kernel_size,
groups=channels,
dilation=dilation,
padding=padding,
)
)
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g: Optional[torch.Tensor] = None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = float(p_dropout)
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(float(p_dropout))
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(
self, x: torch.Tensor, x_mask: torch.Tensor, g: Optional[torch.Tensor] = None
):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i, (in_layer, res_skip_layer) in enumerate(
zip(self.in_layers, self.res_skip_layers)
):
x_in = in_layer(x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
| acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) | 0 | 2023-12-09 04:08:04+00:00 | 4k |
zengydd/ProphDR | Models/Proph_DR.py | [
{
"identifier": "load_config",
"path": "utils/optimizer.py",
"snippet": "def load_config(path):\n with open(path, 'r') as f:\n return EasyDict(yaml.safe_load(f))"
},
{
"identifier": "get_optimizer",
"path": "utils/optimizer.py",
"snippet": "def get_optimizer(cfg, model):\n i... | import os, sys
import pandas as pd
import numpy as np
import random
import copy
import time
import datetime
import math
import pickle
import optuna
import yaml
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils import data
from torch.nn.parallel import DataParallel
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR
from sklearn.model_selection import train_test_split, KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import SequentialSampler
from prettytable import PrettyTable
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, roc_curve, f1_score, precision_recall_curve
from lifelines.utils import concordance_index
from scipy.stats import pearsonr,spearmanr
from utils.optimizer import load_config, get_optimizer, get_scheduler
from collections import defaultdict
from utils.load import nested_dict_factory, load_pickle, save_pickle, EarlyStopping, FocalLoss
from utils.mydata import mydata, dataset_split
from utils.Drug_encode import encoder_D_pred, kbert
from Models.RCCA_ca import CCNet
from Models.cross_attention_dual import cross_EncoderBlock_G, cross_EncoderBlock_D
from Models.k_bert.atom_embedding_generator import bert_atom_embedding | 3,147 | os.environ['NUMEXPR_MAX_THREADS'] = '32'
sys.path.append("..")
torch.set_default_dtype(torch.float32)
config = './utils/train_res.yml'
config = load_config(config)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
float2str = lambda x: '%0.4f' % x
root = os.getcwd()
drug_smiles_df = pd.read_csv(os.path.join(root,'data_collect/drug_smiles_atom_pad.csv'), index_col='drug_id')
def unreg_atomf_list2tensor_pred(f, max_len=96):
# 进行padding操作,是对一个batchlist里的数据
valid_l = len(f)
f_pad = np.pad(f,((0, max_len-f.shape[0]),(0,0)), constant_values=0)
return f_pad, max_len, valid_l
def encoder_D_pred(smiles):
h_global, h_atom = bert_atom_embedding(smiles)
# print('h_global', h_global)
f_pad, max_len, valid_lens = unreg_atomf_list2tensor_pred(h_atom)
valid_lenD_list = [valid_lens]
valid_lens = torch.tensor(valid_lenD_list)
encode_D_pred = np.vstack((h_global, f_pad))
encode_D_pred_list = [encode_D_pred]
encode_D_pred = torch.stack([torch.tensor(arr) for arr in list(encode_D_pred_list)])
return encode_D_pred, valid_lens
def encoder_D(drug_id):
drug_id_list = list(drug_id.cpu())
valid_lenD_list = drug_smiles_df.loc[drug_id_list]['valid_lens'].to_list()
valid_lenD_list = [i+1 for i in valid_lenD_list]
valid_lens = torch.tensor(valid_lenD_list)
| os.environ['NUMEXPR_MAX_THREADS'] = '32'
sys.path.append("..")
torch.set_default_dtype(torch.float32)
config = './utils/train_res.yml'
config = load_config(config)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
float2str = lambda x: '%0.4f' % x
root = os.getcwd()
drug_smiles_df = pd.read_csv(os.path.join(root,'data_collect/drug_smiles_atom_pad.csv'), index_col='drug_id')
def unreg_atomf_list2tensor_pred(f, max_len=96):
# 进行padding操作,是对一个batchlist里的数据
valid_l = len(f)
f_pad = np.pad(f,((0, max_len-f.shape[0]),(0,0)), constant_values=0)
return f_pad, max_len, valid_l
def encoder_D_pred(smiles):
h_global, h_atom = bert_atom_embedding(smiles)
# print('h_global', h_global)
f_pad, max_len, valid_lens = unreg_atomf_list2tensor_pred(h_atom)
valid_lenD_list = [valid_lens]
valid_lens = torch.tensor(valid_lenD_list)
encode_D_pred = np.vstack((h_global, f_pad))
encode_D_pred_list = [encode_D_pred]
encode_D_pred = torch.stack([torch.tensor(arr) for arr in list(encode_D_pred_list)])
return encode_D_pred, valid_lens
def encoder_D(drug_id):
drug_id_list = list(drug_id.cpu())
valid_lenD_list = drug_smiles_df.loc[drug_id_list]['valid_lens'].to_list()
valid_lenD_list = [i+1 for i in valid_lenD_list]
valid_lens = torch.tensor(valid_lenD_list) | encode_D_list = kbert(drug_id_list) | 11 | 2023-12-13 11:56:08+00:00 | 4k |
merlresearch/PixPNet | pixpnet/symbolic/models.py | [
{
"identifier": "_make_divisible",
"path": "pixpnet/symbolic/misc.py",
"snippet": "def _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisib... | import copy
import math
import os
import os.path as osp
import pickle
import sys
import numpy as np
import pixpnet
import pixpnet.symbolic.index_layers as nn
import torch
from collections import OrderedDict
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union, cast
from filelock import FileLock
from pixpnet.symbolic.misc import _make_divisible, _overwrite_named_param, sym_scope
from pixpnet.utils import get_logger | 2,123 | else:
os.makedirs(save_dir, exist_ok=True)
write_data = {
"out": out,
"intermediates": [(k, v) for k, v in intermediates.items()],
}
with open(save_path, "wb") as fp:
pickle.dump(write_data, fp)
fp.flush()
def load_cache(model_name, height, width, num_classes=1, insert_at=None):
save_dir = _get_cache_dir(model_name, height, width, num_classes, insert_at)
save_path = osp.join(save_dir, "rf_data.pkl")
with open(save_path, "rb") as fp:
sys.modules["ngn"] = pixpnet # legacy naming
data = pickle.load(fp)
logger.info(f'Reusing cached data "{save_path}"')
out = data["out"]
intermediates = OrderedDict(((k, v) for k, v in data["intermediates"]))
return out, intermediates
def compute_rf_data(model_name, height, width, num_classes=1, insert_at=None):
name_is_name = isinstance(model_name, str)
lock_path = _get_cache_lockfile(model_name, height, width, num_classes, insert_at)
with FileLock(lock_path):
if name_is_name and check_cache(model_name, height, width, num_classes, insert_at):
try:
out, intermediates = load_cache(model_name, height, width, num_classes, insert_at)
except pickle.UnpicklingError:
logger.warning("UnpicklingError when loading rf data! " "Recomputing...")
else:
return out, intermediates
# It is not in the cache at this point.
if name_is_name:
try:
sym_model_cls = globals()[model_name]
except KeyError:
raise ValueError(f'Invalid name "{model_name}". Valid: ' f"{[*globals().keys()]}")
else:
sym_model_cls = model_name
img_shape = (height, width)
with unique_syms() as ctx:
x = ctx.Tensor(shape=(1, 1, *img_shape), name="x")
sym_model = sym_model_cls(num_classes=num_classes)
if insert_at:
_, rf_data_from_x = compute_rf_data(model_name, height, width, num_classes)
shape_at_insert_layer = rf_data_from_x[insert_at].shape
with unique_syms() as ctx:
intermediate_x = ctx.Tensor(shape=shape_at_insert_layer, name="intermediate_x")
out, intermediates = sym_model(x, intermediate_x=intermediate_x, insert_at=insert_at)
else:
out, intermediates = sym_model(x)
if name_is_name:
write_cache(out, intermediates, model_name, height, width, num_classes, insert_at)
return out, intermediates
class StochasticDepth(nn.Module):
"""
See :func:`stochastic_depth`.
"""
def __init__(self, p: float, mode: str) -> None:
super().__init__()
self.p = p
self.mode = mode
def forward(self, input: Tensor) -> Tensor:
return input
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(p={self.p}, mode={self.mode})"
return s
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
| # Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
# Copyright (c) PyTorch Contributors 2022
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# SPDX-License-Identifier: BSD-3-Clause
# Code largely based on PyTorch https://github.com/pytorch/pytorch
logger = get_logger(__name__)
major, minor = sys.version_info[:2]
if major > 3 or (major == 3 and minor >= 9):
OrderedDict_T = OrderedDict
else:
OrderedDict_T = Dict
unique_syms = nn.unique_syms
Tensor = nn.Tensor
ROOT_DIR = osp.dirname(osp.dirname(osp.realpath(pixpnet.__file__)))
CACHE_DIR = osp.join(ROOT_DIR, "rf_cache")
def _get_cache_dir(model_name, height, width, num_classes, insert_at):
insert_at_args = (f"insert_at_{insert_at}",) if insert_at else ()
return osp.join(CACHE_DIR, model_name, f"{height}x{width}", f"{num_classes}_classes", *insert_at_args)
def _get_cache_lockfile(model_name, height, width, num_classes, insert_at):
os.makedirs(CACHE_DIR, exist_ok=True)
insert_at_str = f"__insert_at_{insert_at}" if insert_at else ""
return osp.join(CACHE_DIR, f".{model_name}__{height}x{width}__{num_classes}_classes{insert_at_str}" f".lock")
def check_cache(model_name, height, width, num_classes=1, insert_at=None):
save_dir = _get_cache_dir(model_name, height, width, num_classes, insert_at)
save_path = osp.join(save_dir, "rf_data.pkl")
if os.path.isfile(save_path):
return save_path
def _serialize_ndarray(arr: np.ndarray):
return {
"shape": arr.shape,
"data": [v.serialize() for v in arr.flat],
}
def _deserialize_ndarray(data):
return np.asarray([nn.HypercubeCollection.deserialize(arr_indices) for arr_indices in data["data"]]).reshape(
data["shape"]
)
def write_cache(out, intermediates, model_name, height, width, num_classes, insert_at):
save_dir = _get_cache_dir(model_name, height, width, num_classes, insert_at)
save_path = osp.join(save_dir, "rf_data.pkl")
if os.path.isfile(save_path):
logger.warning(f'Will overwrite "{save_path}" which already exists')
else:
os.makedirs(save_dir, exist_ok=True)
write_data = {
"out": out,
"intermediates": [(k, v) for k, v in intermediates.items()],
}
with open(save_path, "wb") as fp:
pickle.dump(write_data, fp)
fp.flush()
def load_cache(model_name, height, width, num_classes=1, insert_at=None):
save_dir = _get_cache_dir(model_name, height, width, num_classes, insert_at)
save_path = osp.join(save_dir, "rf_data.pkl")
with open(save_path, "rb") as fp:
sys.modules["ngn"] = pixpnet # legacy naming
data = pickle.load(fp)
logger.info(f'Reusing cached data "{save_path}"')
out = data["out"]
intermediates = OrderedDict(((k, v) for k, v in data["intermediates"]))
return out, intermediates
def compute_rf_data(model_name, height, width, num_classes=1, insert_at=None):
name_is_name = isinstance(model_name, str)
lock_path = _get_cache_lockfile(model_name, height, width, num_classes, insert_at)
with FileLock(lock_path):
if name_is_name and check_cache(model_name, height, width, num_classes, insert_at):
try:
out, intermediates = load_cache(model_name, height, width, num_classes, insert_at)
except pickle.UnpicklingError:
logger.warning("UnpicklingError when loading rf data! " "Recomputing...")
else:
return out, intermediates
# It is not in the cache at this point.
if name_is_name:
try:
sym_model_cls = globals()[model_name]
except KeyError:
raise ValueError(f'Invalid name "{model_name}". Valid: ' f"{[*globals().keys()]}")
else:
sym_model_cls = model_name
img_shape = (height, width)
with unique_syms() as ctx:
x = ctx.Tensor(shape=(1, 1, *img_shape), name="x")
sym_model = sym_model_cls(num_classes=num_classes)
if insert_at:
_, rf_data_from_x = compute_rf_data(model_name, height, width, num_classes)
shape_at_insert_layer = rf_data_from_x[insert_at].shape
with unique_syms() as ctx:
intermediate_x = ctx.Tensor(shape=shape_at_insert_layer, name="intermediate_x")
out, intermediates = sym_model(x, intermediate_x=intermediate_x, insert_at=insert_at)
else:
out, intermediates = sym_model(x)
if name_is_name:
write_cache(out, intermediates, model_name, height, width, num_classes, insert_at)
return out, intermediates
class StochasticDepth(nn.Module):
"""
See :func:`stochastic_depth`.
"""
def __init__(self, p: float, mode: str) -> None:
super().__init__()
self.p = p
self.mode = mode
def forward(self, input: Tensor) -> Tensor:
return input
def __repr__(self) -> str:
s = f"{self.__class__.__name__}(p={self.p}, mode={self.mode})"
return s
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
super().__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock") | with sym_scope("conv1"): | 2 | 2023-12-06 23:49:31+00:00 | 4k |
dhh1995/MeGraph | megraph/datasets/utils/graph_generators.py | [
{
"identifier": "barabasi_albert",
"path": "megraph/datasets/utils/graph_generation.py",
"snippet": "def barabasi_albert(N, degree=None, seed=None):\n \"\"\"Creates a random graph according to the Barabási-Albert preferential attachment model\n of size N and where nodes are atteched with degree ed... | from collections import deque
from enum import Enum
from functools import partial
from typing import List, Tuple
from megraph.datasets.utils.graph_generation import (barabasi_albert,
caterpillar, caveman,
erdos_renyi,
generate_graph_geo,
generate_graph_sbm, grid,
ladder, line, lobster,
star, tree)
from megraph.rng_utils import (sample_between_min_max, sample_from_mixture,
sample_partition)
from IPython import embed
import networkx as nx
import numpy as np
import numpy.random as random | 3,047 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : graph_generators.py
# Author : Honghua Dong, Yang Yu
# Email : dhh19951@gmail.com, 773964676@qq.com
#
# Distributed under terms of the MIT license.
__all__ = [
"generate_graph_pseudotree",
"generate_graph_cycle",
"get_random_graph_builder",
"generate_pseudotree",
]
def sample_random_edge(g: nx.Graph):
n = g.number_of_nodes()
while True:
u, v = random.randint(n), random.randint(n)
if (not g.has_edge(u, v)) and (u != v):
return u, v
def generate_graph_pseudotree(
num_nodes: int,
cycle_ratio_min_max: List[float] = [0.3, 0.6],
partition_method: str = "sep",
) -> Tuple[nx.DiGraph, int]:
"""[v2] Generate a random tree with sampled cycle length"""
cycle_ratio = sample_between_min_max(cycle_ratio_min_max)
cycle_len = max(min(3, num_nodes), int(num_nodes * cycle_ratio))
g = nx.cycle_graph(cycle_len)
expander_sizes = sample_partition(
num_nodes - cycle_len, cycle_len, method=partition_method
)
cur_idx = cycle_len
for i in range(cycle_len):
tree_size = expander_sizes[i] + 1 # the root
if tree_size > 1:
tree = nx.random_tree(tree_size)
# Merge tree to g while the root of the tree is node i on g
re_index = lambda x: i if x == 0 else cur_idx + x - 1
for u, v in tree.edges():
g.add_edge(re_index(u), re_index(v))
cur_idx += tree_size - 1
return g, cycle_len
def generate_graph_cycle(n: int) -> nx.DiGraph:
return nx.cycle_graph(n)
def generate_graph_blooming(n: int, degree=None, edge_factor=0.2) -> nx.DiGraph:
"""A fractal tree plus some random edges"""
degree = degree or 2
g = nx.empty_graph(n)
edges = []
cur = 1
q = deque([0])
while cur < n:
x = q.popleft()
for _ in range(degree):
if cur < n:
edges.append((x, cur))
q.append(cur)
cur += 1
g.add_edges_from(edges)
# random new edges
for _ in range(int(n * edge_factor)):
u, v = sample_random_edge(g)
g.add_edge(u, v)
return g
# Graph generators and default graph scales
GRAPH_GENERATORS_PAIRS = [
("er", erdos_renyi),
("ba", barabasi_albert),
("grid", grid),
("caveman", caveman),
("tree", tree),
| #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : graph_generators.py
# Author : Honghua Dong, Yang Yu
# Email : dhh19951@gmail.com, 773964676@qq.com
#
# Distributed under terms of the MIT license.
__all__ = [
"generate_graph_pseudotree",
"generate_graph_cycle",
"get_random_graph_builder",
"generate_pseudotree",
]
def sample_random_edge(g: nx.Graph):
n = g.number_of_nodes()
while True:
u, v = random.randint(n), random.randint(n)
if (not g.has_edge(u, v)) and (u != v):
return u, v
def generate_graph_pseudotree(
num_nodes: int,
cycle_ratio_min_max: List[float] = [0.3, 0.6],
partition_method: str = "sep",
) -> Tuple[nx.DiGraph, int]:
"""[v2] Generate a random tree with sampled cycle length"""
cycle_ratio = sample_between_min_max(cycle_ratio_min_max)
cycle_len = max(min(3, num_nodes), int(num_nodes * cycle_ratio))
g = nx.cycle_graph(cycle_len)
expander_sizes = sample_partition(
num_nodes - cycle_len, cycle_len, method=partition_method
)
cur_idx = cycle_len
for i in range(cycle_len):
tree_size = expander_sizes[i] + 1 # the root
if tree_size > 1:
tree = nx.random_tree(tree_size)
# Merge tree to g while the root of the tree is node i on g
re_index = lambda x: i if x == 0 else cur_idx + x - 1
for u, v in tree.edges():
g.add_edge(re_index(u), re_index(v))
cur_idx += tree_size - 1
return g, cycle_len
def generate_graph_cycle(n: int) -> nx.DiGraph:
return nx.cycle_graph(n)
def generate_graph_blooming(n: int, degree=None, edge_factor=0.2) -> nx.DiGraph:
"""A fractal tree plus some random edges"""
degree = degree or 2
g = nx.empty_graph(n)
edges = []
cur = 1
q = deque([0])
while cur < n:
x = q.popleft()
for _ in range(degree):
if cur < n:
edges.append((x, cur))
q.append(cur)
cur += 1
g.add_edges_from(edges)
# random new edges
for _ in range(int(n * edge_factor)):
u, v = sample_random_edge(g)
g.add_edge(u, v)
return g
# Graph generators and default graph scales
GRAPH_GENERATORS_PAIRS = [
("er", erdos_renyi),
("ba", barabasi_albert),
("grid", grid),
("caveman", caveman),
("tree", tree), | ("ladder", ladder), | 7 | 2023-12-12 04:17:13+00:00 | 4k |
gpavanb1/NODEFit | examples/fit_data.py | [
{
"identifier": "DEVICE",
"path": "nodefit/constants.py",
"snippet": "DEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")"
},
{
"identifier": "NeuralODE",
"path": "nodefit/neural_ode.py",
"snippet": "class NeuralODE:\n def __init__(self, neural_net: nn.Module, ... | import numpy as np
import torch.nn as nn
from nodefit.constants import DEVICE
from nodefit.neural_ode import NeuralODE
from nodefit.neural_sde import NeuralSDE | 3,081 |
###
# DEFINE NETWORKS
###
# Neural ODE parameters
ndim, drift_nhidden, diffusion_nhidden = 2, 10, 2
drift_nn = nn.Sequential(
nn.Linear(ndim+1, drift_nhidden),
nn.Sigmoid(),
nn.Linear(drift_nhidden, ndim)
).double().to(DEVICE)
diffusion_nn = nn.Sequential(
nn.Linear(ndim+1, diffusion_nhidden),
nn.Sigmoid(),
nn.Linear(diffusion_nhidden, ndim)
).double().to(DEVICE)
###
# PROVIDE DATA
###
t = np.linspace(0, 5, 10)
# Provide data as list of lists with starting condition
data = np.array([[1., 1.],
[1.52210594, 1.23757532],
[2.0570346, 1.37814989],
[2.47603815, 1.46040018],
[2.75026795, 1.50703724],
[2.91602961, 1.5343292],
[3.01170625, 1.5498438],
[3.06584853, 1.5585547],
[3.09827458, 1.56379774],
[3.11650095, 1.56674226]])
###
# FIT USING NEURALODE
###
print('Performing fit using Neural ODE...')
|
###
# DEFINE NETWORKS
###
# Neural ODE parameters
ndim, drift_nhidden, diffusion_nhidden = 2, 10, 2
drift_nn = nn.Sequential(
nn.Linear(ndim+1, drift_nhidden),
nn.Sigmoid(),
nn.Linear(drift_nhidden, ndim)
).double().to(DEVICE)
diffusion_nn = nn.Sequential(
nn.Linear(ndim+1, diffusion_nhidden),
nn.Sigmoid(),
nn.Linear(diffusion_nhidden, ndim)
).double().to(DEVICE)
###
# PROVIDE DATA
###
t = np.linspace(0, 5, 10)
# Provide data as list of lists with starting condition
data = np.array([[1., 1.],
[1.52210594, 1.23757532],
[2.0570346, 1.37814989],
[2.47603815, 1.46040018],
[2.75026795, 1.50703724],
[2.91602961, 1.5343292],
[3.01170625, 1.5498438],
[3.06584853, 1.5585547],
[3.09827458, 1.56379774],
[3.11650095, 1.56674226]])
###
# FIT USING NEURALODE
###
print('Performing fit using Neural ODE...')
| neural_ode = NeuralODE(drift_nn, t, data) | 1 | 2023-12-12 18:10:25+00:00 | 4k |
SJTU-Quant/SUNNY-GNN | train/utils.py | [
{
"identifier": "snexgnn",
"path": "models/snexgnn.py",
"snippet": "class ExtractorMLP(nn.Module):\nclass SNexGNN(nn.Module):\nclass SNexHGN(SNexGNN):\n def __init__(self, in_dim, bias=True):\n def forward(self, emb):\n def __init__(self, pret_encoder, encoder, extractor, in_dim, target_ntype, ... | import os
import torch
import dgl
from tqdm import tqdm
from models import snexgnn, hgn, gat, gcn | 1,799 |
def edge_hop_mask(sg, target_ntype=None, k=2):
is_homogeneous = sg.is_homogeneous
if not is_homogeneous:
edge_types = sg.etypes
node_types = sg.ntypes
sg = dgl.to_homogeneous(sg)
src_target = torch.nonzero(sg.ndata['_TYPE']==node_types.index(target_ntype))[0].item()
else:
src_target = 0
e_h_mask = torch.tensor([], dtype=torch.bool)
src = [[src_target]]
for i in range(k):
one_hop_sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
one_hop_loader = dgl.dataloading.DataLoader(sg, src[i],
one_hop_sampler, batch_size=1, shuffle=False)
neighbors = []
h_mask = torch.zeros(sg.number_of_edges(), dtype=torch.bool)
for j, (ng, _, _) in enumerate(one_hop_loader):
ng_lst = ng.numpy().tolist()
neighbors.extend(ng_lst)
edge_ids = sg.edge_ids(ng, [src[i][j]]*len(ng))
h_mask[edge_ids] = 1
src.append(list(set(neighbors)))
e_h_mask = torch.cat((e_h_mask, h_mask.unsqueeze(0)), dim=0)
if not is_homogeneous:
e_h_mask_dict = {}
for i in range(len(edge_types)):
etype = edge_types[i]
a = torch.nonzero(sg.edata[dgl.ETYPE] == i).view(-1)
e_h_mask_dict[etype] = e_h_mask[:, a].T
return e_h_mask_dict
return e_h_mask.T
def accuracy(y_pred, y_true):
y_true = y_true.squeeze().long()
preds = y_pred.max(1)[1].type_as(y_true)
correct = preds.eq(y_true).double()
correct = correct.sum().item()
return correct / len(y_true)
def get_model(cfg):
graph_path = cfg.graph_path
index_path = cfg.index_path
method = cfg.method
data_hyparams = cfg.hyparams['data']
dataset = cfg.dataset
ckpt_dir = cfg.ckpt_dir
encoder_type = cfg.encoder_type
num_classes = data_hyparams['num_classes']
target_ntype = data_hyparams['target_ntype']
n_layer = 2
gs, _ = dgl.load_graphs(graph_path)
g = gs[0]
if g.is_homogeneous:
g = dgl.add_self_loop(g)
in_dim = {n: g.nodes[n].data['nfeat'].shape[1] for n in g.ntypes}
info = torch.load(index_path)
if method == 'gat':
model = gat.GAT(in_dim[target_ntype], 256, 64, [8, 1], num_classes)
elif method == 'gcn':
model = gcn.GCN(in_dim[target_ntype], 256, 64, num_classes)
elif method == 'simplehgn':
edge_type_num = len(g.etypes)
|
def edge_hop_mask(sg, target_ntype=None, k=2):
is_homogeneous = sg.is_homogeneous
if not is_homogeneous:
edge_types = sg.etypes
node_types = sg.ntypes
sg = dgl.to_homogeneous(sg)
src_target = torch.nonzero(sg.ndata['_TYPE']==node_types.index(target_ntype))[0].item()
else:
src_target = 0
e_h_mask = torch.tensor([], dtype=torch.bool)
src = [[src_target]]
for i in range(k):
one_hop_sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
one_hop_loader = dgl.dataloading.DataLoader(sg, src[i],
one_hop_sampler, batch_size=1, shuffle=False)
neighbors = []
h_mask = torch.zeros(sg.number_of_edges(), dtype=torch.bool)
for j, (ng, _, _) in enumerate(one_hop_loader):
ng_lst = ng.numpy().tolist()
neighbors.extend(ng_lst)
edge_ids = sg.edge_ids(ng, [src[i][j]]*len(ng))
h_mask[edge_ids] = 1
src.append(list(set(neighbors)))
e_h_mask = torch.cat((e_h_mask, h_mask.unsqueeze(0)), dim=0)
if not is_homogeneous:
e_h_mask_dict = {}
for i in range(len(edge_types)):
etype = edge_types[i]
a = torch.nonzero(sg.edata[dgl.ETYPE] == i).view(-1)
e_h_mask_dict[etype] = e_h_mask[:, a].T
return e_h_mask_dict
return e_h_mask.T
def accuracy(y_pred, y_true):
y_true = y_true.squeeze().long()
preds = y_pred.max(1)[1].type_as(y_true)
correct = preds.eq(y_true).double()
correct = correct.sum().item()
return correct / len(y_true)
def get_model(cfg):
graph_path = cfg.graph_path
index_path = cfg.index_path
method = cfg.method
data_hyparams = cfg.hyparams['data']
dataset = cfg.dataset
ckpt_dir = cfg.ckpt_dir
encoder_type = cfg.encoder_type
num_classes = data_hyparams['num_classes']
target_ntype = data_hyparams['target_ntype']
n_layer = 2
gs, _ = dgl.load_graphs(graph_path)
g = gs[0]
if g.is_homogeneous:
g = dgl.add_self_loop(g)
in_dim = {n: g.nodes[n].data['nfeat'].shape[1] for n in g.ntypes}
info = torch.load(index_path)
if method == 'gat':
model = gat.GAT(in_dim[target_ntype], 256, 64, [8, 1], num_classes)
elif method == 'gcn':
model = gcn.GCN(in_dim[target_ntype], 256, 64, num_classes)
elif method == 'simplehgn':
edge_type_num = len(g.etypes) | model = hgn.SimpleHeteroHGN(32, edge_type_num, in_dim, 32, num_classes, n_layer, | 1 | 2023-12-12 02:46:00+00:00 | 4k |
dvmazur/mixtral-offloading | src/custom_layers.py | [
{
"identifier": "pack_4bit_u8_common",
"path": "src/packing.py",
"snippet": "def pack_4bit_u8_common(W_q: torch.Tensor):\n height = W_q.size(0)\n assert height % 2 == 0\n \n W_q = W_q.to(torch.uint8)\n p = (W_q[::2, ...] << 4) | (W_q[1::2, ...])\n\n return PackedTensor(p.to(torch.uint8... | import copy
import functools
import torch
from transformers.models.mixtral.configuration_mixtral import MixtralConfig
from transformers.activations import ACT2FN
from typing import Dict, Any
from hqq.core.quantize import HQQLinear, Quantizer
from torch import nn
from torch.nn import functional as F
from .packing import pack_4bit_u8_common, pack_2bit_u8_common, unpack_4bit_u8_common, unpack_2bit_u8_common
from .triton_kernels import triton_matmul4_transpose, triton_matmul3_transpose, triton_matmul2_transpose | 3,384 |
class HQQLinearTritonSavable(HQQLinear):
def __init__(self, layer, quant_config, meta=None, **kwargs):
"""
Example how to get meta:
>>>> meta1 = HQQLinearSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)
>>>> meta2 = HQQLinearSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)
"""
assert quant_config['weight_quant_params']['nbits'] in [2, 3, 4]
super().__init__(layer, quant_config, **kwargs)
if not hasattr(self, 'meta'):
assert meta is not None
self.meta = copy.deepcopy(meta)
self._register_state_dict_hook(self._add_to_state_dict_hook)
self._register_load_state_dict_pre_hook(self._load_from_state_dict_hook)
def quantize(self, *args, **kwargs):
super().quantize(*args, **kwargs)
# repacking
self.repack()
def repack(self):
if self.W_q.shape != self.meta['shape']:
W_q = Quantizer.unpack[self.meta['packing']](self.W_q)
sh = self.meta['shape']
W_q = W_q.reshape((-1,) + sh[1:])
W_q = W_q[:sh[0], ...]
self.W_q = Quantizer.pack[self.meta['packing']](W_q)
def forward(self, x):
return self.forward_triton(x)
def set_backend(self, backend):
pass
@torch.inference_mode()
def forward_triton(self, x):
assert self.ready, "model was not quantized"
assert self.meta['axis'] == 0
W_q, meta = self.W_q, self.meta
del_keys = []
if 'quant_scale' in meta and meta['quant_scale']:
meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale')
if 'quant_zero' in meta and meta['quant_zero']:
meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero')
K = meta['shape'][1]
N = meta['shape'][0]
if self.meta['nbits'] == 4:
fn = triton_matmul4_transpose
elif self.meta['nbits'] == 3:
fn = functools.partial(triton_matmul3_transpose, N=N)
elif self.meta['nbits'] == 2:
|
class HQQLinearTritonSavable(HQQLinear):
def __init__(self, layer, quant_config, meta=None, **kwargs):
"""
Example how to get meta:
>>>> meta1 = HQQLinearSavable.get_hqq_meta((hidden_dim, ffn_dim), quant_config)
>>>> meta2 = HQQLinearSavable.get_hqq_meta((ffn_dim, hidden_dim), quant_config)
"""
assert quant_config['weight_quant_params']['nbits'] in [2, 3, 4]
super().__init__(layer, quant_config, **kwargs)
if not hasattr(self, 'meta'):
assert meta is not None
self.meta = copy.deepcopy(meta)
self._register_state_dict_hook(self._add_to_state_dict_hook)
self._register_load_state_dict_pre_hook(self._load_from_state_dict_hook)
def quantize(self, *args, **kwargs):
super().quantize(*args, **kwargs)
# repacking
self.repack()
def repack(self):
if self.W_q.shape != self.meta['shape']:
W_q = Quantizer.unpack[self.meta['packing']](self.W_q)
sh = self.meta['shape']
W_q = W_q.reshape((-1,) + sh[1:])
W_q = W_q[:sh[0], ...]
self.W_q = Quantizer.pack[self.meta['packing']](W_q)
def forward(self, x):
return self.forward_triton(x)
def set_backend(self, backend):
pass
@torch.inference_mode()
def forward_triton(self, x):
assert self.ready, "model was not quantized"
assert self.meta['axis'] == 0
W_q, meta = self.W_q, self.meta
del_keys = []
if 'quant_scale' in meta and meta['quant_scale']:
meta['scale'] = Quantizer.dequantize(meta['scale_q'], meta['meta_scale']); del_keys.append('scale')
if 'quant_zero' in meta and meta['quant_zero']:
meta['zero'] = Quantizer.dequantize(meta['zero_q'], meta['meta_zero']); del_keys.append('zero')
K = meta['shape'][1]
N = meta['shape'][0]
if self.meta['nbits'] == 4:
fn = triton_matmul4_transpose
elif self.meta['nbits'] == 3:
fn = functools.partial(triton_matmul3_transpose, N=N)
elif self.meta['nbits'] == 2: | fn = triton_matmul2_transpose | 6 | 2023-12-15 03:32:35+00:00 | 4k |
CircleRadon/Osprey | osprey/model/osprey_arch.py | [
{
"identifier": "build_vision_tower",
"path": "osprey/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, delay_load=False):\n\n return CLIPVisionTower(args=vision_tower_cfg)\n\n raise ValueError(f'Unknown vision tower: {vision_tower}')"
},
{
"identif... | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from osprey.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn | 3,374 | cur_image_features = image_features[cur_image_idx]
image_token_start = image_token_indices[0]
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
cur_labels = cur_labels[image_token_start+2:]
else:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_labels = cur_labels[image_token_start+1:]
cur_image_idx += 1
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_input_ids = cur_input_ids[image_token_start+2:]
else:
cur_input_ids = cur_input_ids[image_token_start+1:]
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
if cur_input_ids.numel() > 0:
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
mask_idx = torch.nonzero(cur_input_ids==self.tokenizer.convert_tokens_to_ids(['<mask>'])[0])
_l = 0
for i, idx in enumerate(mask_idx):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:idx[0]]).detach())
## mask
cur_new_input_embeds.append(mask_feats[batch_idx][i:i+1].detach())
## pos
cur_new_input_embeds.append(pos_feats[batch_idx][i:i+1].detach())
if labels is not None:
cur_labels[idx[0]:idx[0]+2] = torch.full((2,), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)
_l = idx[0]+2
if _l< len(cur_input_ids):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:]).detach())
else:
mask_idx = torch.nonzero(cur_input_ids==self.tokenizer.convert_tokens_to_ids(['<mask>'])[0])
assert len(mask_idx) == len(mask_feats[batch_idx]), "mask num not equal to mask feats"
_l = 0
for i, idx in enumerate(mask_idx):
cur_raw_new_input_embeds = self.get_model().embed_tokens(cur_input_ids[_l:idx[0]])
cur_new_input_embeds.append(cur_raw_new_input_embeds)
## mask
cur_new_input_embeds.append(mask_feats[batch_idx][i:i+1].to(cur_raw_new_input_embeds.dtype))
## pos
cur_new_input_embeds.append(pos_feats[batch_idx][i:i+1].to(cur_raw_new_input_embeds.dtype))
if labels is not None:
cur_labels[idx[0]:idx[0]+2] = torch.full((2,), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)
_l = idx[0]+2
if _l< len(cur_input_ids):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:]))
if labels is not None:
cur_new_labels.append(cur_labels)
cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
new_input_embeds.append(cur_new_input_embeds)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0)
new_labels.append(cur_new_labels)
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
max_len = max(x.shape[0] for x in new_input_embeds)
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else:
new_input_embeds = torch.stack(new_input_embeds, dim=0)
if labels is not None:
new_labels = torch.stack(new_labels, dim=0)
if attention_mask is not None:
new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, attention_mask, past_key_values, new_input_embeds, new_labels
def initialize_vision_tokenizer(self, model_args, tokenizer):
if model_args.mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
self.resize_token_embeddings(len(tokenizer))
mask_tokens = ['<mask>', '<pos>']
num_new_tokens = tokenizer.add_tokens(mask_tokens, special_tokens=True)
if model_args.mm_use_im_start_end:
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class OspreyMetaModel:
def __init__(self, config):
super(OspreyMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=False)
self.mm_projector = build_vision_projector(config)
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None):
vision_tower = model_args.vision_tower
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
if not hasattr(self.config, "mm_vision_tower"):
self.config.mm_vision_tower = vision_tower
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [self.vision_tower]
else:
self.vision_tower = vision_tower
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.mm_projector = build_vision_projector(self.config)
if pretrain_mm_mlp_adapter is not None:
print("***********load projector_weights********")
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
class OspreyMetaForCausalLM(ABC):
def __init__(self):
super(OspreyMetaForCausalLM, self).__init__()
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_images(self, images):
image_features, image_features_dict = self.get_model().get_vision_tower()(images)
self.get_model().mm_projector.to(device=image_features.device, dtype=image_features.dtype)
image_features = self.get_model().mm_projector(image_features)
return image_features, image_features_dict
def prepare_inputs_labels_for_multimodal(
self, input_ids, masks, attention_mask, past_key_values, labels, images
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
image_features, image_features_dict = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1).to(concat_images.device) for x in image_features]
else:
image_features, image_features_dict = self.encode_images(images)
mask_feats, pos_feats = self.mask_extractor(image_features_dict, masks)
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:
# multimodal LLM, but the current sample is not multimodal
# FIXME: this is a hacky fix, for deepspeed zero3 to work
half_len = cur_input_ids.shape[0] // 2
cur_image_features = image_features[cur_image_idx]
cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0)
new_input_embeds.append(cur_input_embeds)
if labels is not None:
new_labels.append(labels[batch_idx])
cur_image_idx += 1
continue
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
cur_new_input_embeds = []
if labels is not None:
cur_labels = labels[batch_idx]
cur_new_labels = []
assert cur_labels.shape == cur_input_ids.shape
while image_token_indices.numel() > 0:
cur_image_features = image_features[cur_image_idx]
image_token_start = image_token_indices[0]
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
cur_labels = cur_labels[image_token_start+2:]
else:
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))
cur_new_input_embeds.append(cur_image_features)
if labels is not None:
cur_new_labels.append(cur_labels[:image_token_start])
cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
cur_labels = cur_labels[image_token_start+1:]
cur_image_idx += 1
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
cur_input_ids = cur_input_ids[image_token_start+2:]
else:
cur_input_ids = cur_input_ids[image_token_start+1:]
image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]
if cur_input_ids.numel() > 0:
if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):
mask_idx = torch.nonzero(cur_input_ids==self.tokenizer.convert_tokens_to_ids(['<mask>'])[0])
_l = 0
for i, idx in enumerate(mask_idx):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:idx[0]]).detach())
## mask
cur_new_input_embeds.append(mask_feats[batch_idx][i:i+1].detach())
## pos
cur_new_input_embeds.append(pos_feats[batch_idx][i:i+1].detach())
if labels is not None:
cur_labels[idx[0]:idx[0]+2] = torch.full((2,), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)
_l = idx[0]+2
if _l< len(cur_input_ids):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:]).detach())
else:
mask_idx = torch.nonzero(cur_input_ids==self.tokenizer.convert_tokens_to_ids(['<mask>'])[0])
assert len(mask_idx) == len(mask_feats[batch_idx]), "mask num not equal to mask feats"
_l = 0
for i, idx in enumerate(mask_idx):
cur_raw_new_input_embeds = self.get_model().embed_tokens(cur_input_ids[_l:idx[0]])
cur_new_input_embeds.append(cur_raw_new_input_embeds)
## mask
cur_new_input_embeds.append(mask_feats[batch_idx][i:i+1].to(cur_raw_new_input_embeds.dtype))
## pos
cur_new_input_embeds.append(pos_feats[batch_idx][i:i+1].to(cur_raw_new_input_embeds.dtype))
if labels is not None:
cur_labels[idx[0]:idx[0]+2] = torch.full((2,), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)
_l = idx[0]+2
if _l< len(cur_input_ids):
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[_l:]))
if labels is not None:
cur_new_labels.append(cur_labels)
cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)
new_input_embeds.append(cur_new_input_embeds)
if labels is not None:
cur_new_labels = torch.cat(cur_new_labels, dim=0)
new_labels.append(cur_new_labels)
if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):
max_len = max(x.shape[0] for x in new_input_embeds)
new_input_embeds_align = []
for cur_new_embed in new_input_embeds:
cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
new_input_embeds_align.append(cur_new_embed)
new_input_embeds = torch.stack(new_input_embeds_align, dim=0)
if labels is not None:
new_labels_align = []
_new_labels = new_labels
for cur_new_label in new_labels:
cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
new_labels_align.append(cur_new_label)
new_labels = torch.stack(new_labels_align, dim=0)
if attention_mask is not None:
new_attention_mask = []
for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
new_attention_mask.append(cur_new_attention_mask)
attention_mask = torch.stack(new_attention_mask, dim=0)
assert attention_mask.shape == new_labels.shape
else:
new_input_embeds = torch.stack(new_input_embeds, dim=0)
if labels is not None:
new_labels = torch.stack(new_labels, dim=0)
if attention_mask is not None:
new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)
attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)
assert attention_mask.shape == new_input_embeds.shape[:2]
return None, attention_mask, past_key_values, new_input_embeds, new_labels
def initialize_vision_tokenizer(self, model_args, tokenizer):
if model_args.mm_use_im_patch_token:
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
self.resize_token_embeddings(len(tokenizer))
mask_tokens = ['<mask>', '<pos>']
num_new_tokens = tokenizer.add_tokens(mask_tokens, special_tokens=True)
if model_args.mm_use_im_start_end: | num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True) | 6 | 2023-12-17 16:21:45+00:00 | 4k |
3DTopia/OpenLRM | lrm/inferrer.py | [
{
"identifier": "LRMGenerator",
"path": "lrm/models/generator.py",
"snippet": "class LRMGenerator(nn.Module):\n \"\"\"\n Full model of the large reconstruction model.\n \"\"\"\n def __init__(self, camera_embed_dim: int, rendering_samples_per_ray: int,\n transformer_dim: int, ... | import torch
import math
import os
import imageio
import mcubes
import trimesh
import numpy as np
import argparse
from PIL import Image
from .models.generator import LRMGenerator
from .cam_utils import build_camera_principle, build_camera_standard, center_looking_at_camera_pose
from huggingface_hub import hf_hub_download | 2,703 | # Copyright (c) 2023, Zexin He
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LRMInferrer:
def __init__(self, model_name: str):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
_checkpoint = self._load_checkpoint(model_name)
_model_weights, _model_kwargs = _checkpoint['weights'], _checkpoint['kwargs']['model']
self.model = self._build_model(_model_kwargs, _model_weights).eval()
self.infer_kwargs = _checkpoint['kwargs']['infer']
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def _load_checkpoint(self, model_name: str, cache_dir = './.cache'):
# download checkpoint if not exists
local_dir = os.path.join(cache_dir, model_name)
if not os.path.exists(local_dir):
os.makedirs(local_dir, exist_ok=True)
if not os.path.exists(os.path.join(local_dir, f'model.pth')):
# os.system(f'wget -O {os.path.join(cache_dir, f"{model_name}.pth")} https://zxhezexin.com/modelzoo/openlrm/{model_name}.pth')
# raise FileNotFoundError(f"Checkpoint {model_name} not found in {cache_dir}")
repo_id = f'zxhezexin/{model_name}'
config_path = hf_hub_download(repo_id=repo_id, filename='config.json', local_dir=local_dir)
model_path = hf_hub_download(repo_id=repo_id, filename=f'model.pth', local_dir=local_dir)
else:
model_path = os.path.join(local_dir, f'model.pth')
checkpoint = torch.load(model_path, map_location=self.device)
return checkpoint
def _build_model(self, model_kwargs, model_weights):
model = LRMGenerator(**model_kwargs).to(self.device)
model.load_state_dict(model_weights)
print(f"======== Loaded model from checkpoint ========")
return model
@staticmethod
def _get_surrounding_views(M: int = 160, radius: float = 2.0, height: float = 0.8):
# M: number of surrounding views
# radius: camera dist to center
# height: height of the camera
# return: (M, 3, 4)
assert M > 0
assert radius > 0
camera_positions = []
projected_radius = math.sqrt(radius ** 2 - height ** 2)
for i in range(M):
theta = 2 * math.pi * i / M - math.pi / 2
x = projected_radius * math.cos(theta)
y = projected_radius * math.sin(theta)
z = height
camera_positions.append([x, y, z])
camera_positions = torch.tensor(camera_positions, dtype=torch.float32)
extrinsics = center_looking_at_camera_pose(camera_positions)
return extrinsics
@staticmethod
def _default_intrinsics():
# return: (3, 2)
fx = fy = 384
cx = cy = 256
w = h = 512
intrinsics = torch.tensor([
[fx, fy],
[cx, cy],
[w, h],
], dtype=torch.float32)
return intrinsics
def _default_source_camera(self, batch_size: int = 1):
# return: (N, D_cam_raw)
dist_to_center = 2
canonical_camera_extrinsics = torch.tensor([[
[1, 0, 0, 0],
[0, 0, -1, -dist_to_center],
[0, 1, 0, 0],
]], dtype=torch.float32)
canonical_camera_intrinsics = self._default_intrinsics().unsqueeze(0)
source_camera = build_camera_principle(canonical_camera_extrinsics, canonical_camera_intrinsics)
return source_camera.repeat(batch_size, 1)
    def _default_render_cameras(self, batch_size: int = 1):
        # return: (N, M, D_cam_render)
        # NOTE(review): this copy appears truncated -- it builds the per-view
        # extrinsics/intrinsics but never combines or returns them (it
        # implicitly returns None, and ``batch_size`` is unused). Compare with
        # the full implementation elsewhere in this file.
        render_camera_extrinsics = self._get_surrounding_views()
        render_camera_intrinsics = self._default_intrinsics().unsqueeze(0).repeat(render_camera_extrinsics.shape[0], 1, 1)
| # Copyright (c) 2023, Zexin He
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LRMInferrer:
def __init__(self, model_name: str):
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
_checkpoint = self._load_checkpoint(model_name)
_model_weights, _model_kwargs = _checkpoint['weights'], _checkpoint['kwargs']['model']
self.model = self._build_model(_model_kwargs, _model_weights).eval()
self.infer_kwargs = _checkpoint['kwargs']['infer']
    def __enter__(self):
        """Enter the context manager, yielding the inferrer itself."""
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit the context manager.

        No cleanup is performed. Returns None (falsy), so any exception
        raised inside the ``with`` block propagates to the caller.
        """
        pass
def _load_checkpoint(self, model_name: str, cache_dir = './.cache'):
# download checkpoint if not exists
local_dir = os.path.join(cache_dir, model_name)
if not os.path.exists(local_dir):
os.makedirs(local_dir, exist_ok=True)
if not os.path.exists(os.path.join(local_dir, f'model.pth')):
# os.system(f'wget -O {os.path.join(cache_dir, f"{model_name}.pth")} https://zxhezexin.com/modelzoo/openlrm/{model_name}.pth')
# raise FileNotFoundError(f"Checkpoint {model_name} not found in {cache_dir}")
repo_id = f'zxhezexin/{model_name}'
config_path = hf_hub_download(repo_id=repo_id, filename='config.json', local_dir=local_dir)
model_path = hf_hub_download(repo_id=repo_id, filename=f'model.pth', local_dir=local_dir)
else:
model_path = os.path.join(local_dir, f'model.pth')
checkpoint = torch.load(model_path, map_location=self.device)
return checkpoint
def _build_model(self, model_kwargs, model_weights):
model = LRMGenerator(**model_kwargs).to(self.device)
model.load_state_dict(model_weights)
print(f"======== Loaded model from checkpoint ========")
return model
@staticmethod
def _get_surrounding_views(M: int = 160, radius: float = 2.0, height: float = 0.8):
# M: number of surrounding views
# radius: camera dist to center
# height: height of the camera
# return: (M, 3, 4)
assert M > 0
assert radius > 0
camera_positions = []
projected_radius = math.sqrt(radius ** 2 - height ** 2)
for i in range(M):
theta = 2 * math.pi * i / M - math.pi / 2
x = projected_radius * math.cos(theta)
y = projected_radius * math.sin(theta)
z = height
camera_positions.append([x, y, z])
camera_positions = torch.tensor(camera_positions, dtype=torch.float32)
extrinsics = center_looking_at_camera_pose(camera_positions)
return extrinsics
@staticmethod
def _default_intrinsics():
# return: (3, 2)
fx = fy = 384
cx = cy = 256
w = h = 512
intrinsics = torch.tensor([
[fx, fy],
[cx, cy],
[w, h],
], dtype=torch.float32)
return intrinsics
def _default_source_camera(self, batch_size: int = 1):
# return: (N, D_cam_raw)
dist_to_center = 2
canonical_camera_extrinsics = torch.tensor([[
[1, 0, 0, 0],
[0, 0, -1, -dist_to_center],
[0, 1, 0, 0],
]], dtype=torch.float32)
canonical_camera_intrinsics = self._default_intrinsics().unsqueeze(0)
source_camera = build_camera_principle(canonical_camera_extrinsics, canonical_camera_intrinsics)
return source_camera.repeat(batch_size, 1)
def _default_render_cameras(self, batch_size: int = 1):
# return: (N, M, D_cam_render)
render_camera_extrinsics = self._get_surrounding_views()
render_camera_intrinsics = self._default_intrinsics().unsqueeze(0).repeat(render_camera_extrinsics.shape[0], 1, 1) | render_cameras = build_camera_standard(render_camera_extrinsics, render_camera_intrinsics) | 2 | 2023-12-20 10:52:01+00:00 | 4k |
xinghaochen/TinySAM | tinysam/predictor.py | [
{
"identifier": "Sam",
"path": "tinysam/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: Union[ImageEncoderViT, TinyViT],\n prompt_encoder: PromptEncoder,\n mask... | import numpy as np
import torch
from .modeling import Sam
from typing import Optional, Tuple
from .utils.transforms import ResizeLongestSide | 2,718 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamPredictor:
def __init__(
self,
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamPredictor:
def __init__(
self, | sam_model: Sam, | 0 | 2023-12-19 11:25:54+00:00 | 4k |
VikParuchuri/texify | benchmark.py | [
{
"identifier": "batch_inference",
"path": "texify/inference.py",
"snippet": "def batch_inference(images, model, processor, temperature=settings.TEMPERATURE, max_tokens=settings.MAX_TOKENS):\n images = [image.convert(\"RGB\") for image in images]\n encodings = processor(images=images, return_tenso... | import argparse
import os.path
import random
import time
import evaluate
import json
import base64
import io
import torch
from functools import partial
from tabulate import tabulate
from tqdm import tqdm
from texify.inference import batch_inference
from texify.model.model import load_model
from texify.model.processor import load_processor
from PIL import Image
from texify.settings import settings
from rapidfuzz.distance import Levenshtein
from pix2tex.cli import LatexOCR
from nougat.postprocessing import markdown_compatible
from nougat.utils.checkpoint import get_checkpoint
from nougat.utils.dataset import ImageDataset
from nougat.utils.device import move_to_device
from nougat import NougatModel | 1,827 | text = text.replace("$", "")
text = text.replace("\[", "")
text = text.replace("\]", "")
text = text.replace("\(", "")
text = text.replace("\)", "")
text = text.strip()
return text
def score_text(predictions, references):
bleu = evaluate.load("bleu")
bleu_results = bleu.compute(predictions=predictions, references=references)
meteor = evaluate.load('meteor')
meteor_results = meteor.compute(predictions=predictions, references=references)
lev_dist = []
for p, r in zip(predictions, references):
lev_dist.append(Levenshtein.normalized_distance(p, r))
return {
'bleu': bleu_results["bleu"],
'meteor': meteor_results['meteor'],
'edit': sum(lev_dist) / len(lev_dist)
}
def image_to_pil(image):
decoded = base64.b64decode(image)
return Image.open(io.BytesIO(decoded))
def load_images(source_data):
images = [sd["image"] for sd in source_data]
images = [image_to_pil(image) for image in images]
return images
def inference_texify(source_data, model, processor):
images = load_images(source_data)
write_data = []
for i in tqdm(range(0, len(images), settings.BATCH_SIZE), desc="Texify inference"):
batch = images[i:i+settings.BATCH_SIZE]
text = batch_inference(batch, model, processor)
for j, t in enumerate(text):
eq_idx = i + j
write_data.append({"text": t, "equation": source_data[eq_idx]["equation"]})
return write_data
def inference_pix2tex(source_data):
model = LatexOCR()
images = load_images(source_data)
write_data = []
for i in tqdm(range(len(images)), desc="Pix2tex inference"):
try:
text = model(images[i])
except ValueError:
# Happens when resize fails
text = ""
write_data.append({"text": text, "equation": source_data[i]["equation"]})
return write_data
def image_to_bmp(image):
img_out = io.BytesIO()
image.save(img_out, format="BMP")
return img_out
def inference_nougat(source_data, batch_size=1):
# Load images, then convert to bmp format for nougat
images = load_images(source_data)
images = [image_to_bmp(image) for image in images]
predictions = []
ckpt = get_checkpoint(None, model_tag="0.1.0-small")
model = NougatModel.from_pretrained(ckpt)
if settings.TORCH_DEVICE_MODEL != "cpu":
move_to_device(model, bf16=settings.CUDA, cuda=settings.CUDA)
model.eval()
dataset = ImageDataset(
images,
partial(model.encoder.prepare_input, random_padding=False),
)
# Batch sizes higher than 1 explode memory usage on CPU/MPS
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
pin_memory=True,
shuffle=False,
)
for idx, sample in tqdm(enumerate(dataloader), desc="Nougat inference", total=len(dataloader)):
model.config.max_length = settings.MAX_TOKENS
model_output = model.inference(image_tensors=sample, early_stopping=False)
output = [markdown_compatible(o) for o in model_output["predictions"]]
predictions.extend(output)
return predictions
def main():
parser = argparse.ArgumentParser(description="Benchmark the performance of texify.")
parser.add_argument("--data_path", type=str, help="Path to JSON file with source images/equations", default=os.path.join(settings.DATA_DIR, "bench_data.json"))
parser.add_argument("--result_path", type=str, help="Path to JSON file to save results to.", default=os.path.join(settings.DATA_DIR, "bench_results.json"))
parser.add_argument("--max", type=int, help="Maximum number of images to benchmark.", default=None)
parser.add_argument("--pix2tex", action="store_true", help="Run pix2tex scoring", default=False)
parser.add_argument("--nougat", action="store_true", help="Run nougat scoring", default=False)
args = parser.parse_args()
source_path = os.path.abspath(args.data_path)
result_path = os.path.abspath(args.result_path)
os.makedirs(os.path.dirname(result_path), exist_ok=True)
|
def normalize_text(text):
# Replace fences
text = text.replace("$", "")
text = text.replace("\[", "")
text = text.replace("\]", "")
text = text.replace("\(", "")
text = text.replace("\)", "")
text = text.strip()
return text
def score_text(predictions, references):
bleu = evaluate.load("bleu")
bleu_results = bleu.compute(predictions=predictions, references=references)
meteor = evaluate.load('meteor')
meteor_results = meteor.compute(predictions=predictions, references=references)
lev_dist = []
for p, r in zip(predictions, references):
lev_dist.append(Levenshtein.normalized_distance(p, r))
return {
'bleu': bleu_results["bleu"],
'meteor': meteor_results['meteor'],
'edit': sum(lev_dist) / len(lev_dist)
}
def image_to_pil(image):
decoded = base64.b64decode(image)
return Image.open(io.BytesIO(decoded))
def load_images(source_data):
images = [sd["image"] for sd in source_data]
images = [image_to_pil(image) for image in images]
return images
def inference_texify(source_data, model, processor):
images = load_images(source_data)
write_data = []
for i in tqdm(range(0, len(images), settings.BATCH_SIZE), desc="Texify inference"):
batch = images[i:i+settings.BATCH_SIZE]
text = batch_inference(batch, model, processor)
for j, t in enumerate(text):
eq_idx = i + j
write_data.append({"text": t, "equation": source_data[eq_idx]["equation"]})
return write_data
def inference_pix2tex(source_data):
model = LatexOCR()
images = load_images(source_data)
write_data = []
for i in tqdm(range(len(images)), desc="Pix2tex inference"):
try:
text = model(images[i])
except ValueError:
# Happens when resize fails
text = ""
write_data.append({"text": text, "equation": source_data[i]["equation"]})
return write_data
def image_to_bmp(image):
img_out = io.BytesIO()
image.save(img_out, format="BMP")
return img_out
def inference_nougat(source_data, batch_size=1):
# Load images, then convert to bmp format for nougat
images = load_images(source_data)
images = [image_to_bmp(image) for image in images]
predictions = []
ckpt = get_checkpoint(None, model_tag="0.1.0-small")
model = NougatModel.from_pretrained(ckpt)
if settings.TORCH_DEVICE_MODEL != "cpu":
move_to_device(model, bf16=settings.CUDA, cuda=settings.CUDA)
model.eval()
dataset = ImageDataset(
images,
partial(model.encoder.prepare_input, random_padding=False),
)
# Batch sizes higher than 1 explode memory usage on CPU/MPS
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
pin_memory=True,
shuffle=False,
)
for idx, sample in tqdm(enumerate(dataloader), desc="Nougat inference", total=len(dataloader)):
model.config.max_length = settings.MAX_TOKENS
model_output = model.inference(image_tensors=sample, early_stopping=False)
output = [markdown_compatible(o) for o in model_output["predictions"]]
predictions.extend(output)
return predictions
def main():
parser = argparse.ArgumentParser(description="Benchmark the performance of texify.")
parser.add_argument("--data_path", type=str, help="Path to JSON file with source images/equations", default=os.path.join(settings.DATA_DIR, "bench_data.json"))
parser.add_argument("--result_path", type=str, help="Path to JSON file to save results to.", default=os.path.join(settings.DATA_DIR, "bench_results.json"))
parser.add_argument("--max", type=int, help="Maximum number of images to benchmark.", default=None)
parser.add_argument("--pix2tex", action="store_true", help="Run pix2tex scoring", default=False)
parser.add_argument("--nougat", action="store_true", help="Run nougat scoring", default=False)
args = parser.parse_args()
source_path = os.path.abspath(args.data_path)
result_path = os.path.abspath(args.result_path)
os.makedirs(os.path.dirname(result_path), exist_ok=True) | model = load_model() | 1 | 2023-12-18 22:59:58+00:00 | 4k |
dcharatan/pixelsplat | src/model/decoder/cuda_splatting.py | [
{
"identifier": "get_fov",
"path": "src/geometry/projection.py",
"snippet": "def get_fov(intrinsics: Float[Tensor, \"batch 3 3\"]) -> Float[Tensor, \"batch 2\"]:\n intrinsics_inv = intrinsics.inverse()\n\n def process_vector(vector):\n vector = torch.tensor(vector, dtype=torch.float32, devi... | from math import isqrt
from typing import Literal
from diff_gaussian_rasterization import (
GaussianRasterizationSettings,
GaussianRasterizer,
)
from einops import einsum, rearrange, repeat
from jaxtyping import Float
from torch import Tensor
from ...geometry.projection import get_fov, homogenize_points
from ..encoder.epipolar.conversions import depth_to_relative_disparity
import torch | 2,894 | return torch.stack(all_images)
def render_cuda_orthographic(
extrinsics: Float[Tensor, "batch 4 4"],
width: Float[Tensor, " batch"],
height: Float[Tensor, " batch"],
near: Float[Tensor, " batch"],
far: Float[Tensor, " batch"],
image_shape: tuple[int, int],
background_color: Float[Tensor, "batch 3"],
gaussian_means: Float[Tensor, "batch gaussian 3"],
gaussian_covariances: Float[Tensor, "batch gaussian 3 3"],
gaussian_sh_coefficients: Float[Tensor, "batch gaussian 3 d_sh"],
gaussian_opacities: Float[Tensor, "batch gaussian"],
fov_degrees: float = 0.1,
use_sh: bool = True,
dump: dict | None = None,
) -> Float[Tensor, "batch 3 height width"]:
b, _, _ = extrinsics.shape
h, w = image_shape
assert use_sh or gaussian_sh_coefficients.shape[-1] == 1
_, _, _, n = gaussian_sh_coefficients.shape
degree = isqrt(n) - 1
shs = rearrange(gaussian_sh_coefficients, "b g xyz n -> b g n xyz").contiguous()
# Create fake "orthographic" projection by moving the camera back and picking a
# small field of view.
fov_x = torch.tensor(fov_degrees, device=extrinsics.device).deg2rad()
tan_fov_x = (0.5 * fov_x).tan()
distance_to_near = (0.5 * width) / tan_fov_x
tan_fov_y = 0.5 * height / distance_to_near
fov_y = (2 * tan_fov_y).atan()
near = near + distance_to_near
far = far + distance_to_near
move_back = torch.eye(4, dtype=torch.float32, device=extrinsics.device)
move_back[2, 3] = -distance_to_near
extrinsics = extrinsics @ move_back
# Escape hatch for visualization/figures.
if dump is not None:
dump["extrinsics"] = extrinsics
dump["fov_x"] = fov_x
dump["fov_y"] = fov_y
dump["near"] = near
dump["far"] = far
projection_matrix = get_projection_matrix(
near, far, repeat(fov_x, "-> b", b=b), fov_y
)
projection_matrix = rearrange(projection_matrix, "b i j -> b j i")
view_matrix = rearrange(extrinsics.inverse(), "b i j -> b j i")
full_projection = view_matrix @ projection_matrix
all_images = []
all_radii = []
for i in range(b):
# Set up a tensor for the gradients of the screen-space means.
mean_gradients = torch.zeros_like(gaussian_means[i], requires_grad=True)
try:
mean_gradients.retain_grad()
except Exception:
pass
settings = GaussianRasterizationSettings(
image_height=h,
image_width=w,
tanfovx=tan_fov_x,
tanfovy=tan_fov_y,
bg=background_color[i],
scale_modifier=1.0,
viewmatrix=view_matrix[i],
projmatrix=full_projection[i],
sh_degree=degree,
campos=extrinsics[i, :3, 3],
prefiltered=False, # This matches the original usage.
debug=False,
)
rasterizer = GaussianRasterizer(settings)
row, col = torch.triu_indices(3, 3)
image, radii = rasterizer(
means3D=gaussian_means[i],
means2D=mean_gradients,
shs=shs[i] if use_sh else None,
colors_precomp=None if use_sh else shs[i, :, 0, :],
opacities=gaussian_opacities[i, ..., None],
cov3D_precomp=gaussian_covariances[i, :, row, col],
)
all_images.append(image)
all_radii.append(radii)
return torch.stack(all_images)
DepthRenderingMode = Literal["depth", "disparity", "relative_disparity", "log"]
def render_depth_cuda(
extrinsics: Float[Tensor, "batch 4 4"],
intrinsics: Float[Tensor, "batch 3 3"],
near: Float[Tensor, " batch"],
far: Float[Tensor, " batch"],
image_shape: tuple[int, int],
gaussian_means: Float[Tensor, "batch gaussian 3"],
gaussian_covariances: Float[Tensor, "batch gaussian 3 3"],
gaussian_opacities: Float[Tensor, "batch gaussian"],
scale_invariant: bool = True,
mode: DepthRenderingMode = "depth",
) -> Float[Tensor, "batch height width"]:
# Specify colors according to Gaussian depths.
camera_space_gaussians = einsum(
extrinsics.inverse(), homogenize_points(gaussian_means), "b i j, b g j -> b g i"
)
fake_color = camera_space_gaussians[..., 2]
if mode == "disparity":
fake_color = 1 / fake_color
elif mode == "relative_disparity":
|
def get_projection_matrix(
near: Float[Tensor, " batch"],
far: Float[Tensor, " batch"],
fov_x: Float[Tensor, " batch"],
fov_y: Float[Tensor, " batch"],
) -> Float[Tensor, "batch 4 4"]:
"""Maps points in the viewing frustum to (-1, 1) on the X/Y axes and (0, 1) on the Z
axis. Differs from the OpenGL version in that Z doesn't have range (-1, 1) after
transformation and that Z is flipped.
"""
tan_fov_x = (0.5 * fov_x).tan()
tan_fov_y = (0.5 * fov_y).tan()
top = tan_fov_y * near
bottom = -top
right = tan_fov_x * near
left = -right
(b,) = near.shape
result = torch.zeros((b, 4, 4), dtype=torch.float32, device=near.device)
result[:, 0, 0] = 2 * near / (right - left)
result[:, 1, 1] = 2 * near / (top - bottom)
result[:, 0, 2] = (right + left) / (right - left)
result[:, 1, 2] = (top + bottom) / (top - bottom)
result[:, 3, 2] = 1
result[:, 2, 2] = far / (far - near)
result[:, 2, 3] = -(far * near) / (far - near)
return result
def render_cuda(
extrinsics: Float[Tensor, "batch 4 4"],
intrinsics: Float[Tensor, "batch 3 3"],
near: Float[Tensor, " batch"],
far: Float[Tensor, " batch"],
image_shape: tuple[int, int],
background_color: Float[Tensor, "batch 3"],
gaussian_means: Float[Tensor, "batch gaussian 3"],
gaussian_covariances: Float[Tensor, "batch gaussian 3 3"],
gaussian_sh_coefficients: Float[Tensor, "batch gaussian 3 d_sh"],
gaussian_opacities: Float[Tensor, "batch gaussian"],
scale_invariant: bool = True,
use_sh: bool = True,
) -> Float[Tensor, "batch 3 height width"]:
assert use_sh or gaussian_sh_coefficients.shape[-1] == 1
# Make sure everything is in a range where numerical issues don't appear.
if scale_invariant:
scale = 1 / near
extrinsics = extrinsics.clone()
extrinsics[..., :3, 3] = extrinsics[..., :3, 3] * scale[:, None]
gaussian_covariances = gaussian_covariances * (scale[:, None, None, None] ** 2)
gaussian_means = gaussian_means * scale[:, None, None]
near = near * scale
far = far * scale
_, _, _, n = gaussian_sh_coefficients.shape
degree = isqrt(n) - 1
shs = rearrange(gaussian_sh_coefficients, "b g xyz n -> b g n xyz").contiguous()
b, _, _ = extrinsics.shape
h, w = image_shape
fov_x, fov_y = get_fov(intrinsics).unbind(dim=-1)
tan_fov_x = (0.5 * fov_x).tan()
tan_fov_y = (0.5 * fov_y).tan()
projection_matrix = get_projection_matrix(near, far, fov_x, fov_y)
projection_matrix = rearrange(projection_matrix, "b i j -> b j i")
view_matrix = rearrange(extrinsics.inverse(), "b i j -> b j i")
full_projection = view_matrix @ projection_matrix
all_images = []
all_radii = []
for i in range(b):
# Set up a tensor for the gradients of the screen-space means.
mean_gradients = torch.zeros_like(gaussian_means[i], requires_grad=True)
try:
mean_gradients.retain_grad()
except Exception:
pass
settings = GaussianRasterizationSettings(
image_height=h,
image_width=w,
tanfovx=tan_fov_x[i].item(),
tanfovy=tan_fov_y[i].item(),
bg=background_color[i],
scale_modifier=1.0,
viewmatrix=view_matrix[i],
projmatrix=full_projection[i],
sh_degree=degree,
campos=extrinsics[i, :3, 3],
prefiltered=False, # This matches the original usage.
debug=False,
)
rasterizer = GaussianRasterizer(settings)
row, col = torch.triu_indices(3, 3)
image, radii = rasterizer(
means3D=gaussian_means[i],
means2D=mean_gradients,
shs=shs[i] if use_sh else None,
colors_precomp=None if use_sh else shs[i, :, 0, :],
opacities=gaussian_opacities[i, ..., None],
cov3D_precomp=gaussian_covariances[i, :, row, col],
)
all_images.append(image)
all_radii.append(radii)
return torch.stack(all_images)
def render_cuda_orthographic(
extrinsics: Float[Tensor, "batch 4 4"],
width: Float[Tensor, " batch"],
height: Float[Tensor, " batch"],
near: Float[Tensor, " batch"],
far: Float[Tensor, " batch"],
image_shape: tuple[int, int],
background_color: Float[Tensor, "batch 3"],
gaussian_means: Float[Tensor, "batch gaussian 3"],
gaussian_covariances: Float[Tensor, "batch gaussian 3 3"],
gaussian_sh_coefficients: Float[Tensor, "batch gaussian 3 d_sh"],
gaussian_opacities: Float[Tensor, "batch gaussian"],
fov_degrees: float = 0.1,
use_sh: bool = True,
dump: dict | None = None,
) -> Float[Tensor, "batch 3 height width"]:
b, _, _ = extrinsics.shape
h, w = image_shape
assert use_sh or gaussian_sh_coefficients.shape[-1] == 1
_, _, _, n = gaussian_sh_coefficients.shape
degree = isqrt(n) - 1
shs = rearrange(gaussian_sh_coefficients, "b g xyz n -> b g n xyz").contiguous()
# Create fake "orthographic" projection by moving the camera back and picking a
# small field of view.
fov_x = torch.tensor(fov_degrees, device=extrinsics.device).deg2rad()
tan_fov_x = (0.5 * fov_x).tan()
distance_to_near = (0.5 * width) / tan_fov_x
tan_fov_y = 0.5 * height / distance_to_near
fov_y = (2 * tan_fov_y).atan()
near = near + distance_to_near
far = far + distance_to_near
move_back = torch.eye(4, dtype=torch.float32, device=extrinsics.device)
move_back[2, 3] = -distance_to_near
extrinsics = extrinsics @ move_back
# Escape hatch for visualization/figures.
if dump is not None:
dump["extrinsics"] = extrinsics
dump["fov_x"] = fov_x
dump["fov_y"] = fov_y
dump["near"] = near
dump["far"] = far
projection_matrix = get_projection_matrix(
near, far, repeat(fov_x, "-> b", b=b), fov_y
)
projection_matrix = rearrange(projection_matrix, "b i j -> b j i")
view_matrix = rearrange(extrinsics.inverse(), "b i j -> b j i")
full_projection = view_matrix @ projection_matrix
all_images = []
all_radii = []
for i in range(b):
# Set up a tensor for the gradients of the screen-space means.
mean_gradients = torch.zeros_like(gaussian_means[i], requires_grad=True)
try:
mean_gradients.retain_grad()
except Exception:
pass
settings = GaussianRasterizationSettings(
image_height=h,
image_width=w,
tanfovx=tan_fov_x,
tanfovy=tan_fov_y,
bg=background_color[i],
scale_modifier=1.0,
viewmatrix=view_matrix[i],
projmatrix=full_projection[i],
sh_degree=degree,
campos=extrinsics[i, :3, 3],
prefiltered=False, # This matches the original usage.
debug=False,
)
rasterizer = GaussianRasterizer(settings)
row, col = torch.triu_indices(3, 3)
image, radii = rasterizer(
means3D=gaussian_means[i],
means2D=mean_gradients,
shs=shs[i] if use_sh else None,
colors_precomp=None if use_sh else shs[i, :, 0, :],
opacities=gaussian_opacities[i, ..., None],
cov3D_precomp=gaussian_covariances[i, :, row, col],
)
all_images.append(image)
all_radii.append(radii)
return torch.stack(all_images)
DepthRenderingMode = Literal["depth", "disparity", "relative_disparity", "log"]
def render_depth_cuda(
extrinsics: Float[Tensor, "batch 4 4"],
intrinsics: Float[Tensor, "batch 3 3"],
near: Float[Tensor, " batch"],
far: Float[Tensor, " batch"],
image_shape: tuple[int, int],
gaussian_means: Float[Tensor, "batch gaussian 3"],
gaussian_covariances: Float[Tensor, "batch gaussian 3 3"],
gaussian_opacities: Float[Tensor, "batch gaussian"],
scale_invariant: bool = True,
mode: DepthRenderingMode = "depth",
) -> Float[Tensor, "batch height width"]:
# Specify colors according to Gaussian depths.
camera_space_gaussians = einsum(
extrinsics.inverse(), homogenize_points(gaussian_means), "b i j, b g j -> b g i"
)
fake_color = camera_space_gaussians[..., 2]
if mode == "disparity":
fake_color = 1 / fake_color
elif mode == "relative_disparity": | fake_color = depth_to_relative_disparity( | 2 | 2023-12-20 19:45:59+00:00 | 4k |
FoundationVision/GLEE | app/GLEE/glee/backbone/resnet.py | [
{
"identifier": "Backbone",
"path": "app/GLEE/glee/backbone/backbone.py",
"snippet": "class Backbone(nn.Module):\n \"\"\"\n Abstract base class for network backbones.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n The `__init__` method of any subclass can specify its own set of ar... | import pickle
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from typing import Any, Dict
from torch import nn
from .backbone import Backbone
from .registry import register_backbone
from detectron2.layers import (
CNNBlockBase,
Conv2d,
DeformConv,
ModulatedDeformConv,
ShapeSpec,
get_norm,
)
from detectron2.utils.file_io import PathManager | 3,089 | Examples:
::
stage = ResNet.make_stage(
BottleneckBlock, 3, in_channels=16, out_channels=64,
bottleneck_channels=16, num_groups=1,
stride_per_block=[2, 1, 1],
dilations_per_block=[1, 1, 2]
)
Usually, layers that produce the same feature map spatial size are defined as one
"stage" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should
all be 1.
"""
blocks = []
for i in range(num_blocks):
curr_kwargs = {}
for k, v in kwargs.items():
if k.endswith("_per_block"):
assert len(v) == num_blocks, (
f"Argument '{k}' of make_stage should have the "
f"same length as num_blocks={num_blocks}."
)
newk = k[: -len("_per_block")]
assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!"
curr_kwargs[newk] = v[i]
else:
curr_kwargs[k] = v
blocks.append(
block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)
)
in_channels = out_channels
return blocks
@staticmethod
def make_default_stages(depth, block_class=None, **kwargs):
"""
Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152).
If it doesn't create the ResNet variant you need, please use :meth:`make_stage`
instead for fine-grained customization.
Args:
depth (int): depth of ResNet
block_class (type): the CNN block class. Has to accept
`bottleneck_channels` argument for depth > 50.
By default it is BasicBlock or BottleneckBlock, based on the
depth.
kwargs:
other arguments to pass to `make_stage`. Should not contain
stride and channels, as they are predefined for each depth.
Returns:
list[list[CNNBlockBase]]: modules in all stages; see arguments of
:class:`ResNet.__init__`.
"""
num_blocks_per_stage = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
}[depth]
if block_class is None:
block_class = BasicBlock if depth < 50 else BottleneckBlock
if depth < 50:
in_channels = [64, 64, 128, 256]
out_channels = [64, 128, 256, 512]
else:
in_channels = [64, 256, 512, 1024]
out_channels = [256, 512, 1024, 2048]
ret = []
for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels):
if depth >= 50:
kwargs["bottleneck_channels"] = o // 4
ret.append(
ResNet.make_stage(
block_class=block_class,
num_blocks=n,
stride_per_block=[s] + [1] * (n - 1),
in_channels=i,
out_channels=o,
**kwargs,
)
)
return ret
ResNetBlockBase = CNNBlockBase
"""
Alias for backward compatibiltiy.
"""
def make_stage(*args, **kwargs):
"""
Deprecated alias for backward compatibiltiy.
"""
return ResNet.make_stage(*args, **kwargs)
def _convert_ndarray_to_tensor(state_dict: Dict[str, Any]) -> None:
"""
In-place convert all numpy arrays in the state_dict to torch tensor.
Args:
state_dict (dict): a state-dict to be loaded to the model.
Will be modified.
"""
# model could be an OrderedDict with _metadata attribute
# (as returned by Pytorch's state_dict()). We should preserve these
# properties.
for k in list(state_dict.keys()):
v = state_dict[k]
if not isinstance(v, np.ndarray) and not isinstance(v, torch.Tensor):
raise ValueError(
"Unsupported type found in checkpoint! {}: {}".format(k, type(v))
)
if not isinstance(v, torch.Tensor):
state_dict[k] = torch.from_numpy(v)
| # Copyright (c) Facebook, Inc. and its affiliates.
__all__ = [
"ResNetBlockBase",
"BasicBlock",
"BottleneckBlock",
"DeformBottleneckBlock",
"BasicStem",
"ResNet",
"make_stage",
"get_resnet_backbone",
]
class BasicBlock(CNNBlockBase):
"""
The basic residual block for ResNet-18 and ResNet-34 defined in :paper:`ResNet`,
with two 3x3 conv layers and a projection shortcut if needed.
"""
def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"):
"""
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
stride (int): Stride for the first conv.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
"""
super().__init__(in_channels, out_channels, stride)
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
norm=get_norm(norm, out_channels),
)
self.conv2 = Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
out = self.conv2(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
class BottleneckBlock(CNNBlockBase):
"""
The standard bottleneck residual block used by ResNet-50, 101 and 152
defined in :paper:`ResNet`. It contains 3 conv layers with kernels
1x1, 3x3, 1x1, and a projection shortcut if needed.
"""
def __init__(
self,
in_channels,
out_channels,
*,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
dilation=1,
):
"""
Args:
bottleneck_channels (int): number of output channels for the 3x3
"bottleneck" conv layers.
num_groups (int): number of groups for the 3x3 conv layer.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
stride_in_1x1 (bool): when stride>1, whether to put stride in the
first 1x1 convolution or the bottleneck 3x3 convolution.
dilation (int): the dilation rate of the 3x3 conv layer.
"""
super().__init__(in_channels, out_channels, stride)
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
# The original MSRA ResNet models have stride in the first 1x1 conv
# The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
# stride in the 3x3 conv
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
bias=False,
groups=num_groups,
dilation=dilation,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
# Zero-initialize the last normalization in each residual branch,
# so that at the beginning, the residual branch starts with zeros,
# and each residual block behaves like an identity.
# See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "For BN layers, the learnable scaling coefficient γ is initialized
# to be 1, except for each residual block's last BN
# where γ is initialized to be 0."
# nn.init.constant_(self.conv3.norm.weight, 0)
# TODO this somehow hurts performance when training GN models from scratch.
# Add it as an option when we need to use this code to train a backbone.
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
out = self.conv2(out)
out = F.relu_(out)
out = self.conv3(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
class DeformBottleneckBlock(CNNBlockBase):
"""
Similar to :class:`BottleneckBlock`, but with :paper:`deformable conv <deformconv>`
in the 3x3 convolution.
"""
def __init__(
self,
in_channels,
out_channels,
*,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
dilation=1,
deform_modulated=False,
deform_num_groups=1,
):
super().__init__(in_channels, out_channels, stride)
self.deform_modulated = deform_modulated
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
if deform_modulated:
deform_conv_op = ModulatedDeformConv
# offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size
offset_channels = 27
else:
deform_conv_op = DeformConv
offset_channels = 18
self.conv2_offset = Conv2d(
bottleneck_channels,
offset_channels * deform_num_groups,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
dilation=dilation,
)
self.conv2 = deform_conv_op(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
bias=False,
groups=num_groups,
dilation=dilation,
deformable_groups=deform_num_groups,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
nn.init.constant_(self.conv2_offset.weight, 0)
nn.init.constant_(self.conv2_offset.bias, 0)
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
if self.deform_modulated:
offset_mask = self.conv2_offset(out)
offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
offset = torch.cat((offset_x, offset_y), dim=1)
mask = mask.sigmoid()
out = self.conv2(out, offset, mask)
else:
offset = self.conv2_offset(out)
out = self.conv2(out, offset)
out = F.relu_(out)
out = self.conv3(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
class BasicStem(CNNBlockBase):
"""
The standard ResNet stem (layers before the first residual block),
with a conv, relu and max_pool.
"""
def __init__(self, in_channels=3, out_channels=64, norm="BN"):
"""
Args:
norm (str or callable): norm after the first conv layer.
See :func:`layers.get_norm` for supported format.
"""
super().__init__(in_channels, out_channels, 4)
self.in_channels = in_channels
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False,
norm=get_norm(norm, out_channels),
)
weight_init.c2_msra_fill(self.conv1)
def forward(self, x):
x = self.conv1(x)
x = F.relu_(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
class ResNet(Backbone):
"""
Implement :paper:`ResNet`.
"""
def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0):
"""
Args:
stem (nn.Module): a stem module
stages (list[list[CNNBlockBase]]): several (typically 4) stages,
each contains multiple :class:`CNNBlockBase`.
num_classes (None or int): if None, will not perform classification.
Otherwise, will create a linear layer.
out_features (list[str]): name of the layers whose outputs should
be returned in forward. Can be anything in "stem", "linear", or "res2" ...
If None, will return the output of the last layer.
freeze_at (int): The number of stages at the beginning to freeze.
see :meth:`freeze` for detailed explanation.
"""
super().__init__()
self.stem = stem
self.num_classes = num_classes
current_stride = self.stem.stride
self._out_feature_strides = {"stem": current_stride}
self._out_feature_channels = {"stem": self.stem.out_channels}
self.stage_names, self.stages = [], []
if out_features is not None:
# Avoid keeping unused layers in this module. They consume extra memory
# and may cause allreduce to fail
num_stages = max(
[{"res2": 1, "res3": 2, "res4": 3, "res5": 4}.get(f, 0) for f in out_features]
)
stages = stages[:num_stages]
for i, blocks in enumerate(stages):
assert len(blocks) > 0, len(blocks)
for block in blocks:
assert isinstance(block, CNNBlockBase), block
name = "res" + str(i + 2)
stage = nn.Sequential(*blocks)
self.add_module(name, stage)
self.stage_names.append(name)
self.stages.append(stage)
self._out_feature_strides[name] = current_stride = int(
current_stride * np.prod([k.stride for k in blocks])
)
self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels
self.stage_names = tuple(self.stage_names) # Make it static for scripting
if num_classes is not None:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(curr_channels, num_classes)
# Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "The 1000-way fully-connected layer is initialized by
# drawing weights from a zero-mean Gaussian with standard deviation of 0.01."
nn.init.normal_(self.linear.weight, std=0.01)
name = "linear"
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
children = [x[0] for x in self.named_children()]
for out_feature in self._out_features:
assert out_feature in children, "Available children: {}".format(", ".join(children))
self.freeze(freeze_at)
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert x.dim() == 4, f"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!"
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for name, stage in zip(self.stage_names, self.stages):
x = stage(x)
if name in self._out_features:
outputs[name] = x
if self.num_classes is not None:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.linear(x)
if "linear" in self._out_features:
outputs["linear"] = x
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def freeze(self, freeze_at=0):
"""
Freeze the first several stages of the ResNet. Commonly used in
fine-tuning.
Layers that produce the same feature map spatial size are defined as one
"stage" by :paper:`FPN`.
Args:
freeze_at (int): number of stages to freeze.
`1` means freezing the stem. `2` means freezing the stem and
one residual stage, etc.
Returns:
nn.Module: this ResNet itself
"""
if freeze_at >= 1:
self.stem.freeze()
for idx, stage in enumerate(self.stages, start=2):
if freeze_at >= idx:
for block in stage.children():
block.freeze()
return self
@staticmethod
def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs):
"""
Create a list of blocks of the same type that forms one ResNet stage.
Args:
block_class (type): a subclass of CNNBlockBase that's used to create all blocks in this
stage. A module of this type must not change spatial resolution of inputs unless its
stride != 1.
num_blocks (int): number of blocks in this stage
in_channels (int): input channels of the entire stage.
out_channels (int): output channels of **every block** in the stage.
kwargs: other arguments passed to the constructor of
`block_class`. If the argument name is "xx_per_block", the
argument is a list of values to be passed to each block in the
stage. Otherwise, the same argument is passed to every block
in the stage.
Returns:
list[CNNBlockBase]: a list of block module.
Examples:
::
stage = ResNet.make_stage(
BottleneckBlock, 3, in_channels=16, out_channels=64,
bottleneck_channels=16, num_groups=1,
stride_per_block=[2, 1, 1],
dilations_per_block=[1, 1, 2]
)
Usually, layers that produce the same feature map spatial size are defined as one
"stage" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should
all be 1.
"""
blocks = []
for i in range(num_blocks):
curr_kwargs = {}
for k, v in kwargs.items():
if k.endswith("_per_block"):
assert len(v) == num_blocks, (
f"Argument '{k}' of make_stage should have the "
f"same length as num_blocks={num_blocks}."
)
newk = k[: -len("_per_block")]
assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!"
curr_kwargs[newk] = v[i]
else:
curr_kwargs[k] = v
blocks.append(
block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)
)
in_channels = out_channels
return blocks
@staticmethod
def make_default_stages(depth, block_class=None, **kwargs):
"""
Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152).
If it doesn't create the ResNet variant you need, please use :meth:`make_stage`
instead for fine-grained customization.
Args:
depth (int): depth of ResNet
block_class (type): the CNN block class. Has to accept
`bottleneck_channels` argument for depth > 50.
By default it is BasicBlock or BottleneckBlock, based on the
depth.
kwargs:
other arguments to pass to `make_stage`. Should not contain
stride and channels, as they are predefined for each depth.
Returns:
list[list[CNNBlockBase]]: modules in all stages; see arguments of
:class:`ResNet.__init__`.
"""
num_blocks_per_stage = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
}[depth]
if block_class is None:
block_class = BasicBlock if depth < 50 else BottleneckBlock
if depth < 50:
in_channels = [64, 64, 128, 256]
out_channels = [64, 128, 256, 512]
else:
in_channels = [64, 256, 512, 1024]
out_channels = [256, 512, 1024, 2048]
ret = []
for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels):
if depth >= 50:
kwargs["bottleneck_channels"] = o // 4
ret.append(
ResNet.make_stage(
block_class=block_class,
num_blocks=n,
stride_per_block=[s] + [1] * (n - 1),
in_channels=i,
out_channels=o,
**kwargs,
)
)
return ret
ResNetBlockBase = CNNBlockBase
"""
Alias for backward compatibiltiy.
"""
def make_stage(*args, **kwargs):
"""
Deprecated alias for backward compatibiltiy.
"""
return ResNet.make_stage(*args, **kwargs)
def _convert_ndarray_to_tensor(state_dict: Dict[str, Any]) -> None:
"""
In-place convert all numpy arrays in the state_dict to torch tensor.
Args:
state_dict (dict): a state-dict to be loaded to the model.
Will be modified.
"""
# model could be an OrderedDict with _metadata attribute
# (as returned by Pytorch's state_dict()). We should preserve these
# properties.
for k in list(state_dict.keys()):
v = state_dict[k]
if not isinstance(v, np.ndarray) and not isinstance(v, torch.Tensor):
raise ValueError(
"Unsupported type found in checkpoint! {}: {}".format(k, type(v))
)
if not isinstance(v, torch.Tensor):
state_dict[k] = torch.from_numpy(v)
| @register_backbone | 1 | 2023-12-15 01:12:36+00:00 | 4k |
nianhua99/PandoraNext-Helper | app.py | [
{
"identifier": "ApiResponse",
"path": "util/api_response.py",
"snippet": "class ApiResponse:\n\n @staticmethod\n def success(data):\n return jsonify({\n 'status': 0,\n 'message': '请求成功',\n 'data': data\n })\n\n @staticmethod\n def error(message... | import json
import os
import re
import secrets
import account
import share
import sys_info
from datetime import date, datetime
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from flask import Flask, redirect, url_for, send_from_directory
from flask.json.provider import JSONProvider
from flask_bootstrap import Bootstrap5
from flask_migrate import Migrate
from flask_moment import Moment
from flask_apscheduler import APScheduler
from loguru import logger
from flask_jwt_extended import JWTManager
from util.api_response import ApiResponse
from auth import auth
from model import db | 1,954 | logger.error("请配置PandoraNext相关环境变量")
exit(1)
else:
app.config.update(
pandora_path=PANDORA_NEXT_PATH,
pandora_domain=PANDORA_NEXT_DOMAIN
)
with open(os.path.join(PANDORA_NEXT_PATH, 'config.json'), 'r') as f:
config = json.load(f)
# 检查setup_password是否已经配置和密码强度
# 密码强度要求:8-16位,包含数字、字母、特殊字符
logger.info(config)
if config['setup_password'] is None:
logger.error('请先配置setup_password')
exit(1)
elif re.match(r'^(?=.*[a-zA-Z])(?=.*\d).{8,}$',
config['setup_password']) is None:
logger.error('setup_password强度不符合要求,请重新配置')
exit(1)
app.config.update(setup_password=config['setup_password'])
# 必须配置proxy_api_prefix,且不少于8位,同时包含字母和数字
if not config['proxy_api_prefix'] or re.match(r'^(?=.*[a-zA-Z])(?=.*\d).{8,}$',
config['proxy_api_prefix']) is None:
logger.error('请配置proxy_api_prefix')
exit(1)
app.config.update(proxy_api_prefix=config['proxy_api_prefix'])
DISABLE_CAPTCHA = os.getenv('DISABLE_CAPTCHA')
# 检查验证码是否已经配置
if DISABLE_CAPTCHA:
logger.warning('已关闭验证码配置,建议您开启验证码')
app.config.update(
license_id=config['license_id'],
captcha_enabled=False,
)
elif config['captcha'] and config['captcha']['provider'] and config['captcha']['provider'] == 'hcaptcha':
app.config.update(
license_id=config['license_id'],
captcha_enabled=True,
captcha_provider=config['captcha']['provider'],
captcha_site_key=config['captcha']['site_key'],
captcha_secret_key=config['captcha']['site_secret']
)
else:
logger.warning('未读取到有效的 hcaptcha 配置,建议您开启验证码')
app.config.update(
license_id=config['license_id'],
captcha_enabled=False,
)
check_require_config()
# scheduler jobstore
app.config['SCHEDULER_JOBSTORES'] = {
'default': SQLAlchemyJobStore(url='sqlite:///' + os.path.join(app.config['pandora_path'], DATABASE))
}
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(app.config['pandora_path'], DATABASE)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db.init_app(app)
def include_object(object, name, type_, reflected, compare_to):
if (
type_ == "table" and name == "apscheduler_jobs"
):
return False
else:
return True
migrate = Migrate(include_object=include_object)
migrate.init_app(app, db)
def format_datetime(value):
"""Format a datetime to a string."""
if value is None:
return ""
return value.strftime('%Y-%m-%d %H:%M:%S')
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(o, date):
return o.strftime('%Y-%m-%d')
elif hasattr(o, 'keys') and hasattr(o, '__getitem__'):
return dict(o)
raise TypeError(f'Object of type {o.__class__.__name__} '
f'is not JSON serializable')
class StandardJSONProvider(JSONProvider):
def dumps(self, obj, **kwargs):
# 使用自定义的JSON编码器进行序列化
return json.dumps(obj, cls=JSONEncoder, **kwargs)
def loads(self, s, **kwargs):
return json.loads(s, **kwargs)
app.json = StandardJSONProvider(app)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
return app.send_static_file("index.html")
def create_app():
|
DATABASE = 'helper.db'
app = Flask(__name__, static_folder='frontend/dist', static_url_path='/')
Bootstrap5(app)
Moment().init_app(app)
# 生成随机的secret_key
app.secret_key = secrets.token_hex(16)
jwt = JWTManager(app)
@jwt.unauthorized_loader
def custom_unauthorized_callback(error_string):
return ApiResponse.unauthorized(error_string, )
@jwt.invalid_token_loader
def custom_invalid_token_callback(error_string):
return ApiResponse.unauthorized(error_string, )
@jwt.expired_token_loader
def custom_expired_token_callback(error_string, expired_token):
return ApiResponse.unauthorized(error_string, )
#
# @app.context_processor
# def context_api_prefix():
# return dict(api_prefix='/api')
def check_require_config():
PANDORA_NEXT_PATH = os.getenv('PANDORA_NEXT_PATH')
# 如果PANDORA_NEXT_PATH 为空则检查/data下是否存在config.json
if not PANDORA_NEXT_PATH:
if os.path.exists('/data/config.json'):
PANDORA_NEXT_PATH = '/data'
else:
logger.error("请配置PandoraNext相关环境变量")
exit(1)
PANDORA_NEXT_DOMAIN = os.getenv('PANDORA_NEXT_DOMAIN')
if not PANDORA_NEXT_DOMAIN:
logger.error("请配置PandoraNext相关环境变量")
exit(1)
else:
app.config.update(
pandora_path=PANDORA_NEXT_PATH,
pandora_domain=PANDORA_NEXT_DOMAIN
)
with open(os.path.join(PANDORA_NEXT_PATH, 'config.json'), 'r') as f:
config = json.load(f)
# 检查setup_password是否已经配置和密码强度
# 密码强度要求:8-16位,包含数字、字母、特殊字符
logger.info(config)
if config['setup_password'] is None:
logger.error('请先配置setup_password')
exit(1)
elif re.match(r'^(?=.*[a-zA-Z])(?=.*\d).{8,}$',
config['setup_password']) is None:
logger.error('setup_password强度不符合要求,请重新配置')
exit(1)
app.config.update(setup_password=config['setup_password'])
# 必须配置proxy_api_prefix,且不少于8位,同时包含字母和数字
if not config['proxy_api_prefix'] or re.match(r'^(?=.*[a-zA-Z])(?=.*\d).{8,}$',
config['proxy_api_prefix']) is None:
logger.error('请配置proxy_api_prefix')
exit(1)
app.config.update(proxy_api_prefix=config['proxy_api_prefix'])
DISABLE_CAPTCHA = os.getenv('DISABLE_CAPTCHA')
# 检查验证码是否已经配置
if DISABLE_CAPTCHA:
logger.warning('已关闭验证码配置,建议您开启验证码')
app.config.update(
license_id=config['license_id'],
captcha_enabled=False,
)
elif config['captcha'] and config['captcha']['provider'] and config['captcha']['provider'] == 'hcaptcha':
app.config.update(
license_id=config['license_id'],
captcha_enabled=True,
captcha_provider=config['captcha']['provider'],
captcha_site_key=config['captcha']['site_key'],
captcha_secret_key=config['captcha']['site_secret']
)
else:
logger.warning('未读取到有效的 hcaptcha 配置,建议您开启验证码')
app.config.update(
license_id=config['license_id'],
captcha_enabled=False,
)
check_require_config()
# scheduler jobstore
app.config['SCHEDULER_JOBSTORES'] = {
'default': SQLAlchemyJobStore(url='sqlite:///' + os.path.join(app.config['pandora_path'], DATABASE))
}
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(app.config['pandora_path'], DATABASE)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db.init_app(app)
def include_object(object, name, type_, reflected, compare_to):
if (
type_ == "table" and name == "apscheduler_jobs"
):
return False
else:
return True
migrate = Migrate(include_object=include_object)
migrate.init_app(app, db)
def format_datetime(value):
"""Format a datetime to a string."""
if value is None:
return ""
return value.strftime('%Y-%m-%d %H:%M:%S')
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(o, date):
return o.strftime('%Y-%m-%d')
elif hasattr(o, 'keys') and hasattr(o, '__getitem__'):
return dict(o)
raise TypeError(f'Object of type {o.__class__.__name__} '
f'is not JSON serializable')
class StandardJSONProvider(JSONProvider):
def dumps(self, obj, **kwargs):
# 使用自定义的JSON编码器进行序列化
return json.dumps(obj, cls=JSONEncoder, **kwargs)
def loads(self, s, **kwargs):
return json.loads(s, **kwargs)
app.json = StandardJSONProvider(app)
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
return app.send_static_file("index.html")
def create_app(): | app.register_blueprint(auth.auth_bp, url_prefix='/api') | 1 | 2023-12-18 13:18:50+00:00 | 4k |
SHI-Labs/VCoder | vcoder_llava/eval/model_vqa_mmbench.py | [
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "vcoder_llava/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_TOKEN",
"path": "vcoder_llava/constants.py",
"snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\""
},
{
"identifier": "conv_templates",
"... | import argparse
import torch
import os
import json
import pandas as pd
import shortuuid
import math
from tqdm import tqdm
from vcoder_llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from vcoder_llava.vcoder_conversation import conv_templates, SeparatorStyle
from vcoder_llava.model.builder import load_pretrained_model
from vcoder_llava.utils import disable_torch_init
from vcoder_llava.mm_utils import tokenizer_image_token, process_images, load_image_from_base64, get_model_name_from_path
from PIL import Image | 3,234 |
all_options = ['A', 'B', 'C', 'D']
def split_list(lst, n):
"""Split a list into n (roughly) equal-sized chunks"""
chunk_size = math.ceil(len(lst) / n) # integer division
return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
chunks = split_list(lst, n)
return chunks[k]
def is_none(value):
if value is None:
return True
if type(value) is float and math.isnan(value):
return True
if type(value) is str and value.lower() == 'nan':
return True
if type(value) is str and value.lower() == 'none':
return True
return False
def get_options(row, options):
parsed_options = []
for option in options:
option_value = row[option]
if is_none(option_value):
break
parsed_options.append(option_value)
return parsed_options
def eval_model(args):
# Model
disable_torch_init()
model_path = os.path.expanduser(args.model_path)
|
all_options = ['A', 'B', 'C', 'D']
def split_list(lst, n):
"""Split a list into n (roughly) equal-sized chunks"""
chunk_size = math.ceil(len(lst) / n) # integer division
return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
chunks = split_list(lst, n)
return chunks[k]
def is_none(value):
if value is None:
return True
if type(value) is float and math.isnan(value):
return True
if type(value) is str and value.lower() == 'nan':
return True
if type(value) is str and value.lower() == 'none':
return True
return False
def get_options(row, options):
parsed_options = []
for option in options:
option_value = row[option]
if is_none(option_value):
break
parsed_options.append(option_value)
return parsed_options
def eval_model(args):
# Model
disable_torch_init()
model_path = os.path.expanduser(args.model_path) | model_name = get_model_name_from_path(model_path) | 8 | 2023-12-17 07:46:27+00:00 | 4k |
galatolofederico/microchain | examples/calculator.py | [
{
"identifier": "OpenAITextGenerator",
"path": "microchain/models/generators.py",
"snippet": "class OpenAITextGenerator:\n def __init__(self, *, model, api_key, api_base, temperature=0.9, top_p=1, max_tokens=512):\n try:\n import openai\n except ImportError:\n rais... | import os
import random
from dotenv import load_dotenv # pip install python-dotenv
from microchain import OpenAITextGenerator, HFChatTemplate, LLM, Function, Engine, Agent
from microchain.functions import Reasoning, Stop | 3,210 |
class Sum(Function):
@property
def description(self):
return "Use this function to compute the sum of two numbers"
@property
def example_args(self):
return [2, 2]
def __call__(self, a: float, b: float):
return a + b
class Product(Function):
@property
def description(self):
return "Use this function to compute the product of two numbers"
@property
def example_args(self):
return [2, 2]
def __call__(self, a: float, b: float):
return a * b
load_dotenv()
generator = OpenAITextGenerator(
model=os.environ["MODEL_NAME"],
api_key=os.environ["API_KEY"],
api_base=os.environ["API_BASE"],
temperature=0.7
)
template = HFChatTemplate(os.environ["TEMPLATE_NAME"])
llm = LLM(generator=generator, templates=[template])
engine = Engine()
engine.register(Reasoning())
|
class Sum(Function):
@property
def description(self):
return "Use this function to compute the sum of two numbers"
@property
def example_args(self):
return [2, 2]
def __call__(self, a: float, b: float):
return a + b
class Product(Function):
@property
def description(self):
return "Use this function to compute the product of two numbers"
@property
def example_args(self):
return [2, 2]
def __call__(self, a: float, b: float):
return a * b
load_dotenv()
generator = OpenAITextGenerator(
model=os.environ["MODEL_NAME"],
api_key=os.environ["API_KEY"],
api_base=os.environ["API_BASE"],
temperature=0.7
)
template = HFChatTemplate(os.environ["TEMPLATE_NAME"])
llm = LLM(generator=generator, templates=[template])
engine = Engine()
engine.register(Reasoning()) | engine.register(Stop()) | 7 | 2023-12-19 10:57:56+00:00 | 4k |
OSU-NLP-Group/SeeAct | src/offline_experiments/screenshot_generation/image_annotation.py | [
{
"identifier": "convert_elements2detections",
"path": "src/data_utils/image_utils.py",
"snippet": "def convert_elements2detections(candidate_elements):\n \"\"\"\n Extract element coordinates\n Parse candidate elements coordinates and convert into sv Detection objects\n \"\"\"\n boxes = [... | import json
import os
import jsonlines
import base64
import numpy as np
import cv2
import copy
import argparse
import supervision as sv
import torch
import pickle as pkl
from tqdm import tqdm
from src.data_utils.image_utils import convert_elements2detections
from src.data_utils.image_utils import extract_topk_elements, extract_elements_by_ids
from src.data_utils.image_utils import batch_elements_by_locality, batch_elements_by_locality_16_16_17
from src.data_utils.format_prompt_utils import data_format_input_multichoice | 2,627 | #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def run(args):
with open(args.selected_set_task_id_path, 'rb') as f:
selected_set_task_id_dict = pkl.load(f)
selected_task_ids = selected_set_task_id_dict[args.split]
# Path to the raw_dump containing screenshot source data
screenshot_dump_path = args.screenshot_dump_path
# Set the image output directory
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# Path to dumped query data (Taken from Mind2Web experiment sample before sending into LLM inference)
query_source_path = args.query_source_path
with open(query_source_path, 'r') as f:
all_queries = json.load(f)
# setup annotators
bounding_box_annotator = sv.BoundingBoxAnnotator(
thickness=2
)
candidate_label_annotator = sv.LabelAnnotator(
color_lookup=sv.ColorLookup.INDEX,
text_position=sv.Position.BOTTOM_LEFT,
text_scale=0.5,
text_color=sv.Color.white(),
color=sv.Color.black(),
text_thickness=1
)
# Enumerate each task in query data and generate screenshots
for i, task in tqdm(enumerate(all_queries)):
if len(task) == 2:
continue
task_action_id = task[0]
task_id, action_id = task_action_id.strip().split("_")
if task_id not in selected_task_ids:
continue
# Load Image source data
single_screenshot_path = os.path.join(screenshot_dump_path, task_id, "processed/screenshot.json")
if os.path.exists(single_screenshot_path):
with open(single_screenshot_path) as f:
scrshots_task = json.load(f)
else:
continue
# Output Path
task_dir = os.path.join(output_dir, task_action_id)
if not os.path.exists(task_dir):
os.mkdir(task_dir)
image_dir = os.path.join(output_dir, task_action_id, "images")
if not os.path.exists(image_dir):
os.mkdir(image_dir)
actid2scrshots_task = {}
for scrshot in scrshots_task:
tsd_act_uid = scrshot["action_uid"]
actid2scrshots_task[tsd_act_uid] = scrshot
scrshot = actid2scrshots_task[action_id]
inference_batches = task[1]
sample = task[2]
# Prepare Image
bef_tsd = scrshot["before"]["screenshot"]
bef_tsd = np.frombuffer(base64.b64decode(bef_tsd), np.uint8)
bef_img = cv2.imdecode(bef_tsd, cv2.IMREAD_COLOR)
# Collect all elements
all_elements = []
positive_elements = sample['pos_candidates']
negative_elements = sample['neg_candidates']
all_elements.extend(positive_elements)
all_elements.extend(negative_elements)
# Prepare top-50 elements and batch into 3 batches with 20 choices
top_50_elements = extract_topk_elements(all_elements, k=50)
if args.num_choice == -1:
choice_batches = batch_elements_by_locality_16_16_17(top_50_elements)
else:
choice_batches = batch_elements_by_locality(top_50_elements, num_choices=args.num_choice)
to_run = []
for batch_idx, candidate_elements in enumerate(choice_batches):
temp = copy.deepcopy(sample)
# Prepare question, choices, etc.
candidate_element_ids = [item['backend_node_id'] for item in candidate_elements]
seq_context, seq_in, _, choices, node_to_keep = data_format_input_multichoice(
temp, candidate_element_ids, -1, keep_html_brackets=True
)
temp['context_html'] = seq_context
temp['context_node_ids'] = copy.deepcopy(list(node_to_keep))
temp['question'] = seq_in
# Reorder Choices
temp['choices'] = choices
temp['image_path'] = os.path.join("", task_action_id, "images")
# Choices will be reordered after data_format_input_multichoice, need to reorder candidate_element_ids
# Align candidate_element_ids with choices
candidate_element_ids = [item[0] for item in choices]
# Align candidate_elements with choices
candidate_elements = extract_elements_by_ids(all_elements, ids=candidate_element_ids)
# Prepare Images
| # -*- coding: utf-8 -*-
# Copyright (c) 2024 OSU Natural Language Processing Group
#
# Licensed under the OpenRAIL-S License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.licenses.ai/ai-pubs-open-rails-vz1
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def run(args):
with open(args.selected_set_task_id_path, 'rb') as f:
selected_set_task_id_dict = pkl.load(f)
selected_task_ids = selected_set_task_id_dict[args.split]
# Path to the raw_dump containing screenshot source data
screenshot_dump_path = args.screenshot_dump_path
# Set the image output directory
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.mkdir(output_dir)
# Path to dumped query data (Taken from Mind2Web experiment sample before sending into LLM inference)
query_source_path = args.query_source_path
with open(query_source_path, 'r') as f:
all_queries = json.load(f)
# setup annotators
bounding_box_annotator = sv.BoundingBoxAnnotator(
thickness=2
)
candidate_label_annotator = sv.LabelAnnotator(
color_lookup=sv.ColorLookup.INDEX,
text_position=sv.Position.BOTTOM_LEFT,
text_scale=0.5,
text_color=sv.Color.white(),
color=sv.Color.black(),
text_thickness=1
)
# Enumerate each task in query data and generate screenshots
for i, task in tqdm(enumerate(all_queries)):
if len(task) == 2:
continue
task_action_id = task[0]
task_id, action_id = task_action_id.strip().split("_")
if task_id not in selected_task_ids:
continue
# Load Image source data
single_screenshot_path = os.path.join(screenshot_dump_path, task_id, "processed/screenshot.json")
if os.path.exists(single_screenshot_path):
with open(single_screenshot_path) as f:
scrshots_task = json.load(f)
else:
continue
# Output Path
task_dir = os.path.join(output_dir, task_action_id)
if not os.path.exists(task_dir):
os.mkdir(task_dir)
image_dir = os.path.join(output_dir, task_action_id, "images")
if not os.path.exists(image_dir):
os.mkdir(image_dir)
actid2scrshots_task = {}
for scrshot in scrshots_task:
tsd_act_uid = scrshot["action_uid"]
actid2scrshots_task[tsd_act_uid] = scrshot
scrshot = actid2scrshots_task[action_id]
inference_batches = task[1]
sample = task[2]
# Prepare Image
bef_tsd = scrshot["before"]["screenshot"]
bef_tsd = np.frombuffer(base64.b64decode(bef_tsd), np.uint8)
bef_img = cv2.imdecode(bef_tsd, cv2.IMREAD_COLOR)
# Collect all elements
all_elements = []
positive_elements = sample['pos_candidates']
negative_elements = sample['neg_candidates']
all_elements.extend(positive_elements)
all_elements.extend(negative_elements)
# Prepare top-50 elements and batch into 3 batches with 20 choices
top_50_elements = extract_topk_elements(all_elements, k=50)
if args.num_choice == -1:
choice_batches = batch_elements_by_locality_16_16_17(top_50_elements)
else:
choice_batches = batch_elements_by_locality(top_50_elements, num_choices=args.num_choice)
to_run = []
for batch_idx, candidate_elements in enumerate(choice_batches):
temp = copy.deepcopy(sample)
# Prepare question, choices, etc.
candidate_element_ids = [item['backend_node_id'] for item in candidate_elements]
seq_context, seq_in, _, choices, node_to_keep = data_format_input_multichoice(
temp, candidate_element_ids, -1, keep_html_brackets=True
)
temp['context_html'] = seq_context
temp['context_node_ids'] = copy.deepcopy(list(node_to_keep))
temp['question'] = seq_in
# Reorder Choices
temp['choices'] = choices
temp['image_path'] = os.path.join("", task_action_id, "images")
# Choices will be reordered after data_format_input_multichoice, need to reorder candidate_element_ids
# Align candidate_element_ids with choices
candidate_element_ids = [item[0] for item in choices]
# Align candidate_elements with choices
candidate_elements = extract_elements_by_ids(all_elements, ids=candidate_element_ids)
# Prepare Images | candidate_detections = convert_elements2detections(candidate_elements) | 0 | 2023-12-21 18:22:11+00:00 | 4k |
DeepWok/mase | machop/chop/passes/graph/transforms/quantize/quant_parsers/parse_quant_config.py | [
{
"identifier": "cp_multi_values",
"path": "machop/chop/passes/graph/transforms/quantize/quant_parsers/utils.py",
"snippet": "def cp_multi_values(\n src: dict, dst: dict, src_keys: tuple, dst_keys: tuple = None, strict: bool = True\n):\n \"\"\"Copy multiple values from src dict to dst dict.\"\"\"\... | from functools import partial
from .utils import cp_multi_values, has_multi_keys | 2,100 | ),
"data_in_entries": (
"data_in_width",
"data_in_exponent_width",
"data_in_exponent_bias",
),
"bias_entries": ("bias_width", "bias_exponent_width", "bias_exponent_bias"),
},
"minifloat_denorm": {
"weight_entries": (
"weight_width",
"weight_exponent_width",
"weight_exponent_bias",
),
"data_in_entries": (
"data_in_width",
"data_in_exponent_width",
"data_in_exponent_bias",
),
"bias_entries": ("bias_width", "bias_exponent_width", "bias_exponent_bias"),
},
"log": {
"weight_entries": ("weight_width", "weight_exponent_bias"),
"data_in_entries": ("data_in_width", "data_in_exponent_bias"),
"bias_entries": ("bias_width", "bias_exponent_bias"),
},
"block_fp": {
"weight_entries": (
"weight_width",
"weight_exponent_width",
"weight_exponent_bias",
"weight_block_size",
),
"data_in_entries": (
"data_in_width",
"data_in_exponent_width",
"data_in_exponent_bias",
"data_in_block_size",
),
"bias_entries": (
"bias_width",
"bias_exponent_width",
"bias_exponent_bias",
"bias_block_size",
),
},
"block_minifloat": {
"weight_entries": (
"weight_width",
"weight_exponent_width",
"weight_exponent_bias_width",
"weight_block_size",
),
"data_in_entries": (
"data_in_width",
"data_in_exponent_width",
"data_in_exponent_bias_width",
"data_in_block_size",
),
"bias_entries": (
"bias_width",
"bias_exponent_width",
"bias_exponent_bias_width",
"bias_block_size",
),
},
"block_log": {
"weight_entries": (
"weight_width",
"weight_exponent_bias_width",
"weight_block_size",
),
"data_in_entries": (
"data_in_width",
"data_in_exponent_bias_width",
"data_in_block_size",
),
"bias_entries": (
"bias_width",
"bias_exponent_bias_width",
"bias_block_size",
),
},
}
""" cp_<entry_name> functions
A collection of functions to copy values from a src config to a parsed config.
"""
def cp_name(config: dict, p_config: dict, entries=None, strict: bool = True):
cp_multi_values(config, p_config, ("name",), strict=strict)
def cp_bypass(config: dict, p_config: dict, entries=None, strict: bool = True):
cp_multi_values(config, p_config, ("bypass",), strict=strict)
def cp_weight_entries(config: dict, p_config: dict, entries: dict, strict: bool = True):
cp_multi_values(config, p_config, entries["weight_entries"], strict=strict)
def cp_data_in_entries(
config: dict, p_config: dict, entries: dict, strict: bool = True
):
cp_multi_values(config, p_config, entries["data_in_entries"], strict=strict)
def cp_data_out_entries(config: dict, p_config: dict, entries: dict):
cp_multi_values(config, p_config, entries["data_out_entries"])
def cp_bias_entries(config: dict, p_config: dict, entries: dict, strict: bool = True):
cp_multi_values(config, p_config, entries["bias_entries"], strict=strict)
def cp_weight_entries_to_bias(
config: dict, p_config: dict, entries: dict, strict: bool = True
):
|
""" QUANT_ARITH_ENTRIES
A mapping from (quantization arithmetic name) to (a mapping from (operand name) to (operand quantization spec name))
Example
A fixed point quantized value is defined by (width, frac_width), thus the mapping is defined as follows:
```python
"fixed": {
"weight_entries": ("weight_width", "weight_frac_width"),
"data_in_entries": ("data_in_width", "data_in_frac_width"),
"bias_entries": ("bias_width", "bias_frac_width"),
},
```
"""
QUANT_ARITH_ENTRIES = {
# <arith_name> : {<operand_name> : (<operand_quantization_spec_name>,)}
"integer": {
"weight_entries": ("weight_width", "weight_frac_width"),
"data_in_entries": ("data_in_width", "data_in_frac_width"),
"bias_entries": ("bias_width", "bias_frac_width"),
},
"fixed": {
"weight_entries": ("weight_width", "weight_frac_width"),
"data_in_entries": ("data_in_width", "data_in_frac_width"),
"bias_entries": ("bias_width", "bias_frac_width"),
},
"lutnet": {
"weight_entries": (
"weight_width",
"weight_frac_width",
"weight_binarization_level",
"weight_input_expanded",
"weight_k",
"weight_in_dim",
),
"data_in_entries": (
"data_in_width",
"data_in_frac_width",
"data_in_binarization_level", # binarization_level (int): which level of binarization is applied, "binarized_weight" is only weights binarized others is no binarization
"data_in_input_expanded", # input_expanded (bool): If set to True, means all LUT's inputs are considered during calculations , else only the first input will considered and the remaining will be masked.
"data_in_k", # k entries of a LUT
"data_in_levels", # data_in_levels (int): number of residual levels to use in lutnet
"data_in_dim", # data input dimension (this is needed by convolution)
),
"bias_entries": (
"bias_width",
"bias_frac_width",
"bias_binarization_level",
"bias_input_expanded",
"bias_k",
"bias_in_dim",
),
},
"logicnets": {
"weight_entries": ( # TODO: change update_node_meta.py to take optional argument so this can be removed
"weight_width",
"weight_frac_width",
),
"bias_entries": (
"bias_width",
"bias_frac_width",
),
"data_in_entries": (
"data_in_width",
"data_in_frac_width",
),
"data_out_entries": (
"data_out_width",
"data_out_frac_width",
),
"additional_layers_entries": {
"additional_layers_inputs",
"additional_layers_outputs",
},
},
"binary": {
"weight_entries": (
"weight_width",
"weight_stochastic",
"weight_bipolar",
),
"data_in_entries": (
"data_in_width",
"data_in_stochastic",
"data_in_bipolar",
),
"bias_entries": (
"bias_width",
"bias_stochastic",
"bias_bipolar",
),
},
"binary_residual": {
"weight_entries": (
"weight_width",
"weight_stochastic",
"weight_bipolar",
"binary_training",
),
"data_in_entries": (
"data_in_width",
"data_in_stochastic",
"data_in_bipolar",
"data_in_residual_sign",
"data_in_levels", # data_in_levels (int): number of residual levels to use in lutnet
),
"bias_entries": (
"bias_width",
"bias_stochastic",
"bias_bipolar",
),
},
"ternary": {
"weight_entries": (
"weight_width",
"weight_scaling_factor",
"weight_mean",
"weight_median",
"weight_max",
),
"data_in_entries": (
"data_in_width",
"data_in_scaling_factor",
"data_in_mean",
"data_in_median",
"data_in_max",
),
"bias_entries": (
"bias_width",
"bias_scaling_factor",
"bias_mean",
"bias_max",
"bias_median",
),
},
"minifloat_ieee": {
"weight_entries": (
"weight_width",
"weight_exponent_width",
"weight_exponent_bias",
),
"data_in_entries": (
"data_in_width",
"data_in_exponent_width",
"data_in_exponent_bias",
),
"bias_entries": ("bias_width", "bias_exponent_width", "bias_exponent_bias"),
},
"minifloat_denorm": {
"weight_entries": (
"weight_width",
"weight_exponent_width",
"weight_exponent_bias",
),
"data_in_entries": (
"data_in_width",
"data_in_exponent_width",
"data_in_exponent_bias",
),
"bias_entries": ("bias_width", "bias_exponent_width", "bias_exponent_bias"),
},
"log": {
"weight_entries": ("weight_width", "weight_exponent_bias"),
"data_in_entries": ("data_in_width", "data_in_exponent_bias"),
"bias_entries": ("bias_width", "bias_exponent_bias"),
},
"block_fp": {
"weight_entries": (
"weight_width",
"weight_exponent_width",
"weight_exponent_bias",
"weight_block_size",
),
"data_in_entries": (
"data_in_width",
"data_in_exponent_width",
"data_in_exponent_bias",
"data_in_block_size",
),
"bias_entries": (
"bias_width",
"bias_exponent_width",
"bias_exponent_bias",
"bias_block_size",
),
},
"block_minifloat": {
"weight_entries": (
"weight_width",
"weight_exponent_width",
"weight_exponent_bias_width",
"weight_block_size",
),
"data_in_entries": (
"data_in_width",
"data_in_exponent_width",
"data_in_exponent_bias_width",
"data_in_block_size",
),
"bias_entries": (
"bias_width",
"bias_exponent_width",
"bias_exponent_bias_width",
"bias_block_size",
),
},
"block_log": {
"weight_entries": (
"weight_width",
"weight_exponent_bias_width",
"weight_block_size",
),
"data_in_entries": (
"data_in_width",
"data_in_exponent_bias_width",
"data_in_block_size",
),
"bias_entries": (
"bias_width",
"bias_exponent_bias_width",
"bias_block_size",
),
},
}
""" cp_<entry_name> functions
A collection of functions to copy values from a src config to a parsed config.
"""
def cp_name(config: dict, p_config: dict, entries=None, strict: bool = True):
cp_multi_values(config, p_config, ("name",), strict=strict)
def cp_bypass(config: dict, p_config: dict, entries=None, strict: bool = True):
cp_multi_values(config, p_config, ("bypass",), strict=strict)
def cp_weight_entries(config: dict, p_config: dict, entries: dict, strict: bool = True):
cp_multi_values(config, p_config, entries["weight_entries"], strict=strict)
def cp_data_in_entries(
config: dict, p_config: dict, entries: dict, strict: bool = True
):
cp_multi_values(config, p_config, entries["data_in_entries"], strict=strict)
def cp_data_out_entries(config: dict, p_config: dict, entries: dict):
cp_multi_values(config, p_config, entries["data_out_entries"])
def cp_bias_entries(config: dict, p_config: dict, entries: dict, strict: bool = True):
cp_multi_values(config, p_config, entries["bias_entries"], strict=strict)
def cp_weight_entries_to_bias(
config: dict, p_config: dict, entries: dict, strict: bool = True
): | if has_multi_keys(config, entries["bias_entries"]): | 1 | 2023-12-18 12:50:53+00:00 | 4k |
yeyt97/AirDropPlus | AirDropPlus.py | [
{
"identifier": "Config",
"path": "config.py",
"snippet": "class Config:\n def __init__(self, config_path):\n self.config = configparser.ConfigParser()\n self.config.read(config_path, encoding='utf-8')\n\n self.config_path = config_path\n self.key = self.config.get('config... | import os
import sys
import utils
from config import Config
from notifier import create_notifier
from server import Server | 1,664 |
if __name__ == '__main__':
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
config_file_path = os.path.join(SCRIPT_DIR, 'config', 'config.ini')
config = Config(config_file_path)
notifier = create_notifier(config.basic_notifier)
if not os.path.exists(config.save_path):
notifier.notify('启动失败', f'文件保存路径:"{config.save_path}"不存在,请检查配置文件"{config_file_path}"')
sys.exit()
if utils.is_program_running():
notifier.notify('启动失败', '请不要重复启动')
sys.exit()
try:
|
if __name__ == '__main__':
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
config_file_path = os.path.join(SCRIPT_DIR, 'config', 'config.ini')
config = Config(config_file_path)
notifier = create_notifier(config.basic_notifier)
if not os.path.exists(config.save_path):
notifier.notify('启动失败', f'文件保存路径:"{config.save_path}"不存在,请检查配置文件"{config_file_path}"')
sys.exit()
if utils.is_program_running():
notifier.notify('启动失败', '请不要重复启动')
sys.exit()
try: | server = Server(config, notifier) | 2 | 2023-12-19 08:16:21+00:00 | 4k |
byeongjun-park/HarmonyView | ldm/modules/diffusionmodules/openaimodel.py | [
{
"identifier": "checkpoint",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n ... | from abc import abstractmethod
from functools import partial
from typing import Iterable
from ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from ldm.modules.attention import SpatialTransformer
from ldm.util import exists
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F | 2,531 |
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
|
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels | self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) | 3 | 2023-12-21 04:44:00+00:00 | 4k |
EntySec/SeaShell | seashell/modules/unhook.py | [
{
"identifier": "Loot",
"path": "seashell/lib/loot.py",
"snippet": "class Loot(String, FS):\n \"\"\" Subclass of seashell.lib module.\n\n This subclass of seashell.lib module is intended for providing\n tools for working with loot collected by SeaShell.\n \"\"\"\n\n def __init__(self) -> ... | from pwny.api import *
from pwny.types import *
from seashell.lib.loot import Loot
from seashell.core.hook import Hook
from hatsploit.lib.command import Command | 2,595 | """
This command requires SeaShell: https://github.com/EntySec/SeaShell
Current source: https://github.com/EntySec/SeaShell
"""
class HatSploitCommand(Command):
def __init__(self):
super().__init__()
self.details = {
'Category': "evasion",
'Name': "unhook",
'Authors': [
'Ivan Nikolskiy (enty8080) - command developer'
],
'Description': "Remove hook from other app (e.g. Contacts.app).",
'Usage': "unhook <app_name>",
'MinArgs': 1
}
self.plist = Loot().specific_loot('Info.plist')
def find_app(self, app_name):
containers = '/private/var/containers/Bundle/Application'
result = self.session.send_command(
tag=FS_LIST,
args={
TLV_TYPE_PATH: containers
}
)
if result.get_int(TLV_TYPE_STATUS) != TLV_STATUS_SUCCESS:
self.print_error("Failed to access application containers!")
return
self.print_process(f"Searching for {app_name} in containers...")
file = result.get_tlv(TLV_TYPE_GROUP)
path = None
while file:
apps = self.session.send_command(
tag=FS_LIST,
args={
TLV_TYPE_PATH: file.get_string(TLV_TYPE_PATH)
}
)
if apps.get_int(TLV_TYPE_STATUS) != TLV_STATUS_SUCCESS:
continue
app = apps.get_tlv(TLV_TYPE_GROUP)
while app:
if app.get_string(TLV_TYPE_FILENAME) == app_name:
path = app.get_string(TLV_TYPE_PATH)
self.print_success(f"Found {app_name} at {path}!")
break
app = apps.get_tlv(TLV_TYPE_GROUP)
if path:
break
file = result.get_tlv(TLV_TYPE_GROUP)
return path
def run(self, argc, argv):
path = self.find_app(argv[1])
if not path:
self.print_error(f"Path for {argv[1]} not found!")
return
if not self.session.download(path + '/Info.plist', self.plist):
self.print_error("Failed to access Info.plist!")
return
self.print_process("Patching Info.plist...")
| """
This command requires SeaShell: https://github.com/EntySec/SeaShell
Current source: https://github.com/EntySec/SeaShell
"""
class HatSploitCommand(Command):
def __init__(self):
super().__init__()
self.details = {
'Category': "evasion",
'Name': "unhook",
'Authors': [
'Ivan Nikolskiy (enty8080) - command developer'
],
'Description': "Remove hook from other app (e.g. Contacts.app).",
'Usage': "unhook <app_name>",
'MinArgs': 1
}
self.plist = Loot().specific_loot('Info.plist')
def find_app(self, app_name):
containers = '/private/var/containers/Bundle/Application'
result = self.session.send_command(
tag=FS_LIST,
args={
TLV_TYPE_PATH: containers
}
)
if result.get_int(TLV_TYPE_STATUS) != TLV_STATUS_SUCCESS:
self.print_error("Failed to access application containers!")
return
self.print_process(f"Searching for {app_name} in containers...")
file = result.get_tlv(TLV_TYPE_GROUP)
path = None
while file:
apps = self.session.send_command(
tag=FS_LIST,
args={
TLV_TYPE_PATH: file.get_string(TLV_TYPE_PATH)
}
)
if apps.get_int(TLV_TYPE_STATUS) != TLV_STATUS_SUCCESS:
continue
app = apps.get_tlv(TLV_TYPE_GROUP)
while app:
if app.get_string(TLV_TYPE_FILENAME) == app_name:
path = app.get_string(TLV_TYPE_PATH)
self.print_success(f"Found {app_name} at {path}!")
break
app = apps.get_tlv(TLV_TYPE_GROUP)
if path:
break
file = result.get_tlv(TLV_TYPE_GROUP)
return path
def run(self, argc, argv):
path = self.find_app(argv[1])
if not path:
self.print_error(f"Path for {argv[1]} not found!")
return
if not self.session.download(path + '/Info.plist', self.plist):
self.print_error("Failed to access Info.plist!")
return
self.print_process("Patching Info.plist...")
| hook = Hook() | 1 | 2023-12-17 04:14:16+00:00 | 4k |
FlagOpen/TACO | train.py | [
{
"identifier": "Trainer",
"path": "train_utils.py",
"snippet": "class Trainer(transformers.Trainer):\n \"\"\"Use CosineAnnealingLR from pytorch \n \"\"\"\n \n def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):\n \"\"\"\n Setup the sch... | from typing import Optional, Dict
from dataclasses import dataclass, field
from train_utils import Trainer
from datamodule import DEFAULT_PAD_TOKEN, DEFAULT_EOS_TOKEN, DEFAULT_BOS_TOKEN, TacoDataset, DataCollatorForTacoDataset
import transformers | 1,780 | """
Finetune models on TACO-Dataset train split
"""
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="bigcode/tiny_starcoder_py")
@dataclass
class DataArguments:
data_path: str = field(default=None, metadata={"help": "Path to the training data."})
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
adam_beta1: float = field(default=0.9)
adam_beta2: float = field(default=0.95)
use_cosine_anneal_with_warmup: bool = field(default=True)
model_max_length: int = field(
default=2048,
metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
)
resume_from_checkpoint: bool = field(
default=False,
metadata={"help": "load the last checkpoint in args.output_dir as saved by a previous instance of Trainer."}
)
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def make_taco_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
"""Make dataset and collator for fine-tune"""
train_dataset = TacoDataset(data_path=data_args.data_path)
data_collator = DataCollatorForTacoDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator)
def train():
parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
model = transformers.AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
trust_remote_code=True,
use_auth_token=True,
cache_dir=training_args.cache_dir,
)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
trust_remote_code=True,
use_auth_token=True,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False,
)
special_tokens_dict = dict()
if tokenizer.pad_token is None:
| """
Finetune models on TACO-Dataset train split
"""
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="bigcode/tiny_starcoder_py")
@dataclass
class DataArguments:
data_path: str = field(default=None, metadata={"help": "Path to the training data."})
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
adam_beta1: float = field(default=0.9)
adam_beta2: float = field(default=0.95)
use_cosine_anneal_with_warmup: bool = field(default=True)
model_max_length: int = field(
default=2048,
metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
)
resume_from_checkpoint: bool = field(
default=False,
metadata={"help": "load the last checkpoint in args.output_dir as saved by a previous instance of Trainer."}
)
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def make_taco_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
"""Make dataset and collator for fine-tune"""
train_dataset = TacoDataset(data_path=data_args.data_path)
data_collator = DataCollatorForTacoDataset(tokenizer=tokenizer)
return dict(train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator)
def train():
parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
model = transformers.AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
trust_remote_code=True,
use_auth_token=True,
cache_dir=training_args.cache_dir,
)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
trust_remote_code=True,
use_auth_token=True,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False,
)
special_tokens_dict = dict()
if tokenizer.pad_token is None: | special_tokens_dict["pad_token"] = DEFAULT_PAD_TOKEN | 1 | 2023-12-20 03:12:01+00:00 | 4k |
OPPOMKLab/u-LLaVA | datasets/builders/plain_type_builder.py | [
{
"identifier": "registry",
"path": "utils/registry.py",
"snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def ... | from utils.registry import registry
from datasets.datasets.tgif_dataset import TgifDataset
from datasets.builders.base_builder import BaseDatasetBuilder
from datasets.datasets.llava_dataset import LLaVADataset, LLaVASegDataset | 2,978 | """
Copyright 2023 OPPO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class PlainBuilder(BaseDatasetBuilder):
dataset_cls = LLaVADataset
def build(self, tokenizer, processor_dict, conv_type='conv_simple'):
build_info = self.config.build_info
dataset_cls = self.dataset_cls
image_token_len = self.config.get('image_token_len', 256)
image_dir = build_info.get('image_dir', '')
anno_dir = build_info.get('anno_dir', '')
portion = float(build_info.get('portion', 1))
data_type = self.config.get('data_type', 'image')
vis_processor = self.fetch_processor('vis_processor', processor_dict)
dataset = dataset_cls(
vis_processor=vis_processor,
tokenizer=tokenizer,
vis_root=image_dir,
ann_root=anno_dir,
portion=portion,
image_token_len=image_token_len,
data_type=data_type,
conv_type=conv_type,
)
return dataset
@registry.register_builder("llava_cc3m")
@registry.register_builder("llava_instruct")
@registry.register_builder("sqa")
class LLaVACc3mBuilder(PlainBuilder):
dataset_cls = LLaVADataset
@registry.register_builder("llava_seg")
class LlaVASegBuilder(PlainBuilder):
| """
Copyright 2023 OPPO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class PlainBuilder(BaseDatasetBuilder):
dataset_cls = LLaVADataset
def build(self, tokenizer, processor_dict, conv_type='conv_simple'):
build_info = self.config.build_info
dataset_cls = self.dataset_cls
image_token_len = self.config.get('image_token_len', 256)
image_dir = build_info.get('image_dir', '')
anno_dir = build_info.get('anno_dir', '')
portion = float(build_info.get('portion', 1))
data_type = self.config.get('data_type', 'image')
vis_processor = self.fetch_processor('vis_processor', processor_dict)
dataset = dataset_cls(
vis_processor=vis_processor,
tokenizer=tokenizer,
vis_root=image_dir,
ann_root=anno_dir,
portion=portion,
image_token_len=image_token_len,
data_type=data_type,
conv_type=conv_type,
)
return dataset
@registry.register_builder("llava_cc3m")
@registry.register_builder("llava_instruct")
@registry.register_builder("sqa")
class LLaVACc3mBuilder(PlainBuilder):
dataset_cls = LLaVADataset
@registry.register_builder("llava_seg")
class LlaVASegBuilder(PlainBuilder): | dataset_cls = LLaVASegDataset | 4 | 2023-12-21 08:10:23+00:00 | 4k |
shashikg/WhisperS2T | whisper_s2t/data.py | [
{
"identifier": "pad_or_trim",
"path": "whisper_s2t/audio.py",
"snippet": "def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1):\n \"\"\"\n Pad or trim the audio array to N_SAMPLES, as expected by the encoder.\n \"\"\"\n \n if torch.is_tensor(array):\n if array.shape[... | import torch
import numpy as np
from tqdm import tqdm
from .configs import *
from .audio import pad_or_trim, audio_batch_generator, load_audio | 2,214 | if type(audio_files[0]) == str:
self.get_audio_signal = self._get_audio_signal_from_file
else:
self.get_audio_signal = self._get_audio_signal_from_array
def _get_audio_signal_from_array(self, item):
return self.audio_files[item]
def _get_audio_signal_from_file(self, item):
return load_audio(self.audio_files[item])
def __len__(self):
return len(self.audio_files)
def __getitem__(self, item):
audio = self.get_audio_signal(item)
seq_len = audio.shape[-1]
if self.initial_prompts[item]:
initial_prompt = " " + self.initial_prompts[item].strip()
initial_prompt_tokens = self.tokenizer.encode(initial_prompt)[-self.max_initial_prompt_len:]
else:
initial_prompt_tokens = []
prompt = self.tokenizer.sot_sequence(task=self.tasks[item], lang=self.lang_codes[item])
if self.without_timestamps:
prompt = prompt + [self.tokenizer.no_timestamps]
return audio, prompt, initial_prompt_tokens, seq_len
class WhisperDataLoader:
def __init__(self, device, tokenizer, speech_segmenter,
dta_padding=3.0,
without_timestamps=True,
max_speech_len=29.0,
max_initial_prompt_len=223,
merge_chunks=True,
use_dynamic_time_axis=False):
self.device = device
self.tokenizer = tokenizer
self.speech_segmenter = speech_segmenter
self.dta_padding = int(dta_padding*SAMPLE_RATE)
self.without_timestamps = without_timestamps
self.max_speech_len = max_speech_len
self.max_initial_prompt_len = max_initial_prompt_len
self.use_dynamic_time_axis = use_dynamic_time_axis
self.merge_chunks = merge_chunks
def data_collate_fn(self, batch):
if self.use_dynamic_time_axis:
max_len = min(max([_[3] for _ in batch]) + self.dta_padding, N_SAMPLES)
else:
max_len = N_SAMPLES
signal_batch = torch.stack([torch.from_numpy(pad_or_trim(_[0], length=max_len)).to(self.device) for _ in batch])
seq_len = torch.tensor([_[3] for _ in batch]).to(self.device)
prompt_batch = []
initial_prompt_max_len = max([len(_[2]) for _ in batch])
if initial_prompt_max_len:
for _ in batch: prompt_batch.append([self.tokenizer.sot_prev] + (initial_prompt_max_len-len(_[2]))*[self.tokenizer.silent_token] + _[2] + _[1])
else:
for _ in batch: prompt_batch.append(_[1])
if len(batch[0]) == 5:
seg_metadata = [_[4] for _ in batch]
return signal_batch, prompt_batch, seq_len, seg_metadata
else:
return signal_batch, prompt_batch, seq_len
def get_segmented_audio_signal(self, audio_signal, file_id, lang, task, initial_prompt, sr=16000):
start_ends, audio_signal = self.speech_segmenter(audio_signal=audio_signal)
if initial_prompt:
initial_prompt = " " + initial_prompt.strip()
initial_prompt_tokens = self.tokenizer.encode(initial_prompt)[-self.max_initial_prompt_len:]
else:
initial_prompt_tokens = []
prompt = self.tokenizer.sot_sequence(task=task, lang=lang)
if self.without_timestamps:
prompt.append(self.tokenizer.no_timestamps)
else:
prompt.append(self.tokenizer.timestamp_begin)
segmented_audio_signal = []
if self.merge_chunks:
stitched_speech_segments = stitch_speech_segments(start_ends, max_len=self.max_speech_len)
for stitched_seg in stitched_speech_segments:
audio = []
for st, et in stitched_seg:
audio.append(audio_signal[int(st*sr):int(et*sr)])
audio = np.concatenate(audio)
seq_len = audio.shape[-1]
seg_metadata = {
'file_id': file_id,
'start_time': stitched_seg[0][0],
'end_time': stitched_seg[-1][1],
'stitched_seg': stitched_seg,
'lang_code': lang
}
segmented_audio_signal.append((audio, prompt, initial_prompt_tokens, seq_len, seg_metadata))
else:
for st, et in start_ends:
audio = audio_signal[int(st*sr):int(et*sr)]
seq_len = audio.shape[-1]
segmented_audio_signal.append((audio, prompt, initial_prompt_tokens, seq_len, {'file_id': file_id, 'start_time': st, 'end_time': et}))
return segmented_audio_signal
def get_data_loader_with_vad(self, audio_files, lang_codes, tasks, initial_prompts, batch_size=16):
segmented_audio_signal = []
pbar_update_len = {}
|
def stitch_speech_segments(start_ends, max_len=27.0, max_silent_region=None):
speech_duration = [end - start for start, end in start_ends]
stitched_speech_segments = []
curr_seg = [0]
curr_dur = speech_duration[0]
idx = 1
while idx < len(start_ends):
if curr_dur + speech_duration[idx] > max_len:
stitched_speech_segments.append([start_ends[_] for _ in curr_seg])
curr_seg = [idx]
curr_dur = speech_duration[idx]
else:
curr_dur += speech_duration[idx]
curr_seg.append(idx)
idx += 1
stitched_speech_segments.append([start_ends[_] for _ in curr_seg])
if max_silent_region is None:
return stitched_speech_segments
stitched_speech_segments_joined = []
for segs in stitched_speech_segments:
_segs = []
curr_seg_start_time, curr_seg_end_time = segs[0]
for i in range(1, len(segs)):
if (segs[i][0] - curr_seg_end_time) >= max_silent_region:
_segs.append((curr_seg_start_time, curr_seg_end_time))
curr_seg_start_time = segs[i][0]
curr_seg_end_time = segs[i][1]
_segs.append((curr_seg_start_time, curr_seg_end_time))
stitched_speech_segments_joined.append(_segs)
return stitched_speech_segments_joined
class WhisperDataset(torch.utils.data.Dataset):
def __init__(self, audio_files, lang_codes, tasks, initial_prompts, tokenizer, max_initial_prompt_len,
device="cuda",
dta_padding=48000,
without_timestamps=True,
use_dynamic_time_axis=False):
self.audio_files = audio_files
self.lang_codes = lang_codes
self.tasks = tasks
self.initial_prompts = initial_prompts
self.tokenizer = tokenizer
self.device = device
self.dta_padding = dta_padding
self.without_timestamps = without_timestamps
self.use_dynamic_time_axis = use_dynamic_time_axis
self.max_initial_prompt_len = max_initial_prompt_len
if type(audio_files[0]) == str:
self.get_audio_signal = self._get_audio_signal_from_file
else:
self.get_audio_signal = self._get_audio_signal_from_array
def _get_audio_signal_from_array(self, item):
return self.audio_files[item]
def _get_audio_signal_from_file(self, item):
return load_audio(self.audio_files[item])
def __len__(self):
return len(self.audio_files)
def __getitem__(self, item):
audio = self.get_audio_signal(item)
seq_len = audio.shape[-1]
if self.initial_prompts[item]:
initial_prompt = " " + self.initial_prompts[item].strip()
initial_prompt_tokens = self.tokenizer.encode(initial_prompt)[-self.max_initial_prompt_len:]
else:
initial_prompt_tokens = []
prompt = self.tokenizer.sot_sequence(task=self.tasks[item], lang=self.lang_codes[item])
if self.without_timestamps:
prompt = prompt + [self.tokenizer.no_timestamps]
return audio, prompt, initial_prompt_tokens, seq_len
class WhisperDataLoader:
def __init__(self, device, tokenizer, speech_segmenter,
dta_padding=3.0,
without_timestamps=True,
max_speech_len=29.0,
max_initial_prompt_len=223,
merge_chunks=True,
use_dynamic_time_axis=False):
self.device = device
self.tokenizer = tokenizer
self.speech_segmenter = speech_segmenter
self.dta_padding = int(dta_padding*SAMPLE_RATE)
self.without_timestamps = without_timestamps
self.max_speech_len = max_speech_len
self.max_initial_prompt_len = max_initial_prompt_len
self.use_dynamic_time_axis = use_dynamic_time_axis
self.merge_chunks = merge_chunks
def data_collate_fn(self, batch):
if self.use_dynamic_time_axis:
max_len = min(max([_[3] for _ in batch]) + self.dta_padding, N_SAMPLES)
else:
max_len = N_SAMPLES
signal_batch = torch.stack([torch.from_numpy(pad_or_trim(_[0], length=max_len)).to(self.device) for _ in batch])
seq_len = torch.tensor([_[3] for _ in batch]).to(self.device)
prompt_batch = []
initial_prompt_max_len = max([len(_[2]) for _ in batch])
if initial_prompt_max_len:
for _ in batch: prompt_batch.append([self.tokenizer.sot_prev] + (initial_prompt_max_len-len(_[2]))*[self.tokenizer.silent_token] + _[2] + _[1])
else:
for _ in batch: prompt_batch.append(_[1])
if len(batch[0]) == 5:
seg_metadata = [_[4] for _ in batch]
return signal_batch, prompt_batch, seq_len, seg_metadata
else:
return signal_batch, prompt_batch, seq_len
def get_segmented_audio_signal(self, audio_signal, file_id, lang, task, initial_prompt, sr=16000):
start_ends, audio_signal = self.speech_segmenter(audio_signal=audio_signal)
if initial_prompt:
initial_prompt = " " + initial_prompt.strip()
initial_prompt_tokens = self.tokenizer.encode(initial_prompt)[-self.max_initial_prompt_len:]
else:
initial_prompt_tokens = []
prompt = self.tokenizer.sot_sequence(task=task, lang=lang)
if self.without_timestamps:
prompt.append(self.tokenizer.no_timestamps)
else:
prompt.append(self.tokenizer.timestamp_begin)
segmented_audio_signal = []
if self.merge_chunks:
stitched_speech_segments = stitch_speech_segments(start_ends, max_len=self.max_speech_len)
for stitched_seg in stitched_speech_segments:
audio = []
for st, et in stitched_seg:
audio.append(audio_signal[int(st*sr):int(et*sr)])
audio = np.concatenate(audio)
seq_len = audio.shape[-1]
seg_metadata = {
'file_id': file_id,
'start_time': stitched_seg[0][0],
'end_time': stitched_seg[-1][1],
'stitched_seg': stitched_seg,
'lang_code': lang
}
segmented_audio_signal.append((audio, prompt, initial_prompt_tokens, seq_len, seg_metadata))
else:
for st, et in start_ends:
audio = audio_signal[int(st*sr):int(et*sr)]
seq_len = audio.shape[-1]
segmented_audio_signal.append((audio, prompt, initial_prompt_tokens, seq_len, {'file_id': file_id, 'start_time': st, 'end_time': et}))
return segmented_audio_signal
def get_data_loader_with_vad(self, audio_files, lang_codes, tasks, initial_prompts, batch_size=16):
segmented_audio_signal = []
pbar_update_len = {} | for file_id, (audio_signal, lang, task, initial_prompt) in enumerate(zip(audio_batch_generator(audio_files), lang_codes, tasks, initial_prompts)): | 1 | 2023-12-16 18:09:16+00:00 | 4k |
chinhsuanwu/ifusion | ldm/models/diffusion/ddim.py | [
{
"identifier": "make_ddim_sampling_parameters",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev ... | import torch
import numpy as np
from tqdm import tqdm
from ldm.modules.diffusionmodules.util import (
make_ddim_sampling_parameters,
make_ddim_timesteps,
noise_like,
extract_into_tensor,
)
from ldm.models.diffusion.sampling_util import (
norm_thresholding,
) | 3,234 | self.model.sqrt_one_minus_alphas_cumprod
if use_original_steps
else self.ddim_sqrt_one_minus_alphas
)
sigmas = (
self.model.ddim_sigmas_for_original_num_steps
if use_original_steps
else self.ddim_sigmas
)
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full(
(b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device
)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None:
pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
# direction pointing to x_t
dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.0:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
return x_prev, pred_x0
@torch.no_grad()
def encode(
self,
x0,
c,
t_enc,
use_original_steps=False,
return_intermediates=None,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
):
num_reference_steps = (
self.ddpm_num_timesteps
if use_original_steps
else self.ddim_timesteps.shape[0]
)
assert t_enc <= num_reference_steps
num_steps = t_enc
if use_original_steps:
alphas_next = self.alphas_cumprod[:num_steps]
alphas = self.alphas_cumprod_prev[:num_steps]
else:
alphas_next = self.ddim_alphas[:num_steps]
alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])
x_next = x0
intermediates = []
inter_steps = []
for i in tqdm(range(num_steps), desc="Encoding Image"):
t = torch.full(
(x0.shape[0],), i, device=self.model.device, dtype=torch.long
)
if unconditional_guidance_scale == 1.0:
noise_pred = self.model.apply_model(x_next, t, c)
else:
assert unconditional_conditioning is not None
e_t_uncond, noise_pred = torch.chunk(
self.model.apply_model(
torch.cat((x_next, x_next)),
torch.cat((t, t)),
torch.cat((unconditional_conditioning, c)),
),
2,
)
noise_pred = e_t_uncond + unconditional_guidance_scale * (
noise_pred - e_t_uncond
)
xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
weighted_noise_pred = (
alphas_next[i].sqrt()
* ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())
* noise_pred
)
x_next = xt_weighted + weighted_noise_pred
if (
return_intermediates
and i % (num_steps // return_intermediates) == 0
and i < num_steps - 1
):
intermediates.append(x_next)
inter_steps.append(i)
elif return_intermediates and i >= num_steps - 2:
intermediates.append(x_next)
inter_steps.append(i)
out = {"x_encoded": x_next, "intermediate_steps": inter_steps}
if return_intermediates:
out.update({"intermediates": intermediates})
return x_next, out
@torch.no_grad()
def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
# fast, but does not allow for exact reconstruction
# t serves as an index to gather the correct alphas
if use_original_steps:
sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
else:
sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
if noise is None:
noise = torch.randn_like(x0)
return (
| """SAMPLING ONLY."""
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def to(self, device):
"""Same as to in torch module
Don't really underestand why this isn't a module in the first place"""
for k, v in self.__dict__.items():
if isinstance(v, torch.Tensor):
new_v = getattr(self, k).to(device)
setattr(self, k, new_v)
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(
self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.0, verbose=True
):
self.ddim_timesteps = make_ddim_timesteps(
ddim_discr_method=ddim_discretize,
num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,
verbose=verbose,
)
alphas_cumprod = self.model.alphas_cumprod
assert (
alphas_cumprod.shape[0] == self.ddpm_num_timesteps
), "alphas have to be defined for each timestep"
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer("betas", to_torch(self.model.betas))
self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod))
self.register_buffer(
"alphas_cumprod_prev", to_torch(self.model.alphas_cumprod_prev)
)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer(
"sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_one_minus_alphas_cumprod",
to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),
)
self.register_buffer(
"log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))
)
self.register_buffer(
"sqrt_recipm1_alphas_cumprod",
to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),
)
# ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(
alphacums=alphas_cumprod.cpu(),
ddim_timesteps=self.ddim_timesteps,
eta=ddim_eta,
verbose=verbose,
)
self.register_buffer("ddim_sigmas", ddim_sigmas)
self.register_buffer("ddim_alphas", ddim_alphas)
self.register_buffer("ddim_alphas_prev", ddim_alphas_prev)
self.register_buffer("ddim_sqrt_one_minus_alphas", np.sqrt(1.0 - ddim_alphas))
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
(1 - self.alphas_cumprod_prev)
/ (1 - self.alphas_cumprod)
* (1 - self.alphas_cumprod / self.alphas_cumprod_prev)
)
self.register_buffer(
"ddim_sigmas_for_original_num_steps", sigmas_for_original_sampling_steps
)
@torch.no_grad()
def sample(
self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.0,
mask=None,
x0=None,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
dynamic_threshold=None,
**kwargs,
):
if conditioning is not None:
if isinstance(conditioning, dict):
ctmp = conditioning[list(conditioning.keys())[0]]
while isinstance(ctmp, list):
ctmp = ctmp[0]
cbs = ctmp.shape[0]
if cbs != batch_size:
print(
f"Warning: Got {cbs} conditionings but batch-size is {batch_size}"
)
else:
if conditioning.shape[0] != batch_size:
print(
f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}"
)
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
# print(f'Data shape for DDIM sampling is {size}, eta {eta}')
samples, intermediates = self.ddim_sampling(
conditioning,
size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask,
x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
)
return samples, intermediates
@torch.no_grad()
def ddim_sampling(
self,
cond,
shape,
x_T=None,
ddim_use_original_steps=False,
callback=None,
timesteps=None,
quantize_denoised=False,
mask=None,
x0=None,
img_callback=None,
log_every_t=100,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
dynamic_threshold=None,
t_start=-1,
):
device = self.model.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
if timesteps is None:
timesteps = (
self.ddpm_num_timesteps
if ddim_use_original_steps
else self.ddim_timesteps
)
elif timesteps is not None and not ddim_use_original_steps:
subset_end = (
int(
min(timesteps / self.ddim_timesteps.shape[0], 1)
* self.ddim_timesteps.shape[0]
)
- 1
)
timesteps = self.ddim_timesteps[:subset_end]
timesteps = timesteps[:t_start]
intermediates = {"x_inter": [img], "pred_x0": [img]}
time_range = (
reversed(range(0, timesteps))
if ddim_use_original_steps
else np.flip(timesteps)
)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
# print(f"Running DDIM Sampling with {total_steps} timesteps")
# iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
# for i, step in enumerate(iterator):
for i, step in enumerate(time_range):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
img_orig = self.model.q_sample(
x0, ts
) # TODO: deterministic forward pass?
img = img_orig * mask + (1.0 - mask) * img
outs = self.p_sample_ddim(
img,
cond,
ts,
index=index,
use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised,
temperature=temperature,
noise_dropout=noise_dropout,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
)
img, pred_x0 = outs
if callback:
img = callback(i, img, pred_x0)
if img_callback:
img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates["x_inter"].append(img)
intermediates["pred_x0"].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim(
self,
x,
c,
t,
index,
repeat_noise=False,
use_original_steps=False,
quantize_denoised=False,
temperature=1.0,
noise_dropout=0.0,
score_corrector=None,
corrector_kwargs=None,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
dynamic_threshold=None,
):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
for k in c:
if isinstance(c[k], list):
c_in[k] = [
torch.cat([unconditional_conditioning[k][i], c[k][i]])
for i in range(len(c[k]))
]
else:
c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])
else:
c_in = torch.cat([unconditional_conditioning, c])
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(
self.model, e_t, x, t, c, **corrector_kwargs
)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = (
self.model.alphas_cumprod_prev
if use_original_steps
else self.ddim_alphas_prev
)
sqrt_one_minus_alphas = (
self.model.sqrt_one_minus_alphas_cumprod
if use_original_steps
else self.ddim_sqrt_one_minus_alphas
)
sigmas = (
self.model.ddim_sigmas_for_original_num_steps
if use_original_steps
else self.ddim_sigmas
)
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full(
(b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device
)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None:
pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)
# direction pointing to x_t
dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.0:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
return x_prev, pred_x0
@torch.no_grad()
def encode(
self,
x0,
c,
t_enc,
use_original_steps=False,
return_intermediates=None,
unconditional_guidance_scale=1.0,
unconditional_conditioning=None,
):
num_reference_steps = (
self.ddpm_num_timesteps
if use_original_steps
else self.ddim_timesteps.shape[0]
)
assert t_enc <= num_reference_steps
num_steps = t_enc
if use_original_steps:
alphas_next = self.alphas_cumprod[:num_steps]
alphas = self.alphas_cumprod_prev[:num_steps]
else:
alphas_next = self.ddim_alphas[:num_steps]
alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])
x_next = x0
intermediates = []
inter_steps = []
for i in tqdm(range(num_steps), desc="Encoding Image"):
t = torch.full(
(x0.shape[0],), i, device=self.model.device, dtype=torch.long
)
if unconditional_guidance_scale == 1.0:
noise_pred = self.model.apply_model(x_next, t, c)
else:
assert unconditional_conditioning is not None
e_t_uncond, noise_pred = torch.chunk(
self.model.apply_model(
torch.cat((x_next, x_next)),
torch.cat((t, t)),
torch.cat((unconditional_conditioning, c)),
),
2,
)
noise_pred = e_t_uncond + unconditional_guidance_scale * (
noise_pred - e_t_uncond
)
xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
weighted_noise_pred = (
alphas_next[i].sqrt()
* ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())
* noise_pred
)
x_next = xt_weighted + weighted_noise_pred
if (
return_intermediates
and i % (num_steps // return_intermediates) == 0
and i < num_steps - 1
):
intermediates.append(x_next)
inter_steps.append(i)
elif return_intermediates and i >= num_steps - 2:
intermediates.append(x_next)
inter_steps.append(i)
out = {"x_encoded": x_next, "intermediate_steps": inter_steps}
if return_intermediates:
out.update({"intermediates": intermediates})
return x_next, out
@torch.no_grad()
def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
# fast, but does not allow for exact reconstruction
# t serves as an index to gather the correct alphas
if use_original_steps:
sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
else:
sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
if noise is None:
noise = torch.randn_like(x0)
return ( | extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 | 3 | 2023-12-17 12:45:38+00:00 | 4k |
wangzhecheng/SkyScript | src/open_clip/openai.py | [
{
"identifier": "OPENAI_DATASET_MEAN",
"path": "src/open_clip/constants.py",
"snippet": "OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)"
},
{
"identifier": "OPENAI_DATASET_STD",
"path": "src/open_clip/constants.py",
"snippet": "OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.2... | import os
import warnings
import torch
from typing import List, Optional, Union
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import build_model_from_openai_state_dict, convert_weights_to_lp, get_cast_dtype
from .pretrained import get_pretrained_url, list_pretrained_models_by_tag, download_pretrained_from_url | 2,467 | """ OpenAI pretrained model functions
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
__all__ = ["list_openai_models", "load_openai_model"]
def list_openai_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list_pretrained_models_by_tag('openai')
def load_openai_model(
name: str,
precision: Optional[str] = None,
device: Optional[Union[str, torch.device]] = None,
cache_dir: Optional[str] = None,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
precision: str
Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
device : Union[str, torch.device]
The device to put the loaded model
cache_dir : Optional[str]
The directory to cache the downloaded model weights
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
if precision is None:
precision = 'fp32' if device == 'cpu' else 'fp16'
if get_pretrained_url(name, 'openai'):
model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location="cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
state_dict = torch.load(model_path, map_location="cpu")
# Build a non-jit model from the OpenAI jitted model state dict
cast_dtype = get_cast_dtype(precision)
try:
model = build_model_from_openai_state_dict(name, state_dict or model.state_dict(), cast_dtype=cast_dtype)
except KeyError:
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
model = build_model_from_openai_state_dict(name, sd, cast_dtype=cast_dtype)
# model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use
model = model.to(device)
# FIXME support pure fp16/bf16 precision modes
if precision != 'fp16':
model.float()
if precision == 'bf16':
# for bf16, convert back to low-precision
convert_weights_to_lp(model, dtype=torch.bfloat16)
# add mean / std attributes for consistency with OpenCLIP models
model.visual.image_mean = OPENAI_DATASET_MEAN
| """ OpenAI pretrained model functions
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
__all__ = ["list_openai_models", "load_openai_model"]
def list_openai_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list_pretrained_models_by_tag('openai')
def load_openai_model(
name: str,
precision: Optional[str] = None,
device: Optional[Union[str, torch.device]] = None,
cache_dir: Optional[str] = None,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
precision: str
Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
device : Union[str, torch.device]
The device to put the loaded model
cache_dir : Optional[str]
The directory to cache the downloaded model weights
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
if precision is None:
precision = 'fp32' if device == 'cpu' else 'fp16'
if get_pretrained_url(name, 'openai'):
model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location="cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
state_dict = torch.load(model_path, map_location="cpu")
# Build a non-jit model from the OpenAI jitted model state dict
cast_dtype = get_cast_dtype(precision)
try:
model = build_model_from_openai_state_dict(name, state_dict or model.state_dict(), cast_dtype=cast_dtype)
except KeyError:
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
model = build_model_from_openai_state_dict(name, sd, cast_dtype=cast_dtype)
# model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use
model = model.to(device)
# FIXME support pure fp16/bf16 precision modes
if precision != 'fp16':
model.float()
if precision == 'bf16':
# for bf16, convert back to low-precision
convert_weights_to_lp(model, dtype=torch.bfloat16)
# add mean / std attributes for consistency with OpenCLIP models
model.visual.image_mean = OPENAI_DATASET_MEAN | model.visual.image_std = OPENAI_DATASET_STD | 1 | 2023-12-19 11:50:56+00:00 | 4k |
JarodMica/ai-voice-cloning | modules/rvc/tools/torchgate/torchgate.py | [
{
"identifier": "linspace",
"path": "modules/rvc/tools/torchgate/utils.py",
"snippet": "@torch.no_grad()\ndef linspace(\n start: Number, stop: Number, num: int = 50, endpoint: bool = True, **kwargs\n) -> torch.Tensor:\n \"\"\"\n Generate a linearly spaced 1-D tensor.\n\n Arguments:\n ... | import torch
from torch.nn.functional import conv1d, conv2d
from typing import Union, Optional
from .utils import linspace, temperature_sigmoid, amp_to_db | 2,577 | def _generate_mask_smoothing_filter(self) -> Union[torch.Tensor, None]:
"""
A PyTorch module that applies a spectral gate to an input signal using the STFT.
Returns:
smoothing_filter (torch.Tensor): a 2D tensor representing the smoothing filter,
with shape (n_grad_freq, n_grad_time), where n_grad_freq is the number of frequency
bins to smooth and n_grad_time is the number of time frames to smooth.
If both self.freq_mask_smooth_hz and self.time_mask_smooth_ms are None, returns None.
"""
if self.freq_mask_smooth_hz is None and self.time_mask_smooth_ms is None:
return None
n_grad_freq = (
1
if self.freq_mask_smooth_hz is None
else int(self.freq_mask_smooth_hz / (self.sr / (self.n_fft / 2)))
)
if n_grad_freq < 1:
raise ValueError(
f"freq_mask_smooth_hz needs to be at least {int((self.sr / (self._n_fft / 2)))} Hz"
)
n_grad_time = (
1
if self.time_mask_smooth_ms is None
else int(self.time_mask_smooth_ms / ((self.hop_length / self.sr) * 1000))
)
if n_grad_time < 1:
raise ValueError(
f"time_mask_smooth_ms needs to be at least {int((self.hop_length / self.sr) * 1000)} ms"
)
if n_grad_time == 1 and n_grad_freq == 1:
return None
v_f = torch.cat(
[
linspace(0, 1, n_grad_freq + 1, endpoint=False),
linspace(1, 0, n_grad_freq + 2),
]
)[1:-1]
v_t = torch.cat(
[
linspace(0, 1, n_grad_time + 1, endpoint=False),
linspace(1, 0, n_grad_time + 2),
]
)[1:-1]
smoothing_filter = torch.outer(v_f, v_t).unsqueeze(0).unsqueeze(0)
return smoothing_filter / smoothing_filter.sum()
@torch.no_grad()
def _stationary_mask(
self, X_db: torch.Tensor, xn: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""
Computes a stationary binary mask to filter out noise in a log-magnitude spectrogram.
Arguments:
X_db (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the log-magnitude spectrogram.
xn (torch.Tensor): 1D tensor containing the audio signal corresponding to X_db.
Returns:
sig_mask (torch.Tensor): Binary mask of the same shape as X_db, where values greater than the threshold
are set to 1, and the rest are set to 0.
"""
if xn is not None:
XN = torch.stft(
xn,
n_fft=self.n_fft,
hop_length=self.hop_length,
win_length=self.win_length,
return_complex=True,
pad_mode="constant",
center=True,
window=torch.hann_window(self.win_length).to(xn.device),
)
XN_db = amp_to_db(XN).to(dtype=X_db.dtype)
else:
XN_db = X_db
# calculate mean and standard deviation along the frequency axis
std_freq_noise, mean_freq_noise = torch.std_mean(XN_db, dim=-1)
# compute noise threshold
noise_thresh = mean_freq_noise + std_freq_noise * self.n_std_thresh_stationary
# create binary mask by thresholding the spectrogram
sig_mask = X_db > noise_thresh.unsqueeze(2)
return sig_mask
@torch.no_grad()
def _nonstationary_mask(self, X_abs: torch.Tensor) -> torch.Tensor:
"""
Computes a non-stationary binary mask to filter out noise in a log-magnitude spectrogram.
Arguments:
X_abs (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the magnitude spectrogram.
Returns:
sig_mask (torch.Tensor): Binary mask of the same shape as X_abs, where values greater than the threshold
are set to 1, and the rest are set to 0.
"""
X_smoothed = (
conv1d(
X_abs.reshape(-1, 1, X_abs.shape[-1]),
torch.ones(
self.n_movemean_nonstationary,
dtype=X_abs.dtype,
device=X_abs.device,
).view(1, 1, -1),
padding="same",
).view(X_abs.shape)
/ self.n_movemean_nonstationary
)
# Compute slowness ratio and apply temperature sigmoid
slowness_ratio = (X_abs - X_smoothed) / (X_smoothed + 1e-6)
|
class TorchGate(torch.nn.Module):
"""
A PyTorch module that applies a spectral gate to an input signal.
Arguments:
sr {int} -- Sample rate of the input signal.
nonstationary {bool} -- Whether to use non-stationary or stationary masking (default: {False}).
n_std_thresh_stationary {float} -- Number of standard deviations above mean to threshold noise for
stationary masking (default: {1.5}).
n_thresh_nonstationary {float} -- Number of multiplies above smoothed magnitude spectrogram. for
non-stationary masking (default: {1.3}).
temp_coeff_nonstationary {float} -- Temperature coefficient for non-stationary masking (default: {0.1}).
n_movemean_nonstationary {int} -- Number of samples for moving average smoothing in non-stationary masking
(default: {20}).
prop_decrease {float} -- Proportion to decrease signal by where the mask is zero (default: {1.0}).
n_fft {int} -- Size of FFT for STFT (default: {1024}).
win_length {[int]} -- Window length for STFT. If None, defaults to `n_fft` (default: {None}).
hop_length {[int]} -- Hop length for STFT. If None, defaults to `win_length` // 4 (default: {None}).
freq_mask_smooth_hz {float} -- Frequency smoothing width for mask (in Hz). If None, no smoothing is applied
(default: {500}).
time_mask_smooth_ms {float} -- Time smoothing width for mask (in ms). If None, no smoothing is applied
(default: {50}).
"""
@torch.no_grad()
def __init__(
self,
sr: int,
nonstationary: bool = False,
n_std_thresh_stationary: float = 1.5,
n_thresh_nonstationary: float = 1.3,
temp_coeff_nonstationary: float = 0.1,
n_movemean_nonstationary: int = 20,
prop_decrease: float = 1.0,
n_fft: int = 1024,
win_length: bool = None,
hop_length: int = None,
freq_mask_smooth_hz: float = 500,
time_mask_smooth_ms: float = 50,
):
super().__init__()
# General Params
self.sr = sr
self.nonstationary = nonstationary
assert 0.0 <= prop_decrease <= 1.0
self.prop_decrease = prop_decrease
# STFT Params
self.n_fft = n_fft
self.win_length = self.n_fft if win_length is None else win_length
self.hop_length = self.win_length // 4 if hop_length is None else hop_length
# Stationary Params
self.n_std_thresh_stationary = n_std_thresh_stationary
# Non-Stationary Params
self.temp_coeff_nonstationary = temp_coeff_nonstationary
self.n_movemean_nonstationary = n_movemean_nonstationary
self.n_thresh_nonstationary = n_thresh_nonstationary
# Smooth Mask Params
self.freq_mask_smooth_hz = freq_mask_smooth_hz
self.time_mask_smooth_ms = time_mask_smooth_ms
self.register_buffer("smoothing_filter", self._generate_mask_smoothing_filter())
@torch.no_grad()
def _generate_mask_smoothing_filter(self) -> Union[torch.Tensor, None]:
"""
A PyTorch module that applies a spectral gate to an input signal using the STFT.
Returns:
smoothing_filter (torch.Tensor): a 2D tensor representing the smoothing filter,
with shape (n_grad_freq, n_grad_time), where n_grad_freq is the number of frequency
bins to smooth and n_grad_time is the number of time frames to smooth.
If both self.freq_mask_smooth_hz and self.time_mask_smooth_ms are None, returns None.
"""
if self.freq_mask_smooth_hz is None and self.time_mask_smooth_ms is None:
return None
n_grad_freq = (
1
if self.freq_mask_smooth_hz is None
else int(self.freq_mask_smooth_hz / (self.sr / (self.n_fft / 2)))
)
if n_grad_freq < 1:
raise ValueError(
f"freq_mask_smooth_hz needs to be at least {int((self.sr / (self._n_fft / 2)))} Hz"
)
n_grad_time = (
1
if self.time_mask_smooth_ms is None
else int(self.time_mask_smooth_ms / ((self.hop_length / self.sr) * 1000))
)
if n_grad_time < 1:
raise ValueError(
f"time_mask_smooth_ms needs to be at least {int((self.hop_length / self.sr) * 1000)} ms"
)
if n_grad_time == 1 and n_grad_freq == 1:
return None
v_f = torch.cat(
[
linspace(0, 1, n_grad_freq + 1, endpoint=False),
linspace(1, 0, n_grad_freq + 2),
]
)[1:-1]
v_t = torch.cat(
[
linspace(0, 1, n_grad_time + 1, endpoint=False),
linspace(1, 0, n_grad_time + 2),
]
)[1:-1]
smoothing_filter = torch.outer(v_f, v_t).unsqueeze(0).unsqueeze(0)
return smoothing_filter / smoothing_filter.sum()
@torch.no_grad()
def _stationary_mask(
self, X_db: torch.Tensor, xn: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""
Computes a stationary binary mask to filter out noise in a log-magnitude spectrogram.
Arguments:
X_db (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the log-magnitude spectrogram.
xn (torch.Tensor): 1D tensor containing the audio signal corresponding to X_db.
Returns:
sig_mask (torch.Tensor): Binary mask of the same shape as X_db, where values greater than the threshold
are set to 1, and the rest are set to 0.
"""
if xn is not None:
XN = torch.stft(
xn,
n_fft=self.n_fft,
hop_length=self.hop_length,
win_length=self.win_length,
return_complex=True,
pad_mode="constant",
center=True,
window=torch.hann_window(self.win_length).to(xn.device),
)
XN_db = amp_to_db(XN).to(dtype=X_db.dtype)
else:
XN_db = X_db
# calculate mean and standard deviation along the frequency axis
std_freq_noise, mean_freq_noise = torch.std_mean(XN_db, dim=-1)
# compute noise threshold
noise_thresh = mean_freq_noise + std_freq_noise * self.n_std_thresh_stationary
# create binary mask by thresholding the spectrogram
sig_mask = X_db > noise_thresh.unsqueeze(2)
return sig_mask
@torch.no_grad()
def _nonstationary_mask(self, X_abs: torch.Tensor) -> torch.Tensor:
"""
Computes a non-stationary binary mask to filter out noise in a log-magnitude spectrogram.
Arguments:
X_abs (torch.Tensor): 2D tensor of shape (frames, freq_bins) containing the magnitude spectrogram.
Returns:
sig_mask (torch.Tensor): Binary mask of the same shape as X_abs, where values greater than the threshold
are set to 1, and the rest are set to 0.
"""
X_smoothed = (
conv1d(
X_abs.reshape(-1, 1, X_abs.shape[-1]),
torch.ones(
self.n_movemean_nonstationary,
dtype=X_abs.dtype,
device=X_abs.device,
).view(1, 1, -1),
padding="same",
).view(X_abs.shape)
/ self.n_movemean_nonstationary
)
# Compute slowness ratio and apply temperature sigmoid
slowness_ratio = (X_abs - X_smoothed) / (X_smoothed + 1e-6) | sig_mask = temperature_sigmoid( | 1 | 2023-12-18 00:10:23+00:00 | 4k |
Lavreniuk/EVP | refer/models_refer/model.py | [
{
"identifier": "UNetWrapper",
"path": "evp/models.py",
"snippet": "class UNetWrapper(nn.Module):\n def __init__(self, unet, use_attn=True, base_size=512, max_attn_size=None, attn_selector='up_cross+down_cross') -> None:\n super().__init__()\n self.unet = unet\n self.attention_st... | import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import sys
from ldm.util import instantiate_from_config
from transformers.models.clip.modeling_clip import CLIPTextModel
from omegaconf import OmegaConf
from lib.mask_predictor import SimpleDecoding
from evp.models import UNetWrapper, TextAdapterRefer | 3,071 |
# ReLU Activation
self.relu = nn.ReLU()
self.upscale = PixelShuffle(in_channels, 2)
def forward(self, x):
# Apply spatial attention
spatial_attention = self.spatial_attention(x)
x = x * spatial_attention
# Apply channel attention
channel_attention = self.channel_attention(x)
x = x * channel_attention
# Apply convolutional layers
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
x = self.conv2(x)
x = self.group_norm(x)
x = self.relu(x)
# Upsample
x = self.upscale(x)
return x
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvLayer, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1),
nn.GroupNorm(20, out_channels),
nn.ReLU(),
)
def forward(self, x):
x = self.conv1(x)
return x
class InverseMultiAttentiveFeatureRefinement(nn.Module):
def __init__(self, in_channels_list):
super(InverseMultiAttentiveFeatureRefinement, self).__init__()
self.layer1 = AttentionModule(in_channels_list[0], in_channels_list[0])
self.layer2 = AttentionDownsamplingModule(in_channels_list[0], in_channels_list[0]//2, scale_factor = 2)
self.layer3 = ConvLayer(in_channels_list[0]//2 + in_channels_list[1], in_channels_list[1])
self.layer4 = AttentionDownsamplingModule(in_channels_list[1], in_channels_list[1]//2, scale_factor = 2)
self.layer5 = ConvLayer(in_channels_list[1]//2 + in_channels_list[2], in_channels_list[2])
self.layer6 = AttentionDownsamplingModule(in_channels_list[2], in_channels_list[2]//2, scale_factor = 2)
self.layer7 = ConvLayer(in_channels_list[2]//2 + in_channels_list[3], in_channels_list[3])
'''
self.layer8 = AttentionUpsamplingModule(in_channels_list[3], in_channels_list[3])
self.layer9 = ConvLayer(in_channels_list[2] + in_channels_list[3], in_channels_list[2])
self.layer10 = AttentionUpsamplingModule(in_channels_list[2], in_channels_list[2])
self.layer11 = ConvLayer(in_channels_list[1] + in_channels_list[2], in_channels_list[1])
self.layer12 = AttentionUpsamplingModule(in_channels_list[1], in_channels_list[1])
self.layer13 = ConvLayer(in_channels_list[0] + in_channels_list[1], in_channels_list[0])
'''
def forward(self, inputs):
x_c4, x_c3, x_c2, x_c1 = inputs
x_c4 = self.layer1(x_c4)
x_c4_3 = self.layer2(x_c4)
x_c3 = torch.cat([x_c4_3, x_c3], dim=1)
x_c3 = self.layer3(x_c3)
x_c3_2 = self.layer4(x_c3)
x_c2 = torch.cat([x_c3_2, x_c2], dim=1)
x_c2 = self.layer5(x_c2)
x_c2_1 = self.layer6(x_c2)
x_c1 = torch.cat([x_c2_1, x_c1], dim=1)
x_c1 = self.layer7(x_c1)
'''
x_c1_2 = self.layer8(x_c1)
x_c2 = torch.cat([x_c1_2, x_c2], dim=1)
x_c2 = self.layer9(x_c2)
x_c2_3 = self.layer10(x_c2)
x_c3 = torch.cat([x_c2_3, x_c3], dim=1)
x_c3 = self.layer11(x_c3)
x_c3_4 = self.layer12(x_c3)
x_c4 = torch.cat([x_c3_4, x_c4], dim=1)
x_c4 = self.layer13(x_c4)
'''
return [x_c4, x_c3, x_c2, x_c1]
class EVPRefer(nn.Module):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which could be dumped during inference.
"""
def __init__(self,
sd_path=None,
base_size=512,
token_embed_dim=768,
neck_dim=[320,680,1320,1280],
**args):
super().__init__()
config = OmegaConf.load('./v1-inference.yaml')
if os.path.exists(f'{sd_path}'):
config.model.params.ckpt_path = f'{sd_path}'
else:
config.model.params.ckpt_path = None
sd_model = instantiate_from_config(config.model)
self.encoder_vq = sd_model.first_stage_model
self.unet = UNetWrapper(sd_model.model, base_size=base_size)
del sd_model.cond_stage_model
del self.encoder_vq.decoder
for param in self.encoder_vq.parameters():
param.requires_grad = True
|
def icnr(x, scale=2, init=nn.init.kaiming_normal_):
"""
Checkerboard artifact free sub-pixel convolution
https://arxiv.org/abs/1707.02937
"""
ni,nf,h,w = x.shape
ni2 = int(ni/(scale**2))
k = init(torch.zeros([ni2,nf,h,w])).transpose(0, 1)
k = k.contiguous().view(ni2, nf, -1)
k = k.repeat(1, 1, scale**2)
k = k.contiguous().view([nf,ni,h,w]).transpose(0, 1)
x.data.copy_(k)
class PixelShuffle(nn.Module):
"""
Real-Time Single Image and Video Super-Resolution
https://arxiv.org/abs/1609.05158
"""
def __init__(self, n_channels, scale):
super(PixelShuffle, self).__init__()
self.conv = nn.Conv2d(n_channels, n_channels*(scale**2), kernel_size=1)
icnr(self.conv.weight)
self.shuf = nn.PixelShuffle(scale)
self.relu = nn.ReLU()
def forward(self,x):
x = self.shuf(self.relu(self.conv(x)))
return x
class AttentionModule(nn.Module):
def __init__(self, in_channels, out_channels):
super(AttentionModule, self).__init__()
# Convolutional Layers
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
# Group Normalization
self.group_norm = nn.GroupNorm(20, out_channels)
# ReLU Activation
self.relu = nn.ReLU()
# Spatial Attention
self.spatial_attention = nn.Sequential(
nn.Conv2d(in_channels, 1, kernel_size=1),
nn.Sigmoid()
)
def forward(self, x):
# Apply spatial attention
spatial_attention = self.spatial_attention(x)
x = x * spatial_attention
# Apply convolutional layer
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
return x
class AttentionDownsamplingModule(nn.Module):
def __init__(self, in_channels, out_channels, scale_factor=2):
super(AttentionDownsamplingModule, self).__init__()
# Spatial Attention
self.spatial_attention = nn.Sequential(
nn.Conv2d(in_channels, 1, kernel_size=1),
nn.Sigmoid()
)
# Channel Attention
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, in_channels // 8, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels // 8, in_channels, kernel_size=1),
nn.Sigmoid()
)
# Convolutional Layers
if scale_factor == 2:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
elif scale_factor == 4:
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=2, padding=1)
# Group Normalization
self.group_norm = nn.GroupNorm(20, out_channels)
# ReLU Activation
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
# Apply spatial attention
spatial_attention = self.spatial_attention(x)
x = x * spatial_attention
# Apply channel attention
channel_attention = self.channel_attention(x)
x = x * channel_attention
# Apply convolutional layers
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
x = self.conv2(x)
x = self.group_norm(x)
x = self.relu(x)
return x
class AttentionUpsamplingModule(nn.Module):
def __init__(self, in_channels, out_channels):
super(AttentionUpsamplingModule, self).__init__()
# Spatial Attention for outs[2]
self.spatial_attention = nn.Sequential(
nn.Conv2d(in_channels, 1, kernel_size=1),
nn.Sigmoid()
)
# Channel Attention for outs[2]
self.channel_attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, in_channels // 8, kernel_size=1),
nn.ReLU(),
nn.Conv2d(in_channels // 8, in_channels, kernel_size=1),
nn.Sigmoid()
)
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
# Group Normalization
self.group_norm = nn.GroupNorm(20, out_channels)
# ReLU Activation
self.relu = nn.ReLU()
self.upscale = PixelShuffle(in_channels, 2)
def forward(self, x):
# Apply spatial attention
spatial_attention = self.spatial_attention(x)
x = x * spatial_attention
# Apply channel attention
channel_attention = self.channel_attention(x)
x = x * channel_attention
# Apply convolutional layers
x = self.conv1(x)
x = self.group_norm(x)
x = self.relu(x)
x = self.conv2(x)
x = self.group_norm(x)
x = self.relu(x)
# Upsample
x = self.upscale(x)
return x
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvLayer, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1),
nn.GroupNorm(20, out_channels),
nn.ReLU(),
)
def forward(self, x):
x = self.conv1(x)
return x
class InverseMultiAttentiveFeatureRefinement(nn.Module):
def __init__(self, in_channels_list):
super(InverseMultiAttentiveFeatureRefinement, self).__init__()
self.layer1 = AttentionModule(in_channels_list[0], in_channels_list[0])
self.layer2 = AttentionDownsamplingModule(in_channels_list[0], in_channels_list[0]//2, scale_factor = 2)
self.layer3 = ConvLayer(in_channels_list[0]//2 + in_channels_list[1], in_channels_list[1])
self.layer4 = AttentionDownsamplingModule(in_channels_list[1], in_channels_list[1]//2, scale_factor = 2)
self.layer5 = ConvLayer(in_channels_list[1]//2 + in_channels_list[2], in_channels_list[2])
self.layer6 = AttentionDownsamplingModule(in_channels_list[2], in_channels_list[2]//2, scale_factor = 2)
self.layer7 = ConvLayer(in_channels_list[2]//2 + in_channels_list[3], in_channels_list[3])
'''
self.layer8 = AttentionUpsamplingModule(in_channels_list[3], in_channels_list[3])
self.layer9 = ConvLayer(in_channels_list[2] + in_channels_list[3], in_channels_list[2])
self.layer10 = AttentionUpsamplingModule(in_channels_list[2], in_channels_list[2])
self.layer11 = ConvLayer(in_channels_list[1] + in_channels_list[2], in_channels_list[1])
self.layer12 = AttentionUpsamplingModule(in_channels_list[1], in_channels_list[1])
self.layer13 = ConvLayer(in_channels_list[0] + in_channels_list[1], in_channels_list[0])
'''
def forward(self, inputs):
x_c4, x_c3, x_c2, x_c1 = inputs
x_c4 = self.layer1(x_c4)
x_c4_3 = self.layer2(x_c4)
x_c3 = torch.cat([x_c4_3, x_c3], dim=1)
x_c3 = self.layer3(x_c3)
x_c3_2 = self.layer4(x_c3)
x_c2 = torch.cat([x_c3_2, x_c2], dim=1)
x_c2 = self.layer5(x_c2)
x_c2_1 = self.layer6(x_c2)
x_c1 = torch.cat([x_c2_1, x_c1], dim=1)
x_c1 = self.layer7(x_c1)
'''
x_c1_2 = self.layer8(x_c1)
x_c2 = torch.cat([x_c1_2, x_c2], dim=1)
x_c2 = self.layer9(x_c2)
x_c2_3 = self.layer10(x_c2)
x_c3 = torch.cat([x_c2_3, x_c3], dim=1)
x_c3 = self.layer11(x_c3)
x_c3_4 = self.layer12(x_c3)
x_c4 = torch.cat([x_c3_4, x_c4], dim=1)
x_c4 = self.layer13(x_c4)
'''
return [x_c4, x_c3, x_c2, x_c1]
class EVPRefer(nn.Module):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which could be dumped during inference.
"""
def __init__(self,
sd_path=None,
base_size=512,
token_embed_dim=768,
neck_dim=[320,680,1320,1280],
**args):
super().__init__()
config = OmegaConf.load('./v1-inference.yaml')
if os.path.exists(f'{sd_path}'):
config.model.params.ckpt_path = f'{sd_path}'
else:
config.model.params.ckpt_path = None
sd_model = instantiate_from_config(config.model)
self.encoder_vq = sd_model.first_stage_model
self.unet = UNetWrapper(sd_model.model, base_size=base_size)
del sd_model.cond_stage_model
del self.encoder_vq.decoder
for param in self.encoder_vq.parameters():
param.requires_grad = True
| self.text_adapter = TextAdapterRefer(text_dim=token_embed_dim) | 1 | 2023-12-15 14:13:59+00:00 | 4k |
penghao-wu/vstar | LLaVA/llava/model/llava_arch.py | [
{
"identifier": "build_vision_tower",
"path": "LLaVA/llava/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists ... | from abc import ABC, abstractmethod
from LLaVA.llava.model.multimodal_encoder.builder import build_vision_tower
from LLaVA.llava.model.multimodal_projector.builder import build_vision_projector
from ..constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch | 1,774 | # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = build_vision_projector(config)
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None):
vision_tower = model_args.vision_tower
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
self.config.mm_vision_tower = vision_tower
if self.get_vision_tower() is None:
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
else:
if fsdp is not None and len(fsdp) > 0:
vision_tower = self.vision_tower[0]
else:
vision_tower = self.vision_tower
vision_tower.load_model()
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.config.mm_hidden_size = vision_tower.hidden_size
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
if getattr(self, 'mm_projector', None) is None:
self.mm_projector = build_vision_projector(self.config)
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
class LlavaMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_images(self, images):
image_features = self.get_model().get_vision_tower()(images)
image_features = self.get_model().mm_projector(image_features)
return image_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, images
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1) for x in image_features]
else:
image_features = self.encode_images(images)
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids):
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
self.mm_projector = build_vision_projector(config)
def get_vision_tower(self):
vision_tower = getattr(self, 'vision_tower', None)
if type(vision_tower) is list:
vision_tower = vision_tower[0]
return vision_tower
def initialize_vision_modules(self, model_args, fsdp=None):
vision_tower = model_args.vision_tower
mm_vision_select_layer = model_args.mm_vision_select_layer
mm_vision_select_feature = model_args.mm_vision_select_feature
pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter
self.config.mm_vision_tower = vision_tower
if self.get_vision_tower() is None:
vision_tower = build_vision_tower(model_args)
if fsdp is not None and len(fsdp) > 0:
self.vision_tower = [vision_tower]
else:
self.vision_tower = vision_tower
else:
if fsdp is not None and len(fsdp) > 0:
vision_tower = self.vision_tower[0]
else:
vision_tower = self.vision_tower
vision_tower.load_model()
self.config.use_mm_proj = True
self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear')
self.config.mm_hidden_size = vision_tower.hidden_size
self.config.mm_vision_select_layer = mm_vision_select_layer
self.config.mm_vision_select_feature = mm_vision_select_feature
if getattr(self, 'mm_projector', None) is None:
self.mm_projector = build_vision_projector(self.config)
if pretrain_mm_mlp_adapter is not None:
mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')
def get_w(weights, keyword):
return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}
self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))
class LlavaMetaForCausalLM(ABC):
@abstractmethod
def get_model(self):
pass
def get_vision_tower(self):
return self.get_model().get_vision_tower()
def encode_images(self, images):
image_features = self.get_model().get_vision_tower()(images)
image_features = self.get_model().mm_projector(image_features)
return image_features
def prepare_inputs_labels_for_multimodal(
self, input_ids, attention_mask, past_key_values, labels, images
):
vision_tower = self.get_vision_tower()
if vision_tower is None or images is None or input_ids.shape[1] == 1:
if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:
attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)
return input_ids, attention_mask, past_key_values, None, labels
if type(images) is list or images.ndim == 5:
concat_images = torch.cat([image for image in images], dim=0)
image_features = self.encode_images(concat_images)
split_sizes = [image.shape[0] for image in images]
image_features = torch.split(image_features, split_sizes, dim=0)
image_features = [x.flatten(0, 1) for x in image_features]
else:
image_features = self.encode_images(images)
new_input_embeds = []
new_labels = [] if labels is not None else None
cur_image_idx = 0
for batch_idx, cur_input_ids in enumerate(input_ids): | if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: | 3 | 2023-12-15 14:58:24+00:00 | 4k |
ValdonVitija/crap | crap/cli.py | [
{
"identifier": "CrapManager",
"path": "crap/crap_manager.py",
"snippet": "class CrapManager:\n __slots__ = (\"path_\", \"venv_checker\", \"package_usage_counter\", \"deleted_packages\")\n\n def __init__(self, path_: str):\n self.path_ = pathlib.Path(path_).absolute()\n self.venv_che... | from functools import lru_cache
from typing import List
from typing_extensions import Annotated, Optional
from crap.crap_manager import CrapManager
from crap.package_management import PackageManagement
import typer | 2,138 |
__all__: List[str] = ["get_app"]
app = typer.Typer(no_args_is_help=True)
@app.command()
def crap(
path_: Annotated[str, typer.Argument(help="path to file/files")] = ".",
important: Optional[str] = typer.Option(
None,
"--important",
"-i",
help="Add a package to the list of important packages",
),
remove: Optional[str] = typer.Option(
None,
"--remove",
"-r",
help="Remove a package from the list of important packages",
),
flush: bool = typer.Option(
False,
"--flush",
"-f",
help="Remove all packages from the list of important packages",
),
show: bool = typer.Option(
False,
"--show",
"-s",
help="Show all important packages",
),
factory_reset: bool = typer.Option(
False,
"--factory-reset",
"-fr",
help="Reset all settings to default",
),
):
if (
sum(
[
bool(opt)
for opt in [path_ != ".", important, remove, flush, show, factory_reset]
]
)
> 1
):
print("Error: Options cannot be used together.")
raise typer.Exit(code=1)
package_management = PackageManagement()
if important:
package_management.add_important_package(important)
elif remove:
package_management.remove_important_package(remove)
elif flush:
package_management.flush_important_packages()
elif show:
package_management.show_important_packages()
elif factory_reset:
package_management.factory_reset_important_packages()
else:
|
__all__: List[str] = ["get_app"]
app = typer.Typer(no_args_is_help=True)
@app.command()
def crap(
path_: Annotated[str, typer.Argument(help="path to file/files")] = ".",
important: Optional[str] = typer.Option(
None,
"--important",
"-i",
help="Add a package to the list of important packages",
),
remove: Optional[str] = typer.Option(
None,
"--remove",
"-r",
help="Remove a package from the list of important packages",
),
flush: bool = typer.Option(
False,
"--flush",
"-f",
help="Remove all packages from the list of important packages",
),
show: bool = typer.Option(
False,
"--show",
"-s",
help="Show all important packages",
),
factory_reset: bool = typer.Option(
False,
"--factory-reset",
"-fr",
help="Reset all settings to default",
),
):
if (
sum(
[
bool(opt)
for opt in [path_ != ".", important, remove, flush, show, factory_reset]
]
)
> 1
):
print("Error: Options cannot be used together.")
raise typer.Exit(code=1)
package_management = PackageManagement()
if important:
package_management.add_important_package(important)
elif remove:
package_management.remove_important_package(remove)
elif flush:
package_management.flush_important_packages()
elif show:
package_management.show_important_packages()
elif factory_reset:
package_management.factory_reset_important_packages()
else: | manager = CrapManager(path_=path_) | 0 | 2023-12-19 20:22:37+00:00 | 4k |
worm128/AI-YinMei | text-generation-webui/extensions/openai/script.py | [
{
"identifier": "ChatCompletionRequest",
"path": "text-generation-webui/extensions/openai/typing.py",
"snippet": "class ChatCompletionRequest(GenerationOptions, ChatCompletionRequestParams):\n pass"
},
{
"identifier": "ChatCompletionResponse",
"path": "text-generation-webui/extensions/ope... | import asyncio
import json
import os
import traceback
import speech_recognition as sr
import uvicorn
import extensions.openai.completions as OAIcompletions
import extensions.openai.embeddings as OAIembeddings
import extensions.openai.images as OAIimages
import extensions.openai.logits as OAIlogits
import extensions.openai.models as OAImodels
import extensions.openai.moderations as OAImoderations
from threading import Thread
from fastapi import Depends, FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import JSONResponse
from pydub import AudioSegment
from sse_starlette import EventSourceResponse
from extensions.openai.errors import ServiceUnavailableError
from extensions.openai.tokens import token_count, token_decode, token_encode
from extensions.openai.utils import _start_cloudflared
from modules import shared
from modules.logging_colors import logger
from modules.models import unload_model
from modules.text_generation import stop_everything_event
from .typing import (
ChatCompletionRequest,
ChatCompletionResponse,
CompletionRequest,
CompletionResponse,
DecodeRequest,
DecodeResponse,
EmbeddingsRequest,
EmbeddingsResponse,
EncodeRequest,
EncodeResponse,
LoadLorasRequest,
LoadModelRequest,
LogitsRequest,
LogitsResponse,
LoraListResponse,
ModelInfoResponse,
ModelListResponse,
TokenCountResponse,
to_dict
) | 2,821 |
@app.get('/v1/billing/usage', dependencies=check_key)
def handle_billing_usage():
'''
Ex. /v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31
'''
return JSONResponse(content={"total_usage": 0})
@app.post('/v1/audio/transcriptions', dependencies=check_key)
async def handle_audio_transcription(request: Request):
r = sr.Recognizer()
form = await request.form()
audio_file = await form["file"].read()
audio_data = AudioSegment.from_file(audio_file)
# Convert AudioSegment to raw data
raw_data = audio_data.raw_data
# Create AudioData object
audio_data = sr.AudioData(raw_data, audio_data.frame_rate, audio_data.sample_width)
whipser_language = form.getvalue('language', None)
whipser_model = form.getvalue('model', 'tiny') # Use the model from the form data if it exists, otherwise default to tiny
transcription = {"text": ""}
try:
transcription["text"] = r.recognize_whisper(audio_data, language=whipser_language, model=whipser_model)
except sr.UnknownValueError:
print("Whisper could not understand audio")
transcription["text"] = "Whisper could not understand audio UnknownValueError"
except sr.RequestError as e:
print("Could not request results from Whisper", e)
transcription["text"] = "Whisper could not understand audio RequestError"
return JSONResponse(content=transcription)
@app.post('/v1/images/generations', dependencies=check_key)
async def handle_image_generation(request: Request):
if not os.environ.get('SD_WEBUI_URL', params.get('sd_webui_url', '')):
raise ServiceUnavailableError("Stable Diffusion not available. SD_WEBUI_URL not set.")
body = await request.json()
prompt = body['prompt']
size = body.get('size', '1024x1024')
response_format = body.get('response_format', 'url') # or b64_json
n = body.get('n', 1) # ignore the batch limits of max 10
response = await OAIimages.generations(prompt=prompt, size=size, response_format=response_format, n=n)
return JSONResponse(response)
@app.post("/v1/embeddings", response_model=EmbeddingsResponse, dependencies=check_key)
async def handle_embeddings(request: Request, request_data: EmbeddingsRequest):
input = request_data.input
if not input:
raise HTTPException(status_code=400, detail="Missing required argument input")
if type(input) is str:
input = [input]
response = OAIembeddings.embeddings(input, request_data.encoding_format)
return JSONResponse(response)
@app.post("/v1/moderations", dependencies=check_key)
async def handle_moderations(request: Request):
body = await request.json()
input = body["input"]
if not input:
raise HTTPException(status_code=400, detail="Missing required argument input")
response = OAImoderations.moderations(input)
return JSONResponse(response)
@app.post("/v1/internal/encode", response_model=EncodeResponse, dependencies=check_key)
async def handle_token_encode(request_data: EncodeRequest):
response = token_encode(request_data.text)
return JSONResponse(response)
@app.post("/v1/internal/decode", response_model=DecodeResponse, dependencies=check_key)
async def handle_token_decode(request_data: DecodeRequest):
response = token_decode(request_data.tokens)
return JSONResponse(response)
@app.post("/v1/internal/token-count", response_model=TokenCountResponse, dependencies=check_key)
async def handle_token_count(request_data: EncodeRequest):
response = token_count(request_data.text)
return JSONResponse(response)
@app.post("/v1/internal/logits", response_model=LogitsResponse, dependencies=check_key)
async def handle_logits(request_data: LogitsRequest):
'''
Given a prompt, returns the top 50 most likely logits as a dict.
The keys are the tokens, and the values are the probabilities.
'''
response = OAIlogits._get_next_logits(to_dict(request_data))
return JSONResponse(response)
@app.post("/v1/internal/stop-generation", dependencies=check_key)
async def handle_stop_generation(request: Request):
stop_everything_event()
return JSONResponse(content="OK")
@app.get("/v1/internal/model/info", response_model=ModelInfoResponse, dependencies=check_key)
async def handle_model_info():
payload = OAImodels.get_current_model_info()
return JSONResponse(content=payload)
|
params = {
'embedding_device': 'cpu',
'embedding_model': 'sentence-transformers/all-mpnet-base-v2',
'sd_webui_url': '',
'debug': 0
}
streaming_semaphore = asyncio.Semaphore(1)
def verify_api_key(authorization: str = Header(None)) -> None:
expected_api_key = shared.args.api_key
if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
raise HTTPException(status_code=401, detail="Unauthorized")
def verify_admin_key(authorization: str = Header(None)) -> None:
expected_api_key = shared.args.admin_key
if expected_api_key and (authorization is None or authorization != f"Bearer {expected_api_key}"):
raise HTTPException(status_code=401, detail="Unauthorized")
app = FastAPI()
check_key = [Depends(verify_api_key)]
check_admin_key = [Depends(verify_admin_key)]
# Configure CORS settings to allow all origins, methods, and headers
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"]
)
@app.options("/", dependencies=check_key)
async def options_route():
return JSONResponse(content="OK")
@app.post('/v1/completions', response_model=CompletionResponse, dependencies=check_key)
async def openai_completions(request: Request, request_data: CompletionRequest):
path = request.url.path
is_legacy = "/generate" in path
if request_data.stream:
async def generator():
async with streaming_semaphore:
response = OAIcompletions.stream_completions(to_dict(request_data), is_legacy=is_legacy)
for resp in response:
disconnected = await request.is_disconnected()
if disconnected:
break
yield {"data": json.dumps(resp)}
return EventSourceResponse(generator()) # SSE streaming
else:
response = OAIcompletions.completions(to_dict(request_data), is_legacy=is_legacy)
return JSONResponse(response)
@app.post('/v1/chat/completions', response_model=ChatCompletionResponse, dependencies=check_key)
async def openai_chat_completions(request: Request, request_data: ChatCompletionRequest):
path = request.url.path
is_legacy = "/generate" in path
if request_data.stream:
async def generator():
async with streaming_semaphore:
response = OAIcompletions.stream_chat_completions(to_dict(request_data), is_legacy=is_legacy)
for resp in response:
disconnected = await request.is_disconnected()
if disconnected:
break
yield {"data": json.dumps(resp)}
return EventSourceResponse(generator()) # SSE streaming
else:
response = OAIcompletions.chat_completions(to_dict(request_data), is_legacy=is_legacy)
return JSONResponse(response)
@app.get("/v1/models", dependencies=check_key)
@app.get("/v1/models/{model}", dependencies=check_key)
async def handle_models(request: Request):
path = request.url.path
is_list = request.url.path.split('?')[0].split('#')[0] == '/v1/models'
if is_list:
response = OAImodels.list_dummy_models()
else:
model_name = path[len('/v1/models/'):]
response = OAImodels.model_info_dict(model_name)
return JSONResponse(response)
@app.get('/v1/billing/usage', dependencies=check_key)
def handle_billing_usage():
'''
Ex. /v1/dashboard/billing/usage?start_date=2023-05-01&end_date=2023-05-31
'''
return JSONResponse(content={"total_usage": 0})
@app.post('/v1/audio/transcriptions', dependencies=check_key)
async def handle_audio_transcription(request: Request):
r = sr.Recognizer()
form = await request.form()
audio_file = await form["file"].read()
audio_data = AudioSegment.from_file(audio_file)
# Convert AudioSegment to raw data
raw_data = audio_data.raw_data
# Create AudioData object
audio_data = sr.AudioData(raw_data, audio_data.frame_rate, audio_data.sample_width)
whipser_language = form.getvalue('language', None)
whipser_model = form.getvalue('model', 'tiny') # Use the model from the form data if it exists, otherwise default to tiny
transcription = {"text": ""}
try:
transcription["text"] = r.recognize_whisper(audio_data, language=whipser_language, model=whipser_model)
except sr.UnknownValueError:
print("Whisper could not understand audio")
transcription["text"] = "Whisper could not understand audio UnknownValueError"
except sr.RequestError as e:
print("Could not request results from Whisper", e)
transcription["text"] = "Whisper could not understand audio RequestError"
return JSONResponse(content=transcription)
@app.post('/v1/images/generations', dependencies=check_key)
async def handle_image_generation(request: Request):
if not os.environ.get('SD_WEBUI_URL', params.get('sd_webui_url', '')):
raise ServiceUnavailableError("Stable Diffusion not available. SD_WEBUI_URL not set.")
body = await request.json()
prompt = body['prompt']
size = body.get('size', '1024x1024')
response_format = body.get('response_format', 'url') # or b64_json
n = body.get('n', 1) # ignore the batch limits of max 10
response = await OAIimages.generations(prompt=prompt, size=size, response_format=response_format, n=n)
return JSONResponse(response)
@app.post("/v1/embeddings", response_model=EmbeddingsResponse, dependencies=check_key)
async def handle_embeddings(request: Request, request_data: EmbeddingsRequest):
input = request_data.input
if not input:
raise HTTPException(status_code=400, detail="Missing required argument input")
if type(input) is str:
input = [input]
response = OAIembeddings.embeddings(input, request_data.encoding_format)
return JSONResponse(response)
@app.post("/v1/moderations", dependencies=check_key)
async def handle_moderations(request: Request):
body = await request.json()
input = body["input"]
if not input:
raise HTTPException(status_code=400, detail="Missing required argument input")
response = OAImoderations.moderations(input)
return JSONResponse(response)
@app.post("/v1/internal/encode", response_model=EncodeResponse, dependencies=check_key)
async def handle_token_encode(request_data: EncodeRequest):
response = token_encode(request_data.text)
return JSONResponse(response)
@app.post("/v1/internal/decode", response_model=DecodeResponse, dependencies=check_key)
async def handle_token_decode(request_data: DecodeRequest):
response = token_decode(request_data.tokens)
return JSONResponse(response)
@app.post("/v1/internal/token-count", response_model=TokenCountResponse, dependencies=check_key)
async def handle_token_count(request_data: EncodeRequest):
response = token_count(request_data.text)
return JSONResponse(response)
@app.post("/v1/internal/logits", response_model=LogitsResponse, dependencies=check_key)
async def handle_logits(request_data: LogitsRequest):
'''
Given a prompt, returns the top 50 most likely logits as a dict.
The keys are the tokens, and the values are the probabilities.
'''
response = OAIlogits._get_next_logits(to_dict(request_data))
return JSONResponse(response)
@app.post("/v1/internal/stop-generation", dependencies=check_key)
async def handle_stop_generation(request: Request):
stop_everything_event()
return JSONResponse(content="OK")
@app.get("/v1/internal/model/info", response_model=ModelInfoResponse, dependencies=check_key)
async def handle_model_info():
payload = OAImodels.get_current_model_info()
return JSONResponse(content=payload)
| @app.get("/v1/internal/model/list", response_model=ModelListResponse, dependencies=check_admin_key) | 16 | 2023-12-20 14:13:38+00:00 | 4k |
foocker/Bert-VITS2-Faster | modules.py | [
{
"identifier": "init_weights",
"path": "commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)"
},
{
"identifier": "get_padding",
"path": "commons.py",
"sni... | import math
import torch
import commons
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d
from torch.nn.utils import weight_norm, remove_weight_norm
from commons import init_weights, get_padding
from transforms import piecewise_rational_quadratic_transform
from attentions import Encoder | 2,653 | )
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
|
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(
self,
in_channels,
hidden_channels,
out_channels,
kernel_size,
n_layers,
p_dropout,
):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(
nn.Conv1d(
in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
for _ in range(n_layers - 1):
self.conv_layers.append(
nn.Conv1d(
hidden_channels,
hidden_channels,
kernel_size,
padding=kernel_size // 2,
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
Dialted and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size**i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(
nn.Conv1d(
channels,
channels,
kernel_size,
groups=channels,
dilation=dilation,
padding=padding,
)
)
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0], | padding=get_padding(kernel_size, dilation[0]), | 1 | 2023-12-18 09:53:41+00:00 | 4k |
sinoyou/nelf-pro | nerfstudio/configs/base_config.py | [
{
"identifier": "to_immutable_dict",
"path": "nerfstudio/configs/config_utils.py",
"snippet": "def to_immutable_dict(d: Dict[str, Any]):\n \"\"\"Method to convert mutable dict to default factory dict\n\n Args:\n d: dictionary to convert into default factory dict for dataclass\n \"\"\"\n ... | import warnings
import yaml
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Type
from rich.console import Console
from typing_extensions import Literal
from nerfstudio.configs.config_utils import to_immutable_dict
from nerfstudio.utils import writer
from nerfstudio.engine.optimizers import OptimizerConfig
from nerfstudio.engine.schedulers import SchedulerConfig
from nerfstudio.pipelines.base_pipeline import VanillaPipelineConfig | 3,250 | """maximum number of rows to print before wrapping. if 0, will print everything."""
def setup(self, banner_messages: Optional[List[str]] = None, **kwargs) -> Any:
"""Instantiate local writer
Args:
banner_messages: List of strings that always print at the bottom of screen.
"""
return self._target(self, banner_messages=banner_messages, **kwargs)
@dataclass
class LoggingConfig(PrintableConfig):
"""Configuration of loggers and profilers"""
relative_log_dir: Path = Path("./")
"""relative path to save all logged events"""
steps_per_log: int = 10
"""number of steps between logging stats"""
max_buffer_size: int = 20
"""maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurances."""
local_writer: LocalWriterConfig = LocalWriterConfig(enable=True)
"""if provided, will print stats locally. if None, will disable printing"""
enable_profiler: bool = True
"""whether to enable profiling code; prints speed of functions at the end of a program.
profiler logs run times of functions and prints at end of training"""
# Trainer related configs
@dataclass
class TrainerConfig(PrintableConfig):
"""Configuration for training regimen"""
steps_per_save: int = 1000
"""Number of steps between saves."""
steps_per_eval_batch: int = 500
"""Number of steps between randomly sampled batches of rays."""
steps_per_eval_image: int = 500
"""Number of steps between single eval images."""
steps_per_eval_all_images: int = 25000
"""Number of steps between eval all images."""
max_num_iterations: int = 1000000
"""Maximum number of iterations to run."""
mixed_precision: bool = False
"""Whether or not to use mixed precision for training."""
relative_model_dir: Path = Path("models/")
"""Relative path to save all checkpoints."""
save_only_latest_checkpoint: bool = True
"""Whether to only save the latest checkpoint or all checkpoints."""
# optional parameters if we want to resume training
load_dir: Optional[Path] = None
"""Optionally specify a pre-trained model directory to load from."""
load_step: Optional[int] = None
"""Optionally specify model step to load from; if none, will find most recent model in load_dir."""
load_config: Optional[Path] = None
"""Optionally specify model config to load from; if none, will use the default config?"""
load_scheduler: bool = True
"""Whether to load the lr scheduler state_dict if exists"""
visualize_scene: bool = False
"""Whether to visualize the scene by plotly on the wandb."""
visualize_seperate_eval_images: bool = False
"""Whether to visualize the eval images seperately. (cloud storage is huge)"""
# Viewer related configs
@dataclass
class ViewerConfig(PrintableConfig):
"""Configuration for viewer instantiation"""
relative_log_filename: str = "viewer_log_filename.txt"
"""Filename to use for the log file."""
start_train: bool = False
"""whether to immediately start training upon loading viewer
if False, will just visualize dataset but you can toggle training in viewer"""
zmq_port: Optional[int] = None
"""The zmq port to connect to for communication. If None, find an available port."""
launch_bridge_server: bool = True
"""whether or not to launch the bridge server"""
websocket_port: Optional[int] = 7007
"""the default websocket port to connect to"""
ip_address: str = "127.0.0.1"
"""the ip address where the bridge server is running"""
num_rays_per_chunk: int = 32768
"""number of rays per chunk to render with viewer"""
max_num_display_images: int = 512
"""Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training/evaluation. If -1, display all."""
quit_on_train_completion: bool = False
"""Whether to kill the training job when it has completed. Note this will stop rendering in the viewer."""
@dataclass
class Config(PrintableConfig):
"""Full config contents"""
output_dir: Path = Path("outputs")
"""relative or absolute output directory to save all checkpoints and logging"""
method_name: Optional[str] = None
"""Method name. Required to set in python or via cli"""
experiment_name: Optional[str] = None
"""Experiment name. If None, will automatically be set to dataset name"""
timestamp: str = "{timestamp}"
"""Experiment timestamp."""
machine: MachineConfig = MachineConfig()
"""Machine configuration"""
logging: LoggingConfig = LoggingConfig()
"""Logging configuration"""
viewer: ViewerConfig = ViewerConfig()
"""Viewer configuration"""
trainer: TrainerConfig = TrainerConfig()
"""Trainer configuration"""
pipeline: VanillaPipelineConfig = VanillaPipelineConfig()
"""Pipeline configuration"""
optimizers: Dict[str, Any] = to_immutable_dict(
{
"fields": {
"optimizer": OptimizerConfig(),
| # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Configs"""
# pylint: disable=wrong-import-position
from __future__ import annotations
# model instances
warnings.filterwarnings("ignore", module="torchvision")
CONSOLE = Console(width=120)
# Pretty printing class
class PrintableConfig: # pylint: disable=too-few-public-methods
"""Printable Config defining str function"""
def __str__(self):
lines = [self.__class__.__name__ + ":"]
for key, val in vars(self).items():
if isinstance(val, Tuple):
flattened_val = "["
for item in val:
flattened_val += str(item) + "\n"
flattened_val = flattened_val.rstrip("\n")
val = flattened_val + "]"
lines += f"{key}: {str(val)}".split("\n")
return "\n ".join(lines)
# Base instantiate configs
@dataclass
class InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods
"""Config class for instantiating an the class specified in the _target attribute."""
_target: Type
def setup(self, **kwargs) -> Any:
"""Returns the instantiated object using the config."""
return self._target(self, **kwargs)
# Machine related configs
@dataclass
class MachineConfig(PrintableConfig):
"""Configuration of machine setup"""
seed: int = 42
"""random seed initilization"""
@dataclass
class LocalWriterConfig(InstantiateConfig):
"""Local Writer config"""
_target: Type = writer.LocalWriter
"""target class to instantiate"""
enable: bool = False
"""if True enables local logging, else disables"""
stats_to_track: Tuple[writer.EventName, ...] = (
writer.EventName.ITER_TRAIN_TIME,
writer.EventName.TRAIN_RAYS_PER_SEC,
writer.EventName.CURR_TEST_PSNR,
writer.EventName.VIS_RAYS_PER_SEC,
writer.EventName.TEST_RAYS_PER_SEC,
)
"""specifies which stats will be logged/printed to terminal"""
max_log_size: int = 10
"""maximum number of rows to print before wrapping. if 0, will print everything."""
def setup(self, banner_messages: Optional[List[str]] = None, **kwargs) -> Any:
"""Instantiate local writer
Args:
banner_messages: List of strings that always print at the bottom of screen.
"""
return self._target(self, banner_messages=banner_messages, **kwargs)
@dataclass
class LoggingConfig(PrintableConfig):
"""Configuration of loggers and profilers"""
relative_log_dir: Path = Path("./")
"""relative path to save all logged events"""
steps_per_log: int = 10
"""number of steps between logging stats"""
max_buffer_size: int = 20
"""maximum history size to keep for computing running averages of stats.
e.g. if 20, averages will be computed over past 20 occurances."""
local_writer: LocalWriterConfig = LocalWriterConfig(enable=True)
"""if provided, will print stats locally. if None, will disable printing"""
enable_profiler: bool = True
"""whether to enable profiling code; prints speed of functions at the end of a program.
profiler logs run times of functions and prints at end of training"""
# Trainer related configs
@dataclass
class TrainerConfig(PrintableConfig):
"""Configuration for training regimen"""
steps_per_save: int = 1000
"""Number of steps between saves."""
steps_per_eval_batch: int = 500
"""Number of steps between randomly sampled batches of rays."""
steps_per_eval_image: int = 500
"""Number of steps between single eval images."""
steps_per_eval_all_images: int = 25000
"""Number of steps between eval all images."""
max_num_iterations: int = 1000000
"""Maximum number of iterations to run."""
mixed_precision: bool = False
"""Whether or not to use mixed precision for training."""
relative_model_dir: Path = Path("models/")
"""Relative path to save all checkpoints."""
save_only_latest_checkpoint: bool = True
"""Whether to only save the latest checkpoint or all checkpoints."""
# optional parameters if we want to resume training
load_dir: Optional[Path] = None
"""Optionally specify a pre-trained model directory to load from."""
load_step: Optional[int] = None
"""Optionally specify model step to load from; if none, will find most recent model in load_dir."""
load_config: Optional[Path] = None
"""Optionally specify model config to load from; if none, will use the default config?"""
load_scheduler: bool = True
"""Whether to load the lr scheduler state_dict if exists"""
visualize_scene: bool = False
"""Whether to visualize the scene by plotly on the wandb."""
visualize_seperate_eval_images: bool = False
"""Whether to visualize the eval images seperately. (cloud storage is huge)"""
# Viewer related configs
@dataclass
class ViewerConfig(PrintableConfig):
"""Configuration for viewer instantiation"""
relative_log_filename: str = "viewer_log_filename.txt"
"""Filename to use for the log file."""
start_train: bool = False
"""whether to immediately start training upon loading viewer
if False, will just visualize dataset but you can toggle training in viewer"""
zmq_port: Optional[int] = None
"""The zmq port to connect to for communication. If None, find an available port."""
launch_bridge_server: bool = True
"""whether or not to launch the bridge server"""
websocket_port: Optional[int] = 7007
"""the default websocket port to connect to"""
ip_address: str = "127.0.0.1"
"""the ip address where the bridge server is running"""
num_rays_per_chunk: int = 32768
"""number of rays per chunk to render with viewer"""
max_num_display_images: int = 512
"""Maximum number of training images to display in the viewer, to avoid lag. This does not change which images are
actually used in training/evaluation. If -1, display all."""
quit_on_train_completion: bool = False
"""Whether to kill the training job when it has completed. Note this will stop rendering in the viewer."""
@dataclass
class Config(PrintableConfig):
"""Full config contents"""
output_dir: Path = Path("outputs")
"""relative or absolute output directory to save all checkpoints and logging"""
method_name: Optional[str] = None
"""Method name. Required to set in python or via cli"""
experiment_name: Optional[str] = None
"""Experiment name. If None, will automatically be set to dataset name"""
timestamp: str = "{timestamp}"
"""Experiment timestamp."""
machine: MachineConfig = MachineConfig()
"""Machine configuration"""
logging: LoggingConfig = LoggingConfig()
"""Logging configuration"""
viewer: ViewerConfig = ViewerConfig()
"""Viewer configuration"""
trainer: TrainerConfig = TrainerConfig()
"""Trainer configuration"""
pipeline: VanillaPipelineConfig = VanillaPipelineConfig()
"""Pipeline configuration"""
optimizers: Dict[str, Any] = to_immutable_dict(
{
"fields": {
"optimizer": OptimizerConfig(), | "scheduler": SchedulerConfig(), | 3 | 2023-12-15 20:07:22+00:00 | 4k |
wuc9521/rep-flow | app.py | [
{
"identifier": "read_keywords_from_file",
"path": "utils/loader.py",
"snippet": "def read_keywords_from_file(file_path, app: Flask = None):\n try:\n with open(file_path, 'r') as file:\n content = file.read()\n keywords_list = [keyword.strip() for keyword in re.split(',|\... | import os
import spacy
import logging
import pandas as pd
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template, request, jsonify, send_from_directory
from flask_cors import cross_origin
from utils.loader import read_keywords_from_file
from utils.hints import HELP, get_NUMBER_EMBD_HINT, get_CURRENT_STATE_HINT, get_NEXT_STEP_HINT
from utils.test import extract_and_validate_test_number
from utils.log import log_
from utils.file import get_i
from model.common import imgs
from model.process import image_process | 2,125 |
DEFAULT_RESPONSE_FLAG = "*"
NUMBER_EMBD_HINT = None
CURRENT_BUG_ID = -1
# Load spaCy English model
nlp = spacy.load("en_core_web_sm")
app = Flask(__name__, template_folder='')
# Configure
LOG_DIR = os.path.join(app.root_path, 'log')
DATA_DIR = os.path.join(app.root_path, 'data')
MODEL_DIR = os.path.join(app.root_path, 'model')
CORPUS_DIR = os.path.join(DATA_DIR, 'corpus')
GUIDANCE_DIR = os.path.join(DATA_DIR, 'guidance')
STATE_DIR = os.path.join(DATA_DIR, 'state')
std = pd.read_csv(os.path.join(CORPUS_DIR, 'std.csv'))
df = pd.merge(
pd.read_csv(os.path.join(CORPUS_DIR, 'qa.csv')),
std,
on='ID',
how='left'
)
qa = dict(zip(df['Q'], df['A']))
at = dict(zip(std['A'], std['TYPE']))
ta = dict(zip(std['TYPE'], std['A']))
key_words = read_keywords_from_file(
os.path.join(CORPUS_DIR, 'kw.txt'), app=app)
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
log_file_path = os.path.join(LOG_DIR, f"app.log")
formatter = logging.Formatter(
"[%(asctime)s] [%(levelname)s] [%(module)s] - %(message)s")
handler = RotatingFileHandler(log_file_path, maxBytes=10000, backupCount=1)
handler.setFormatter(formatter)
app.logger.addHandler(handler)
app.logger.setLevel(logging.INFO)
@app.route('/')
def home():
return render_template('index.html'), 200
@app.route('/states/<filename>')
def serve_image(filename):
return send_from_directory(STATE_DIR, filename), 200
@app.route('/guidance/<filename>')
def serve_guidance(filename):
return send_from_directory(os.path.join(GUIDANCE_DIR, CURRENT_BUG_ID), filename), 200
@app.route('/ask', methods=['POST'])
@cross_origin(supports_credentials=True)
def ask():
try:
data = request.get_json()
query_text = data['query']
rgx_num = extract_and_validate_test_number(query_text, app)
if rgx_num is not None and rgx_num != "": # "/test $BUG"
global NUMBER_EMBD_HINT
NUMBER_EMBD_HINT = get_NUMBER_EMBD_HINT(rgx_num)
global CURRENT_BUG_ID
CURRENT_BUG_ID = rgx_num
return jsonify({
"type": "TEST",
"answer": ta.get("TEST"),
"img": None,
"hint": NUMBER_EMBD_HINT
}), 200
response = qa.get(DEFAULT_RESPONSE_FLAG)
doc = nlp(query_text)
nouns = [token.text for token in doc if token.pos_ == "NOUN"]
question = DEFAULT_RESPONSE_FLAG
for question_, answer in qa.items():
if doc.similarity(nlp(question_)) > doc.similarity(nlp(question)):
response = answer
question = question_
if response == qa.get(DEFAULT_RESPONSE_FLAG) or doc.similarity(nlp(question)) < 0.7:
app.logger.warning(
f"User query: \"{query_text}\" - No answer found")
if set(key_words).intersection(set(nouns)):
return jsonify({
"type": "SORRY",
"answer": ta.get("SORRY")
}), 200
else:
return jsonify({
"type": at.get(qa.get(DEFAULT_RESPONSE_FLAG)),
"answer": qa.get(DEFAULT_RESPONSE_FLAG)
}), 200
app.logger.info(f"User query: \"{query_text}\" - Answer: {response}")
app.logger.info("Current State: {}".format(monitor_current_state()))
if at.get(response) == "HELP":
return jsonify({
"type": at.get(response),
"answer": response,
"img": monitor_current_state(),
|
DEFAULT_RESPONSE_FLAG = "*"
NUMBER_EMBD_HINT = None
CURRENT_BUG_ID = -1
# Load spaCy English model
nlp = spacy.load("en_core_web_sm")
app = Flask(__name__, template_folder='')
# Configure
LOG_DIR = os.path.join(app.root_path, 'log')
DATA_DIR = os.path.join(app.root_path, 'data')
MODEL_DIR = os.path.join(app.root_path, 'model')
CORPUS_DIR = os.path.join(DATA_DIR, 'corpus')
GUIDANCE_DIR = os.path.join(DATA_DIR, 'guidance')
STATE_DIR = os.path.join(DATA_DIR, 'state')
std = pd.read_csv(os.path.join(CORPUS_DIR, 'std.csv'))
df = pd.merge(
pd.read_csv(os.path.join(CORPUS_DIR, 'qa.csv')),
std,
on='ID',
how='left'
)
qa = dict(zip(df['Q'], df['A']))
at = dict(zip(std['A'], std['TYPE']))
ta = dict(zip(std['TYPE'], std['A']))
key_words = read_keywords_from_file(
os.path.join(CORPUS_DIR, 'kw.txt'), app=app)
if not os.path.exists(LOG_DIR):
os.makedirs(LOG_DIR)
log_file_path = os.path.join(LOG_DIR, f"app.log")
formatter = logging.Formatter(
"[%(asctime)s] [%(levelname)s] [%(module)s] - %(message)s")
handler = RotatingFileHandler(log_file_path, maxBytes=10000, backupCount=1)
handler.setFormatter(formatter)
app.logger.addHandler(handler)
app.logger.setLevel(logging.INFO)
@app.route('/')
def home():
return render_template('index.html'), 200
@app.route('/states/<filename>')
def serve_image(filename):
return send_from_directory(STATE_DIR, filename), 200
@app.route('/guidance/<filename>')
def serve_guidance(filename):
return send_from_directory(os.path.join(GUIDANCE_DIR, CURRENT_BUG_ID), filename), 200
@app.route('/ask', methods=['POST'])
@cross_origin(supports_credentials=True)
def ask():
try:
data = request.get_json()
query_text = data['query']
rgx_num = extract_and_validate_test_number(query_text, app)
if rgx_num is not None and rgx_num != "": # "/test $BUG"
global NUMBER_EMBD_HINT
NUMBER_EMBD_HINT = get_NUMBER_EMBD_HINT(rgx_num)
global CURRENT_BUG_ID
CURRENT_BUG_ID = rgx_num
return jsonify({
"type": "TEST",
"answer": ta.get("TEST"),
"img": None,
"hint": NUMBER_EMBD_HINT
}), 200
response = qa.get(DEFAULT_RESPONSE_FLAG)
doc = nlp(query_text)
nouns = [token.text for token in doc if token.pos_ == "NOUN"]
question = DEFAULT_RESPONSE_FLAG
for question_, answer in qa.items():
if doc.similarity(nlp(question_)) > doc.similarity(nlp(question)):
response = answer
question = question_
if response == qa.get(DEFAULT_RESPONSE_FLAG) or doc.similarity(nlp(question)) < 0.7:
app.logger.warning(
f"User query: \"{query_text}\" - No answer found")
if set(key_words).intersection(set(nouns)):
return jsonify({
"type": "SORRY",
"answer": ta.get("SORRY")
}), 200
else:
return jsonify({
"type": at.get(qa.get(DEFAULT_RESPONSE_FLAG)),
"answer": qa.get(DEFAULT_RESPONSE_FLAG)
}), 200
app.logger.info(f"User query: \"{query_text}\" - Answer: {response}")
app.logger.info("Current State: {}".format(monitor_current_state()))
if at.get(response) == "HELP":
return jsonify({
"type": at.get(response),
"answer": response,
"img": monitor_current_state(), | "hint": HELP | 1 | 2023-12-20 09:44:09+00:00 | 4k |
yash-srivastava19/verizon | class_utils.py | [
{
"identifier": "VerizonIndex",
"path": "classes.py",
"snippet": "class VerizonIndex:\n version = None \n entries = []\n\n def __init__(self, version = 2, entries=None) -> None:\n if not entries:\n entries = list()\n \n self.version = version \n self.entri... | from imports import *
from classes import VerizonIndex, VerizonIndexEntry, VerizonCommit, VerizonBlob, VerizonIgnore, VerizonTag, VerizonTree, VerizonTreeLeaf
from utils import repo_file, repo_dir
from other_utils import ref_resolve | 3,283 | obj.items.sort(key=tree_leaf_sort_key)
ret = b''
for i in obj.items:
ret += i.mode
ret += b''
ret += i.path.encode('utf8')
ret += b'\x00'
sha = int(i.sha, 16)
ret += sha.to_bytes(20, byteorder="big")
return ret
def object_read(repo, sha):
path = repo_file(repo, "objects", sha[0:2], sha[2:])
if not os.path.isfile(path):
return None
with open(path, "rb") as f:
raw = zlib.decompress(f.read())
# Read the object type
x = raw.find(b'')
fmt = raw[0:x]
# Read and Validate the object size
y = raw.find(b'\x00', x)
size = int(raw[x:y].decode('ascii'))
if size != len(raw)-y-1:
raise Exception(f"Malformed object {sha}: bad length")
match fmt:
case b'commit' : c=VerizonCommit
case b'tree' : c=VerizonTree
case b'tag' : c=VerizonTag
case b'blob' : c=VerizonBlob
case _ : raise Exception(f"Unknown type {fmt.decode('ascii')} for object {sha}")
# Call constructor and return object.
return c(raw[y+1])
def object_write(obj, repo=None):
data = obj.serialize()
result = obj.fmt + b' ' + str(len(data)).encode() + b'\x00' + data
sha = hashlib.sha1(result).hexdigest()
if repo:
path = repo_file(repo, "objects", sha[0:2], sha[2:], mkdir=True)
if not os.path.exists(path):
with open(path, "wb") as f:
f.write(zlib.compress(result))
return sha
def object_find(repo, name, fmt=None, follow=True):
sha = object_resolve(repo, name)
if not sha:
raise Exception(f"No such reference : {name}")
if len(sha) > 1:
raise Exception("Ambigious Reference - {0}. Candidates are :\n - {1}".format(name, '\n - '.join(sha)))
sha = sha[0]
if not fmt:
return sha
while True:
obj = object_read(repo, sha)
if obj.fmt == fmt :
return sha
if not follow:
return None
# Follow tags
if obj.fmt == b'tag':
sha = obj.kvlm[b'object'].decode('ascii')
elif obj.fmt == b'commit':
sha = obj.kvlm[b'tree'].decode('ascii')
else:
return None
def object_hash(fd, fmt, repo=None):
data = fd.read()
match fmt:
case b'commit': obj=VerizonCommit(data)
case b'tree' : obj=VerizonTree(data)
case b'tag' : obj=VerizonTag(data)
case b'blob' : obj=VerizonBlob(data)
case _ : raise Exception(f"Unknown Type : {fmt}")
return object_write(obj, repo)
def object_resolve(repo, name):
"""Resolve names to an object has in repo."""
candidates = list()
hashRE = re.compile(r"^[0-9A-Fa-f]{4,40}$")
if not name.strip():
return None
# If it's head, then it is non-ambigious.
if name == "HEAD":
return [ref_resolve(repo, "HEAD")]
if hashRE.match(name):
name = name.lower()
prefix = name[0:2]
|
def index_read(repo):
index_file = repo_file(repo, "index")
if not os.path.exists(index_file):
return VerizonIndex()
with open(index_file, 'rb') as f:
raw = f.read()
header = raw[:12]
signature = header[:4]
assert signature == b'DIRC'
version = int.from_bytes(header[4:8], 'big')
assert version == 2, "Verizon supports only index file version 2"
count = int.from_bytes(header[8:12], 'big')
entries = list()
content = raw[12:]
idx = 0
for i in range(0, count):
ctime_s = int.from_bytes(content[idx:idx+4], 'big')
ctime_ns = int.from_bytes(content[idx+4:idx+8], 'big')
mtime_s = int.from_bytes(content[idx+8:idx+12], 'big')
mtime_ns = int.from_bytes(content[idx+12:idx+16], 'big')
dev = int.from_bytes(content[idx+16:idx+20], 'big')
ino = int.from_bytes(content[idx+20:idx+24], 'big')
unused = int.from_bytes(content[idx+24:idx+26], 'big')
assert 0 == unused
mode = int.from_bytes(content[idx+26:idx+28], 'big')
mode_type = mode >> 12
assert mode_type in [0b1000, 0b1010, 0b1110]
mode_perms = mode & 0b0000000111111111
uid = int.from_bytes(content[idx+28:idx+32], 'big')
gid = int.from_bytes(content[idx+32:idx+36], 'big')
fsize = int.from_bytes(content[idx+36:idx+40], 'big')
sha = format(int.from_bytes(content[idx+40:idx+60], 'big'), '040x')
flags = int.from_bytes(content[idx+60:idx+62], 'big')
flag_assume_valid = (flags & 0b1000000000000000) != 0
flag_extended = (flags & 0b0100000000000000) != 0
assert not flag_extended
flag_stage = flags & 0b0011000000000000
name_length = flags & 0b0000111111111111
idx += 62
if name_length < 0xFFF:
assert content[idx + name_length] == 0x00
raw_name = content[idx: idx+name_length]
idx += name_length + 1
else:
print("Notice that Name is 0x{:X} bytes long".format(name_length))
null_idx = content.find(b'\x00', idx + 0xFFF)
raw_name = content[idx:null_idx]
idx = null_idx + 1
name = raw_name.decode('utf8')
idx = 8*ceil(idx/8)
entries.append(VerizonIndexEntry(
ctime = (ctime_s, ctime_ns),
mtime = (mtime_s, mtime_ns),
dev = dev,
ino = ino,
mode_type= mode_type,
mode_perms= mode_perms,
uid = uid,
gid = gid,
fsize = fsize,
sha=sha,
flag_assume_valid=flag_assume_valid,
flag_stage=flag_stage,
name=name))
return VerizonIndex(version = version, entries=entries)
def index_write(repo, index):
with open(repo_file(repo, "index"), "wb") as f:
f.write(b'DIRC')
f.write(index.version.to_bytes(4, "big"))
f.write(len(index.entries).to_bytes(4, "big"))
idx = 0
# Entries
for e in index.entries:
f.write(e.ctime[0].to_bytes(4, "big"))
f.write(e.ctime[1].to_bytes(4, "big"))
f.write(e.mtime[0].to_bytes(4, "big"))
f.write(e.mtime[1].to_bytes(4, "big"))
f.write(e.dev.to_bytes(4, "big"))
f.write(e.ino.to_bytes(4, "big"))
# Mode
mode = (e.mode_type << 12) | e.mode_perms
f.write(mode.to_bytes(4, "big"))
f.write(e.uid.to_bytes(4, "big"))
f.write(e.gid.to_bytes(4, "big"))
f.write(e.fsize.to_bytes(4, "big"))
f.write(int(e.sha, 16).to_bytes(20, "big"))
flag_assume_valid = 0x1 << 15 if e.flag_assume_valid else 0
name_bytes = e.name.encode('utf8')
bytes_len = len(name_bytes)
if bytes_len >= 0xFFF :
name_length = 0xFFF
else:
name_length = bytes_len
f.write((flag_assume_valid | e.flag_stage | name_length).to_bytes(2, "big"))
f.write(name_bytes)
f.write((0).to_bytes(1, "big"))
idx += 62 + len(name_bytes) + 1
if idx % 8 != 0:
pad = 8-(idx % 8)
f.write((0).to_bytes(pad, "big"))
idx += pad
def tree_parse_one(raw, start=0):
x = raw.find(b' ', start)
assert x-start == 5 or x-start == 6
mode = raw[start:x]
if len(mode) == 5:
mode = b' ' + mode
y = raw.find(b'\x00', x)
path = raw[x+1:y]
sha = format(int.from_bytes(raw[y+1:y+21], "big"), "040x")
return y+21, VerizonTreeLeaf(mode, path.decode('utf8'), sha)
def tree_parse(raw):
pos = 0
max = len(raw)
ret = list()
while pos<max:
pos, data = tree_parse_one(raw, pos)
ret.append(data)
return ret
# This is the ordering function. Entries are sorted by name, alphabetically, but directories are sorted with a final / added.
def tree_leaf_sort_key(leaf):
if leaf.mode.startswith(b'10'):
return leaf.path
return leaf.path + '/'
def tree_serialize(obj):
obj.items.sort(key=tree_leaf_sort_key)
ret = b''
for i in obj.items:
ret += i.mode
ret += b''
ret += i.path.encode('utf8')
ret += b'\x00'
sha = int(i.sha, 16)
ret += sha.to_bytes(20, byteorder="big")
return ret
def object_read(repo, sha):
path = repo_file(repo, "objects", sha[0:2], sha[2:])
if not os.path.isfile(path):
return None
with open(path, "rb") as f:
raw = zlib.decompress(f.read())
# Read the object type
x = raw.find(b'')
fmt = raw[0:x]
# Read and Validate the object size
y = raw.find(b'\x00', x)
size = int(raw[x:y].decode('ascii'))
if size != len(raw)-y-1:
raise Exception(f"Malformed object {sha}: bad length")
match fmt:
case b'commit' : c=VerizonCommit
case b'tree' : c=VerizonTree
case b'tag' : c=VerizonTag
case b'blob' : c=VerizonBlob
case _ : raise Exception(f"Unknown type {fmt.decode('ascii')} for object {sha}")
# Call constructor and return object.
return c(raw[y+1])
def object_write(obj, repo=None):
data = obj.serialize()
result = obj.fmt + b' ' + str(len(data)).encode() + b'\x00' + data
sha = hashlib.sha1(result).hexdigest()
if repo:
path = repo_file(repo, "objects", sha[0:2], sha[2:], mkdir=True)
if not os.path.exists(path):
with open(path, "wb") as f:
f.write(zlib.compress(result))
return sha
def object_find(repo, name, fmt=None, follow=True):
sha = object_resolve(repo, name)
if not sha:
raise Exception(f"No such reference : {name}")
if len(sha) > 1:
raise Exception("Ambigious Reference - {0}. Candidates are :\n - {1}".format(name, '\n - '.join(sha)))
sha = sha[0]
if not fmt:
return sha
while True:
obj = object_read(repo, sha)
if obj.fmt == fmt :
return sha
if not follow:
return None
# Follow tags
if obj.fmt == b'tag':
sha = obj.kvlm[b'object'].decode('ascii')
elif obj.fmt == b'commit':
sha = obj.kvlm[b'tree'].decode('ascii')
else:
return None
def object_hash(fd, fmt, repo=None):
data = fd.read()
match fmt:
case b'commit': obj=VerizonCommit(data)
case b'tree' : obj=VerizonTree(data)
case b'tag' : obj=VerizonTag(data)
case b'blob' : obj=VerizonBlob(data)
case _ : raise Exception(f"Unknown Type : {fmt}")
return object_write(obj, repo)
def object_resolve(repo, name):
"""Resolve names to an object has in repo."""
candidates = list()
hashRE = re.compile(r"^[0-9A-Fa-f]{4,40}$")
if not name.strip():
return None
# If it's head, then it is non-ambigious.
if name == "HEAD":
return [ref_resolve(repo, "HEAD")]
if hashRE.match(name):
name = name.lower()
prefix = name[0:2] | path = repo_dir(repo, "objects", prefix, mkdir=False) | 9 | 2023-12-18 18:53:26+00:00 | 4k |
Infleqtion/qLDPC | qldpc/objects_test.py | [
{
"identifier": "abstract",
"path": "qldpc/abstract.py",
"snippet": "DEFAULT_FIELD_ORDER = 2\nclass GroupMember(comb.Permutation):\nclass Group:\nclass Element:\nclass Protograph:\nclass TrivialGroup(Group):\nclass CyclicGroup(Group):\nclass DihedralGroup(Group):\nclass QuaternionGroup(Group):\n def ... | import numpy as np
import pytest
from qldpc import abstract, objects | 1,648 | """Unit tests for objects.py
Copyright 2023 The qLDPC Authors and Infleqtion Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def test_pauli() -> None:
"""Pauli operator capabilities."""
for string in ["I", "X", "Y", "Z"]:
| """Unit tests for objects.py
Copyright 2023 The qLDPC Authors and Infleqtion Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def test_pauli() -> None:
"""Pauli operator capabilities."""
for string in ["I", "X", "Y", "Z"]: | assert str(objects.Pauli.from_string(string)) == string | 1 | 2023-12-19 22:29:42+00:00 | 4k |
CosmicLaca/ComfyUI_Primere_Nodes | Nodes/Inputs.py | [
{
"identifier": "ImageExifReader",
"path": "Nodes/modules/image_meta_reader.py",
"snippet": "class ImageExifReader:\n def __init__(self, file):\n self._raw = \"\"\n self._parser = {}\n self._parameter = {}\n self._tool = \"\"\n self.read_data(file)\n\n def read_d... | from ..components.tree import TREE_INPUTS
from ..components.tree import PRIMERE_ROOT
from dynamicprompts.parser.parse import ParserConfig
from dynamicprompts.wildcards.wildcard_manager import WildcardManager
from .modules.image_meta_reader import ImageExifReader
from .modules import exif_data_checker
from ..components import utility
from pathlib import Path
from .modules.adv_encode import advanced_encode
import os
import re
import chardet
import pandas
import comfy.samplers
import folder_paths
import hashlib
import nodes
import random
import string | 2,718 | },
}
def get_prompt(self, positive_prompt, negative_prompt, extra_pnginfo, id, subpath="", model="", orientation=""):
def debug_state(self, extra_pnginfo, id):
workflow = extra_pnginfo["workflow"]
for node in workflow["nodes"]:
node_id = str(node["id"])
name = node["type"]
if node_id == id and name == 'PrimerePrompt':
if "Debug" in name or "Show" in name or "Function" in name or "Evaluate" in name:
continue
return node['widgets_values']
rawResult = debug_state(self, extra_pnginfo, id)
if not rawResult:
rawResult = (positive_prompt, negative_prompt)
if len(subpath.strip()) < 1 or subpath.strip() == 'None':
subpath = None
if len(model.strip()) < 1 or model.strip() == 'None':
model = None
if len(orientation.strip()) < 1 or orientation.strip() == 'None':
orientation = None
if orientation == 'Random':
orientations = ["Horizontal", "Vertical"]
orientation = random.choice(orientations)
return (rawResult[0].replace('\n', ' '), rawResult[1].replace('\n', ' '), subpath, model, orientation)
class PrimereRefinerPrompt:
RETURN_TYPES = ("STRING", "STRING", "CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("PROMPT+", "PROMPT-", "COND+", "COND-")
FUNCTION = "refiner_prompt"
CATEGORY = TREE_INPUTS
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"positive_refiner": ("STRING", {"default": "", "multiline": True}),
"negative_refiner": ("STRING", {"default": "", "multiline": True}),
"positive_refiner_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"negative_refiner_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"positive_original_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"negative_original_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"clip": ("CLIP",),
"seed": ("INT", {"default": 0, "min": -1, "max": 0xffffffffffffffff, "forceInput": True}),
"token_normalization": (["none", "mean", "length", "length+mean"],),
"weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],),
},
"optional": {
"positive_original": ("STRING", {"default": None, "forceInput": True}),
"negative_original": ("STRING", {"default": None, "forceInput": True}),
},
"hidden": {
"extra_pnginfo": "EXTRA_PNGINFO",
"id": "UNIQUE_ID",
},
}
def __init__(self):
wildcard_dir = os.path.join(PRIMERE_ROOT, 'wildcards')
self._wildcard_manager = WildcardManager(wildcard_dir)
self._parser_config = ParserConfig(
variant_start = "{",
variant_end = "}",
wildcard_wrap = "__"
)
def refiner_prompt(self, extra_pnginfo, id, clip, seed, token_normalization, weight_interpretation, positive_refiner = "", negative_refiner = "", positive_original = None, negative_original = None, positive_refiner_strength = 1, negative_refiner_strength = 1, positive_original_strength = 1, negative_original_strength = 1):
def refiner_debug_state(self, extra_pnginfo, id):
workflow = extra_pnginfo["workflow"]
for node in workflow["nodes"]:
node_id = str(node["id"])
name = node["type"]
if node_id == id and name == 'PrimereRefinerPrompt':
if "Debug" in name or "Show" in name or "Function" in name or "Evaluate" in name:
continue
return node['widgets_values']
rawResult = refiner_debug_state(self, extra_pnginfo, id)
if not rawResult:
rawResult = (positive_refiner, negative_refiner)
output_positive = rawResult[0].replace('\n', ' ')
output_negative = rawResult[1].replace('\n', ' ')
final_positive = ""
final_negative = ""
if positive_refiner_strength != 0:
if positive_refiner_strength != 1:
final_positive = f'({output_positive}:{positive_refiner_strength:.2f})' if output_positive is not None and output_positive != '' else ''
else:
final_positive = f'{output_positive}' if output_positive is not None and output_positive != '' else ''
if negative_refiner_strength != 0:
if negative_refiner_strength != 1:
final_negative = f'({output_negative}:{negative_refiner_strength:.2f})' if output_negative is not None and output_negative != '' else ''
else:
final_negative = f'{output_negative}' if output_negative is not None and output_negative != '' else ''
if positive_original is not None and positive_original != "" and positive_original_strength != 0:
if positive_original_strength != 1:
final_positive = f'{final_positive} ({positive_original}:{positive_original_strength:.2f})'
else:
final_positive = f'{final_positive} {positive_original}'
if negative_original is not None and negative_original != "" and negative_original_strength != 0:
if negative_original_strength != 1:
final_negative = f'{final_negative} ({negative_original}:{negative_original_strength:.2f})'
else:
final_negative = f'{final_negative} {negative_original}'
final_positive = utility.DynPromptDecoder(self, final_positive.strip(' ,;'), seed)
final_negative = utility.DynPromptDecoder(self, final_negative.strip(' ,;'), seed)
|
class PrimereDoublePrompt:
RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING")
RETURN_NAMES = ("PROMPT+", "PROMPT-", "SUBPATH", "MODEL", "ORIENTATION")
FUNCTION = "get_prompt"
CATEGORY = TREE_INPUTS
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"positive_prompt": ("STRING", {"default": "", "multiline": True}),
"negative_prompt": ("STRING", {"default": "", "multiline": True}),
},
"optional": {
"subpath": ("STRING", {"default": "", "multiline": False}),
"model": (["None"] + folder_paths.get_filename_list("checkpoints"), {"default": "None"}),
"orientation": (["None", "Random", "Horizontal", "Vertical"], {"default": "None"}),
},
"hidden": {
"extra_pnginfo": "EXTRA_PNGINFO",
"id": "UNIQUE_ID",
},
}
def get_prompt(self, positive_prompt, negative_prompt, extra_pnginfo, id, subpath="", model="", orientation=""):
def debug_state(self, extra_pnginfo, id):
workflow = extra_pnginfo["workflow"]
for node in workflow["nodes"]:
node_id = str(node["id"])
name = node["type"]
if node_id == id and name == 'PrimerePrompt':
if "Debug" in name or "Show" in name or "Function" in name or "Evaluate" in name:
continue
return node['widgets_values']
rawResult = debug_state(self, extra_pnginfo, id)
if not rawResult:
rawResult = (positive_prompt, negative_prompt)
if len(subpath.strip()) < 1 or subpath.strip() == 'None':
subpath = None
if len(model.strip()) < 1 or model.strip() == 'None':
model = None
if len(orientation.strip()) < 1 or orientation.strip() == 'None':
orientation = None
if orientation == 'Random':
orientations = ["Horizontal", "Vertical"]
orientation = random.choice(orientations)
return (rawResult[0].replace('\n', ' '), rawResult[1].replace('\n', ' '), subpath, model, orientation)
class PrimereRefinerPrompt:
RETURN_TYPES = ("STRING", "STRING", "CONDITIONING", "CONDITIONING")
RETURN_NAMES = ("PROMPT+", "PROMPT-", "COND+", "COND-")
FUNCTION = "refiner_prompt"
CATEGORY = TREE_INPUTS
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"positive_refiner": ("STRING", {"default": "", "multiline": True}),
"negative_refiner": ("STRING", {"default": "", "multiline": True}),
"positive_refiner_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"negative_refiner_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"positive_original_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"negative_original_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"clip": ("CLIP",),
"seed": ("INT", {"default": 0, "min": -1, "max": 0xffffffffffffffff, "forceInput": True}),
"token_normalization": (["none", "mean", "length", "length+mean"],),
"weight_interpretation": (["comfy", "A1111", "compel", "comfy++", "down_weight"],),
},
"optional": {
"positive_original": ("STRING", {"default": None, "forceInput": True}),
"negative_original": ("STRING", {"default": None, "forceInput": True}),
},
"hidden": {
"extra_pnginfo": "EXTRA_PNGINFO",
"id": "UNIQUE_ID",
},
}
def __init__(self):
wildcard_dir = os.path.join(PRIMERE_ROOT, 'wildcards')
self._wildcard_manager = WildcardManager(wildcard_dir)
self._parser_config = ParserConfig(
variant_start = "{",
variant_end = "}",
wildcard_wrap = "__"
)
def refiner_prompt(self, extra_pnginfo, id, clip, seed, token_normalization, weight_interpretation, positive_refiner = "", negative_refiner = "", positive_original = None, negative_original = None, positive_refiner_strength = 1, negative_refiner_strength = 1, positive_original_strength = 1, negative_original_strength = 1):
def refiner_debug_state(self, extra_pnginfo, id):
workflow = extra_pnginfo["workflow"]
for node in workflow["nodes"]:
node_id = str(node["id"])
name = node["type"]
if node_id == id and name == 'PrimereRefinerPrompt':
if "Debug" in name or "Show" in name or "Function" in name or "Evaluate" in name:
continue
return node['widgets_values']
rawResult = refiner_debug_state(self, extra_pnginfo, id)
if not rawResult:
rawResult = (positive_refiner, negative_refiner)
output_positive = rawResult[0].replace('\n', ' ')
output_negative = rawResult[1].replace('\n', ' ')
final_positive = ""
final_negative = ""
if positive_refiner_strength != 0:
if positive_refiner_strength != 1:
final_positive = f'({output_positive}:{positive_refiner_strength:.2f})' if output_positive is not None and output_positive != '' else ''
else:
final_positive = f'{output_positive}' if output_positive is not None and output_positive != '' else ''
if negative_refiner_strength != 0:
if negative_refiner_strength != 1:
final_negative = f'({output_negative}:{negative_refiner_strength:.2f})' if output_negative is not None and output_negative != '' else ''
else:
final_negative = f'{output_negative}' if output_negative is not None and output_negative != '' else ''
if positive_original is not None and positive_original != "" and positive_original_strength != 0:
if positive_original_strength != 1:
final_positive = f'{final_positive} ({positive_original}:{positive_original_strength:.2f})'
else:
final_positive = f'{final_positive} {positive_original}'
if negative_original is not None and negative_original != "" and negative_original_strength != 0:
if negative_original_strength != 1:
final_negative = f'{final_negative} ({negative_original}:{negative_original_strength:.2f})'
else:
final_negative = f'{final_negative} {negative_original}'
final_positive = utility.DynPromptDecoder(self, final_positive.strip(' ,;'), seed)
final_negative = utility.DynPromptDecoder(self, final_negative.strip(' ,;'), seed)
| embeddings_final_pos, pooled_pos = advanced_encode(clip, final_positive, token_normalization, weight_interpretation, w_max=1.0, apply_to_pooled=True) | 2 | 2023-12-17 20:42:27+00:00 | 4k |
amazon-science/c2f-seg | test_c2f_seg.py | [
{
"identifier": "load_dataset",
"path": "data/dataloader_transformer.py",
"snippet": "def load_dataset(config, args, mode):\n if mode==\"train\":\n if args.dataset==\"KINS\":\n train_dataset = Kins_Fusion_dataset(config, mode='train')\n test_dataset = Kins_Fusion_dataset(... | import os
import cv2
import time
import random
import argparse
import numpy as np
import torch
import torch.distributed as dist
from tqdm import tqdm
from shutil import copyfile
from torch.utils.data import DataLoader
from data.dataloader_transformer import load_dataset
from utils.logger import setup_logger
from utils.utils import Config, to_cuda
from src.image_model import C2F_Seg
from src.video_model import C2F_Seg | 2,031 |
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42)
# path
parser.add_argument('--path', type=str, required=True, help='model checkpoints path')
parser.add_argument('--check_point_path', type=str, default="../check_points", )
parser.add_argument('--vq_path', type=str, required=True, default='KINS_vqgan')
# dataset
parser.add_argument('--dataset', type=str, default="MOViD_A", help = "select dataset")
parser.add_argument('--data_type', type=str, default="image", help = "select image or video model")
parser.add_argument('--batch', type=int, default=1)
parser.add_argument("--local_rank", default=-1, type=int, help="node rank for distributed training")
args = parser.parse_args()
if args.data_type=="image":
elif args.data_type=="video":
dist.init_process_group(backend="nccl")
torch.cuda.set_device(args.local_rank)
rank = dist.get_rank()
args.path = os.path.join(args.check_point_path, args.path)
vq_model_path = os.path.join(args.check_point_path, args.vq_path)
os.makedirs(args.path, exist_ok=True)
config_path = os.path.join(args.path, 'c2f_seg_{}.yml'.format(args.dataset))
# copy config template if does't exist
if not os.path.exists(config_path):
copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path)
# load config file
config = Config(config_path)
config.path = args.path
config.batch_size = args.batch
config.dataset = args.dataset
log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
logger = setup_logger(os.path.join(args.path, 'logs'), logfile_name=log_file)
if rank==0:
# copy config template if does't exist
if not os.path.exists(config_path):
copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path)
# save samples and eval pictures
os.makedirs(os.path.join(args.path, 'test_samples'), exist_ok=True)
for k in config._dict:
logger.info("{}:{}".format(k, config._dict[k]))
# init device
if torch.cuda.is_available():
config.device = torch.device("cuda")
torch.backends.cudnn.benchmark = True # cudnn auto-tuner
else:
config.device = torch.device("cpu")
n_gpu = torch.cuda.device_count()
# set cv2 running threads to 1 (prevents deadlocks with pytorch dataloader)
cv2.setNumThreads(0)
# initialize random seed
torch.manual_seed(config.seed)
np.random.seed(config.seed)
random.seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
test_dataset = load_dataset(config, args, "test")
test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
test_loader = DataLoader(
dataset=test_dataset,
sampler=test_sampler,
batch_size=config.batch_size,
num_workers=8,
drop_last=False
)
sample_iterator = test_dataset.create_iterator(config.sample_size)
model = C2F_Seg(config, vq_model_path, mode='test', logger=logger)
model.load(is_test=True ,prefix = config.stage2_iteration)
model.restore_from_stage1(prefix = config.stage1_iteration)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank])
iter = 0
iou = 0
iou_count = 0
invisible_iou_ = 0
occ_count = 0
iou_post = 0
iou_count_post = 0
invisible_iou_post = 0
occ_count_post = 0
model.eval()
with torch.no_grad():
if rank==0:
test_loader = tqdm(test_loader)
for items in test_loader:
|
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42)
# path
parser.add_argument('--path', type=str, required=True, help='model checkpoints path')
parser.add_argument('--check_point_path', type=str, default="../check_points", )
parser.add_argument('--vq_path', type=str, required=True, default='KINS_vqgan')
# dataset
parser.add_argument('--dataset', type=str, default="MOViD_A", help = "select dataset")
parser.add_argument('--data_type', type=str, default="image", help = "select image or video model")
parser.add_argument('--batch', type=int, default=1)
parser.add_argument("--local_rank", default=-1, type=int, help="node rank for distributed training")
args = parser.parse_args()
if args.data_type=="image":
elif args.data_type=="video":
dist.init_process_group(backend="nccl")
torch.cuda.set_device(args.local_rank)
rank = dist.get_rank()
args.path = os.path.join(args.check_point_path, args.path)
vq_model_path = os.path.join(args.check_point_path, args.vq_path)
os.makedirs(args.path, exist_ok=True)
config_path = os.path.join(args.path, 'c2f_seg_{}.yml'.format(args.dataset))
# copy config template if does't exist
if not os.path.exists(config_path):
copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path)
# load config file
config = Config(config_path)
config.path = args.path
config.batch_size = args.batch
config.dataset = args.dataset
log_file = 'log-{}.txt'.format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
logger = setup_logger(os.path.join(args.path, 'logs'), logfile_name=log_file)
if rank==0:
# copy config template if does't exist
if not os.path.exists(config_path):
copyfile('./configs/c2f_seg_{}.yml'.format(args.dataset), config_path)
# save samples and eval pictures
os.makedirs(os.path.join(args.path, 'test_samples'), exist_ok=True)
for k in config._dict:
logger.info("{}:{}".format(k, config._dict[k]))
# init device
if torch.cuda.is_available():
config.device = torch.device("cuda")
torch.backends.cudnn.benchmark = True # cudnn auto-tuner
else:
config.device = torch.device("cpu")
n_gpu = torch.cuda.device_count()
# set cv2 running threads to 1 (prevents deadlocks with pytorch dataloader)
cv2.setNumThreads(0)
# initialize random seed
torch.manual_seed(config.seed)
np.random.seed(config.seed)
random.seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
test_dataset = load_dataset(config, args, "test")
test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)
test_loader = DataLoader(
dataset=test_dataset,
sampler=test_sampler,
batch_size=config.batch_size,
num_workers=8,
drop_last=False
)
sample_iterator = test_dataset.create_iterator(config.sample_size)
model = C2F_Seg(config, vq_model_path, mode='test', logger=logger)
model.load(is_test=True ,prefix = config.stage2_iteration)
model.restore_from_stage1(prefix = config.stage1_iteration)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank])
iter = 0
iou = 0
iou_count = 0
invisible_iou_ = 0
occ_count = 0
iou_post = 0
iou_count_post = 0
invisible_iou_post = 0
occ_count_post = 0
model.eval()
with torch.no_grad():
if rank==0:
test_loader = tqdm(test_loader)
for items in test_loader: | items = to_cuda(items, config.device) | 3 | 2023-12-21 04:25:47+00:00 | 4k |
alipay/PainlessInferenceAcceleration | pia/lookahead/common/pretrained_model.py | [
{
"identifier": "LookaheadCache",
"path": "pia/lookahead/common/lookahead_cache.py",
"snippet": "class LookaheadCache():\n def __init__(self, debug=False, eos=2, stop_words=None, max_node=512, max_output_node=256):\n self.debug = debug\n self.eos = eos\n self.max_node = max_node\... | import copy
import inspect
import time
import warnings
import numpy as np
import torch
import torch.distributed as dist
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from transformers import PreTrainedModel
from transformers.generation.beam_constraints import DisjunctiveConstraint, PhrasalConstraint
from transformers.generation.beam_search import BeamSearchScorer, ConstrainedBeamSearchScorer
from transformers.generation.logits_process import (
LogitsProcessorList,
MinLengthLogitsProcessor,
)
from transformers.generation.stopping_criteria import (
MaxLengthCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
from transformers.generation.utils import (
GreedySearchEncoderDecoderOutput,
GreedySearchDecoderOnlyOutput)
from transformers.generation.utils import (
GreedySearchOutput,
GenerateOutput)
from transformers.utils import ModelOutput, logging
from transformers.generation.configuration_utils import GenerationConfig
from pia.lookahead.common.lookahead_cache import LookaheadCache
from pia.lookahead.common.lookahead_generation_utils import GenerationMode, LookaheadDecoderOnlyOutput | 3,574 | # -*- coding: utf-8 -*-
"""
Copyright (c) Ant Financial Service Group and its affiliates.
"""
from __future__ import print_function
# from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
logger = logging.get_logger(__name__)
class LookaheadPreTrainedModel(PreTrainedModel):
_batch_generation = False
_stream_generation = False
def __init__(self, config):
super().__init__(config=config)
def _get_generation_mode(
self, generation_config: GenerationConfig, assistant_model: Optional["PreTrainedModel"]
| # -*- coding: utf-8 -*-
"""
Copyright (c) Ant Financial Service Group and its affiliates.
"""
from __future__ import print_function
# from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
logger = logging.get_logger(__name__)
class LookaheadPreTrainedModel(PreTrainedModel):
_batch_generation = False
_stream_generation = False
def __init__(self, config):
super().__init__(config=config)
def _get_generation_mode(
self, generation_config: GenerationConfig, assistant_model: Optional["PreTrainedModel"] | ) -> GenerationMode: | 1 | 2023-12-19 13:11:38+00:00 | 4k |
Hammour-steak/GOUB | codes/models/modules/DenoisingUNet_arch.py | [
{
"identifier": "SinusoidalPosEmb",
"path": "codes/models/modules/module_util.py",
"snippet": "class SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n device = x.device\n half_dim = self.dim // 2\n ... | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import functools
from .module_util import (
SinusoidalPosEmb,
RandomOrLearnedSinusoidalPosEmb,
NonLinearity,
Upsample, Downsample,
default_conv,
ResBlock, Upsampler,
LinearAttention, Attention,
PreNorm, Residual) | 2,537 |
class ConditionalUNet(nn.Module):
def __init__(self, in_nc, out_nc, nf, depth=4, upscale=1):
super().__init__()
self.depth = depth
self.upscale = upscale # not used
block_class = functools.partial(ResBlock, conv=default_conv, act=NonLinearity())
self.init_conv = default_conv(in_nc*2, nf, 7)
# time embeddings
time_dim = nf * 4
self.random_or_learned_sinusoidal_cond = False
if self.random_or_learned_sinusoidal_cond:
learned_sinusoidal_dim = 16
sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, False)
fourier_dim = learned_sinusoidal_dim + 1
else:
sinu_pos_emb = SinusoidalPosEmb(nf)
fourier_dim = nf
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
for i in range(depth):
dim_in = nf * int(math.pow(2, i))
dim_out = nf * int(math.pow(2, i+1))
self.downs.append(nn.ModuleList([
block_class(dim_in=dim_in, dim_out=dim_in, time_emb_dim=time_dim),
block_class(dim_in=dim_in, dim_out=dim_in, time_emb_dim=time_dim),
Residual(PreNorm(dim_in, LinearAttention(dim_in))),
Downsample(dim_in, dim_out) if i != (depth-1) else default_conv(dim_in, dim_out)
]))
self.ups.insert(0, nn.ModuleList([
block_class(dim_in=dim_out + dim_in, dim_out=dim_out, time_emb_dim=time_dim),
block_class(dim_in=dim_out + dim_in, dim_out=dim_out, time_emb_dim=time_dim),
Residual(PreNorm(dim_out, LinearAttention(dim_out))),
|
class ConditionalUNet(nn.Module):
def __init__(self, in_nc, out_nc, nf, depth=4, upscale=1):
super().__init__()
self.depth = depth
self.upscale = upscale # not used
block_class = functools.partial(ResBlock, conv=default_conv, act=NonLinearity())
self.init_conv = default_conv(in_nc*2, nf, 7)
# time embeddings
time_dim = nf * 4
self.random_or_learned_sinusoidal_cond = False
if self.random_or_learned_sinusoidal_cond:
learned_sinusoidal_dim = 16
sinu_pos_emb = RandomOrLearnedSinusoidalPosEmb(learned_sinusoidal_dim, False)
fourier_dim = learned_sinusoidal_dim + 1
else:
sinu_pos_emb = SinusoidalPosEmb(nf)
fourier_dim = nf
self.time_mlp = nn.Sequential(
sinu_pos_emb,
nn.Linear(fourier_dim, time_dim),
nn.GELU(),
nn.Linear(time_dim, time_dim)
)
# layers
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
for i in range(depth):
dim_in = nf * int(math.pow(2, i))
dim_out = nf * int(math.pow(2, i+1))
self.downs.append(nn.ModuleList([
block_class(dim_in=dim_in, dim_out=dim_in, time_emb_dim=time_dim),
block_class(dim_in=dim_in, dim_out=dim_in, time_emb_dim=time_dim),
Residual(PreNorm(dim_in, LinearAttention(dim_in))),
Downsample(dim_in, dim_out) if i != (depth-1) else default_conv(dim_in, dim_out)
]))
self.ups.insert(0, nn.ModuleList([
block_class(dim_in=dim_out + dim_in, dim_out=dim_out, time_emb_dim=time_dim),
block_class(dim_in=dim_out + dim_in, dim_out=dim_out, time_emb_dim=time_dim),
Residual(PreNorm(dim_out, LinearAttention(dim_out))), | Upsample(dim_out, dim_in) if i!=0 else default_conv(dim_out, dim_in) | 3 | 2023-12-15 09:40:18+00:00 | 4k |
eldar-eln-bigabid/airflow-aerospike-provider | tests/operators/test_aerospike.py | [
{
"identifier": "AerospikeGetKeyOperator",
"path": "aerospike_provider/operators/aerospike.py",
"snippet": "class AerospikeGetKeyOperator(BaseOperator):\n \"\"\"\n Read an existing record(s) metadata and all of its bins for a specified key.\n\n :param namespace: namespace to use in aerospike db... | import unittest
import aerospike
from unittest.mock import patch, Mock
from aerospike_provider.operators.aerospike import AerospikeGetKeyOperator, AerospikePutKeyOperator | 2,118 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class TestAerospikeGetKeyOperator(unittest.TestCase):
def setUp(self):
self.namespace = 'test_namespace'
self.set = 'test_set'
self.key = 'test_key'
self.policy = { aerospike.POLICY_KEY_SEND }
self.task_id = 'test_task'
self.metadata = {'ttl': 1000, 'gen': 4}
self.bins = {'name': 'Aerospike Test', 'version': "1.0.0"}
self.operator = AerospikeGetKeyOperator(
namespace=self.namespace,
set=self.set,
key=self.key,
policy=self.policy,
task_id=self.task_id
)
@patch('aerospike_provider.hooks.aerospike.AerospikeHook.get_conn')
def test_execute(self, mock_hock_conn):
mock_hock_conn.return_value = Mock()
self.operator.parse_records = Mock()
self.operator.parse_records.return_value = [1]
self.operator.execute({})
mock_hock_conn.return_value.get_record.assert_called_once_with(
namespace='test_namespace',
set='test_set',
key='test_key',
policy={ aerospike.POLICY_KEY_SEND }
)
def test_parse_records_as_tuple(self):
mock = ( (self.namespace, self.set, self.key), self.metadata, self.bins)
mock_parsed = self.operator.parse_records(records=mock)
expected = [{"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins}]
assert mock_parsed == expected
def test_parse_records_as_list(self):
mock = [( (self.namespace, self.set, self.key), self.metadata, self.bins), ( (self.namespace, self.set, self.key), self.metadata, self.bins)]
mock_parsed = self.operator.parse_records(records=mock)
expected = [
{"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins},
{"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins}
]
assert mock_parsed == expected
def test_parse_records_as_exception(self):
mock = {}
with self.assertRaises(ValueError):
self.operator.parse_records(records=mock)
def test_create_dict_from_record_with_bins(self):
mock = ( (self.namespace, self.set, self.key), self.metadata, self.bins)
mock_result = self.operator.create_dict_from_record(record=mock)
expected = {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins}
assert mock_result == expected
def test_create_dict_from_record_no_bins(self):
mock = ( (self.namespace, self.set, self.key), self.metadata)
mock_result = self.operator.create_dict_from_record(record=mock)
expected = {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata}
assert mock_result == expected
class TestAerospikePutKeyOperator(unittest.TestCase):
def setUp(self):
| #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class TestAerospikeGetKeyOperator(unittest.TestCase):
def setUp(self):
self.namespace = 'test_namespace'
self.set = 'test_set'
self.key = 'test_key'
self.policy = { aerospike.POLICY_KEY_SEND }
self.task_id = 'test_task'
self.metadata = {'ttl': 1000, 'gen': 4}
self.bins = {'name': 'Aerospike Test', 'version': "1.0.0"}
self.operator = AerospikeGetKeyOperator(
namespace=self.namespace,
set=self.set,
key=self.key,
policy=self.policy,
task_id=self.task_id
)
@patch('aerospike_provider.hooks.aerospike.AerospikeHook.get_conn')
def test_execute(self, mock_hock_conn):
mock_hock_conn.return_value = Mock()
self.operator.parse_records = Mock()
self.operator.parse_records.return_value = [1]
self.operator.execute({})
mock_hock_conn.return_value.get_record.assert_called_once_with(
namespace='test_namespace',
set='test_set',
key='test_key',
policy={ aerospike.POLICY_KEY_SEND }
)
def test_parse_records_as_tuple(self):
mock = ( (self.namespace, self.set, self.key), self.metadata, self.bins)
mock_parsed = self.operator.parse_records(records=mock)
expected = [{"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins}]
assert mock_parsed == expected
def test_parse_records_as_list(self):
mock = [( (self.namespace, self.set, self.key), self.metadata, self.bins), ( (self.namespace, self.set, self.key), self.metadata, self.bins)]
mock_parsed = self.operator.parse_records(records=mock)
expected = [
{"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins},
{"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins}
]
assert mock_parsed == expected
def test_parse_records_as_exception(self):
mock = {}
with self.assertRaises(ValueError):
self.operator.parse_records(records=mock)
def test_create_dict_from_record_with_bins(self):
mock = ( (self.namespace, self.set, self.key), self.metadata, self.bins)
mock_result = self.operator.create_dict_from_record(record=mock)
expected = {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata, "bins": self.bins}
assert mock_result == expected
def test_create_dict_from_record_no_bins(self):
mock = ( (self.namespace, self.set, self.key), self.metadata)
mock_result = self.operator.create_dict_from_record(record=mock)
expected = {"namespace": self.namespace, "set": self.set, "key": self.key, "metadata": self.metadata}
assert mock_result == expected
class TestAerospikePutKeyOperator(unittest.TestCase):
def setUp(self): | self.operator = AerospikePutKeyOperator( | 1 | 2023-12-17 18:35:36+00:00 | 4k |
Its-Haze/league-rpc-linux | league_rpc_linux/champion.py | [
{
"identifier": "Colors",
"path": "league_rpc_linux/colors.py",
"snippet": "class Colors:\n \"\"\"\n Dataclass, storing the different colors that is used in the program.\n \"\"\"\n\n dred = \"\\033[31m\"\n dgreen = \"\\033[32m\"\n yellow = \"\\033[33m\"\n dblue = \"\\033[34m\"\n ... | from http import HTTPStatus
from typing import Any, Optional
from league_rpc_linux.colors import Colors
from league_rpc_linux.const import (
ALL_GAME_DATA_URL,
BASE_SKIN_URL,
CHAMPION_NAME_CONVERT_MAP,
DDRAGON_CHAMPION_DATA,
GAME_MODE_CONVERT_MAP,
)
from league_rpc_linux.kda import get_gold, get_level
from league_rpc_linux.latest_version import get_latest_version
from league_rpc_linux.polling import wait_until_exists
from league_rpc_linux.username import get_summoner_name
import requests
import urllib3 | 2,322 |
urllib3.disable_warnings()
def get_specific_champion_data(name: str) -> dict[str, Any]:
response = requests.get(
url=DDRAGON_CHAMPION_DATA.format_map(
{"version": get_latest_version(), "name": name}
),
timeout=15,
)
return response.json()
def gather_ingame_information() -> tuple[str, str, int, str, int, int]:
"""
Get the current playing champion name.
"""
all_game_data_url = ALL_GAME_DATA_URL
your_summoner_name = get_summoner_name()
champion_name: str | None = None
skin_id: int | None = None
skin_name: str | None = None
game_mode: str | None = None # Set if the game mode was never found.. Maybe you are playing something new?
level: int | None = None
gold: int | None = None
if response := wait_until_exists(
url=all_game_data_url,
custom_message="Did not find game data.. Will try again in 5 seconds",
):
parsed_data = response.json()
game_mode = GAME_MODE_CONVERT_MAP.get(
parsed_data["gameData"]["gameMode"],
parsed_data["gameData"]["gameMode"],
)
if game_mode == "TFT":
# If the currentGame is TFT.. gather the relevant information
level = get_level()
else:
# If the gamemode is LEAGUE gather the relevant information.
champion_name, skin_id, skin_name = gather_league_data(
parsed_data=parsed_data, summoners_name=your_summoner_name
)
if game_mode == "Arena":
level, gold = get_level(), get_gold()
print("-" * 50)
if champion_name:
print(
|
urllib3.disable_warnings()
def get_specific_champion_data(name: str) -> dict[str, Any]:
response = requests.get(
url=DDRAGON_CHAMPION_DATA.format_map(
{"version": get_latest_version(), "name": name}
),
timeout=15,
)
return response.json()
def gather_ingame_information() -> tuple[str, str, int, str, int, int]:
"""
Get the current playing champion name.
"""
all_game_data_url = ALL_GAME_DATA_URL
your_summoner_name = get_summoner_name()
champion_name: str | None = None
skin_id: int | None = None
skin_name: str | None = None
game_mode: str | None = None # Set if the game mode was never found.. Maybe you are playing something new?
level: int | None = None
gold: int | None = None
if response := wait_until_exists(
url=all_game_data_url,
custom_message="Did not find game data.. Will try again in 5 seconds",
):
parsed_data = response.json()
game_mode = GAME_MODE_CONVERT_MAP.get(
parsed_data["gameData"]["gameMode"],
parsed_data["gameData"]["gameMode"],
)
if game_mode == "TFT":
# If the currentGame is TFT.. gather the relevant information
level = get_level()
else:
# If the gamemode is LEAGUE gather the relevant information.
champion_name, skin_id, skin_name = gather_league_data(
parsed_data=parsed_data, summoners_name=your_summoner_name
)
if game_mode == "Arena":
level, gold = get_level(), get_gold()
print("-" * 50)
if champion_name:
print( | f"{Colors.yellow}Champion name found {Colors.green}({CHAMPION_NAME_CONVERT_MAP.get(champion_name, champion_name)}),{Colors.yellow} continuing..{Colors.reset}" | 3 | 2023-12-15 22:21:53+00:00 | 4k |
huahuahuage/Bert-VITS2-Speech | onnx_infer/text/cleaner.py | [
{
"identifier": "symbol_to_id",
"path": "onnx_infer/text/symbols.py",
"snippet": ""
},
{
"identifier": "text_normalize",
"path": "onnx_infer/text/chinese.py",
"snippet": "def text_normalize(text: str):\r\n \"\"\"\r\n 替换所有阿拉伯数字为中文,同时将中文符号替换为英文符号\r\n \"\"\"\r\n # 提取文本中所有的阿拉伯数字\... | from .symbols import symbol_to_id, language_tone_start_map, language_id_map
from typing import Callable
from dataclasses import dataclass
from .chinese import text_normalize as zh_text_normalize
from .japanese import text_normalize as jp_text_normalize
from .english import text_normalize as en_text_normalize
from .chinese import g2p as zh_g2p
from .japanese import g2p as jp_g2p
from .english import g2p as en_g2p
| 2,181 |
# from text import cleaned_text_to_sequence
@dataclass
class TextNormalizeDict:
"""
文本序列化 替换所有阿拉伯数字为对应语言,同时将符号替换为指定列表内的英文符号
"""
ZH: Callable = zh_text_normalize
JP: Callable = jp_text_normalize
EN: Callable = en_text_normalize
@dataclass
class G2PDict:
"""
文本序列化
"""
ZH: Callable = zh_g2p
|
# from text import cleaned_text_to_sequence
@dataclass
class TextNormalizeDict:
"""
文本序列化 替换所有阿拉伯数字为对应语言,同时将符号替换为指定列表内的英文符号
"""
ZH: Callable = zh_text_normalize
JP: Callable = jp_text_normalize
EN: Callable = en_text_normalize
@dataclass
class G2PDict:
"""
文本序列化
"""
ZH: Callable = zh_g2p
| JP: Callable = jp_g2p
| 0 | 2023-12-21 13:50:50+00:00 | 4k |
haseeb-heaven/Gemini-Vision-Pro | script.py | [
{
"identifier": "Logger",
"path": "libs/logger.py",
"snippet": "class Logger:\n _logger = None\n\n @staticmethod\n def get_logger(file_name):\n if Logger._logger is None:\n Logger._logger = Logger._setup_logger(file_name)\n return Logger._logger\n\n @staticmethod\n ... | import streamlit as st
import cv2
import io
import traceback
import traceback
from streamlit_webrtc import webrtc_streamer, WebRtcMode, RTCConfiguration
from PIL import Image
from io import BytesIO
from pathlib import Path
from libs.logger import Logger
from libs.gemini_vision import GeminiVision
from libs.speech import SpeechToText
from libs.voice import TextToSpeech
from libs.image_cv2 import ImageCV2 | 3,088 | """
Description: This is the amazing Google Gemini Vision Pro.
This scans the image and using Gemini AI pro vision API it generates the descrption of the image.
It also uses the speech to text and text to speech to speak the prompt and display the description of the image.
It also uses the webcam to capture the image and display it.
Features:
1. Webcam detection using WebRTC, OpenCV and PIL
2. Speech to text using Google Cloud Speech to Text API
3. Text to speech using Google Cloud Text to Speech API
4. Image processing using Gemini AI Pro Vision API
5. Logging using Python logging module
6. Error handling using Python exception handling
Modules used:
1. Streamlit - Is is the Web App framework used to build the app
2. Streamlit Webrtc - It is used to capture the image from the webcam
3. OpenCV - It is used to capture the image from the webcam
4. PIL - It is image processing library used to convert the image.
5. gTTS - It is used to convert the text to speech
6. SpeechRecognition - It is used to convert the speech to text
7. google.cloud.speech - It is used to convert the speech to text
Author: HeavenHM
Date: 17-12-2023
Version: 1.0
"""
# Initialize session state
def init_session_state():
    """Seed ``st.session_state`` with every key the app reads, exactly once.

    Streamlit re-runs the whole script on each interaction; guarding each key
    keeps values already set by the user intact across reruns.

    The original version repeated the ``api_key``/``captured_image``/``prompt``
    checks three times; a defaults table removes the duplication without
    changing behavior (first occurrence wins either way).
    """
    defaults = {
        'api_key': '',
        'temperature': 0.1,
        'top_k': 32,
        'top_p': 1.0,
        'captured_image': None,
        'prompt': '',
        'logger': None,
        'tts': None,
        'stt': None,
        'gemini_vision': None,
        'webrtc_ctx': None,
        'response': None,
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value
# Exception handling decorator
def exception_handler(func):
    """Decorator: run *func*; on any exception log it, show it in the
    Streamlit UI, and halt the current script run.

    ``functools.wraps`` preserves the wrapped function's metadata
    (``__name__``, ``__doc__``) — the plain wrapper previously lost it,
    which hurts debugging and introspection.
    """
    from functools import wraps  # local import keeps this change self-contained

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exception:
            # Log both the one-line summary and the full traceback, then stop.
            st.session_state.logger.error(f"An error occurred in {func.__name__}: {exception}")
            st.error(f"An error occurred: {exception}")
            st.session_state.logger.error(traceback.format_exc())
            st.stop()
    return wrapper
@exception_handler
def validate_image(image_path):
    """Raise (and log) FileNotFoundError if *image_path* does not exist.

    Assumes image_path is a pathlib.Path-like object (uses .exists()) — TODO confirm at call sites.
    """
    if not image_path.exists():
        st.session_state.logger.error(f"Could not find image: {image_path}")
        raise FileNotFoundError(f"Could not find image: {image_path}")
@exception_handler
def process_image():
    """Send the stored prompt + captured image to Gemini Vision, then speak,
    log, and stash the generated description in ``st.session_state.response``.
    """
    image_contents = [st.session_state['prompt'], st.session_state['captured_image']]
    st.session_state.logger.info(f"Image data is: {st.session_state['captured_image']}")
    response = st.session_state.gemini_vision.generate_content(image_contents)
    # NOTE(review): assumes membership test works on the response object
    # (error payload instead of a raised exception) — confirm with the API.
    if 'error' in response:
        raise ValueError(f"An error occurred: {response}")
    else:
        if response.text:
            st.session_state.tts.speak(response.text)
            st.session_state.logger.info(f"Response: {response.text}")
            st.session_state.response = response.text
@exception_handler
def get_prompt_from_mic():
    """Capture speech from the microphone and return the transcribed text."""
    prompt = st.session_state.stt.listen_and_convert()
    return prompt
@exception_handler
def log_webrtc_context_states(webrtc_ctx):
    """Log the WebRTC context's playing/receiver state; show a UI error if it is None."""
    if webrtc_ctx is not None:
        # Log the state of the WebRTC context
        st.session_state.logger.info(f"WebRTC context: {webrtc_ctx}")
        st.session_state.logger.info(f"Is WebRTC playing: {webrtc_ctx.state.playing}")
        st.session_state.logger.info(f"Is audio receiver ready: {webrtc_ctx.audio_receiver}")
        st.session_state.logger.info(f"Is video receiver ready: {webrtc_ctx.video_receiver}")
    else:
        st.error("WebRTC context is None.")
@exception_handler
def capture_image():
st.session_state.logger.info("Attempting to capture image from webcam with ImageCV2...")
# Capture the image from the webcam
web_image = None
| """
Description: This is the amazing Google Gemini Vision Pro.
This scans the image and using Gemini AI pro vision API it generates the descrption of the image.
It also uses the speech to text and text to speech to speak the prompt and display the description of the image.
It also uses the webcam to capture the image and display it.
Features:
1. Webcam detection using WebRTC, OpenCV and PIL
2. Speech to text using Google Cloud Speech to Text API
3. Text to speech using Google Cloud Text to Speech API
4. Image processing using Gemini AI Pro Vision API
5. Logging using Python logging module
6. Error handling using Python exception handling
Modules used:
1. Streamlit - Is is the Web App framework used to build the app
2. Streamlit Webrtc - It is used to capture the image from the webcam
3. OpenCV - It is used to capture the image from the webcam
4. PIL - It is image processing library used to convert the image.
5. gTTS - It is used to convert the text to speech
6. SpeechRecognition - It is used to convert the speech to text
7. google.cloud.speech - It is used to convert the speech to text
Author: HeavenHM
Date: 17-12-2023
Version: 1.0
"""
# Initialize session state
def init_session_state():
    """Seed ``st.session_state`` with every key the app reads, exactly once.

    Streamlit re-runs the whole script on each interaction; guarding each key
    keeps values already set by the user intact across reruns.

    The original version repeated the ``api_key``/``captured_image``/``prompt``
    checks three times; a defaults table removes the duplication without
    changing behavior (first occurrence wins either way).
    """
    defaults = {
        'api_key': '',
        'temperature': 0.1,
        'top_k': 32,
        'top_p': 1.0,
        'captured_image': None,
        'prompt': '',
        'logger': None,
        'tts': None,
        'stt': None,
        'gemini_vision': None,
        'webrtc_ctx': None,
        'response': None,
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value
# Exception handling decorator
def exception_handler(func):
    """Decorator: run *func*; on any exception log it, show it in the
    Streamlit UI, and halt the current script run.

    ``functools.wraps`` preserves the wrapped function's metadata
    (``__name__``, ``__doc__``) — the plain wrapper previously lost it,
    which hurts debugging and introspection.
    """
    from functools import wraps  # local import keeps this change self-contained

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exception:
            # Log both the one-line summary and the full traceback, then stop.
            st.session_state.logger.error(f"An error occurred in {func.__name__}: {exception}")
            st.error(f"An error occurred: {exception}")
            st.session_state.logger.error(traceback.format_exc())
            st.stop()
    return wrapper
@exception_handler
def validate_image(image_path):
    """Raise (and log) FileNotFoundError if *image_path* does not exist.

    Assumes image_path is a pathlib.Path-like object (uses .exists()) — TODO confirm at call sites.
    """
    if not image_path.exists():
        st.session_state.logger.error(f"Could not find image: {image_path}")
        raise FileNotFoundError(f"Could not find image: {image_path}")
@exception_handler
def process_image():
    """Send the stored prompt + captured image to Gemini Vision, then speak,
    log, and stash the generated description in ``st.session_state.response``.
    """
    image_contents = [st.session_state['prompt'], st.session_state['captured_image']]
    st.session_state.logger.info(f"Image data is: {st.session_state['captured_image']}")
    response = st.session_state.gemini_vision.generate_content(image_contents)
    # NOTE(review): assumes membership test works on the response object
    # (error payload instead of a raised exception) — confirm with the API.
    if 'error' in response:
        raise ValueError(f"An error occurred: {response}")
    else:
        if response.text:
            st.session_state.tts.speak(response.text)
            st.session_state.logger.info(f"Response: {response.text}")
            st.session_state.response = response.text
@exception_handler
def get_prompt_from_mic():
    """Capture speech from the microphone and return the transcribed text."""
    prompt = st.session_state.stt.listen_and_convert()
    return prompt
@exception_handler
def log_webrtc_context_states(webrtc_ctx):
    """Log the WebRTC context's playing/receiver state; show a UI error if it is None."""
    if webrtc_ctx is not None:
        # Log the state of the WebRTC context
        st.session_state.logger.info(f"WebRTC context: {webrtc_ctx}")
        st.session_state.logger.info(f"Is WebRTC playing: {webrtc_ctx.state.playing}")
        st.session_state.logger.info(f"Is audio receiver ready: {webrtc_ctx.audio_receiver}")
        st.session_state.logger.info(f"Is video receiver ready: {webrtc_ctx.video_receiver}")
    else:
        st.error("WebRTC context is None.")
@exception_handler
def capture_image():
st.session_state.logger.info("Attempting to capture image from webcam with ImageCV2...")
# Capture the image from the webcam
web_image = None | web_cam = ImageCV2() | 4 | 2023-12-16 23:24:46+00:00 | 4k |
jaypyles/obsidian-to-bookstack | obsidian_to_bookstack/bookstack/bookstack.py | [
{
"identifier": "console",
"path": "obsidian_to_bookstack/console.py",
"snippet": ""
},
{
"identifier": "con_hash",
"path": "obsidian_to_bookstack/utils.py",
"snippet": "def con_hash(key: str) -> int:\n \"\"\"Get a consistent hash of a key\"\"\"\n hash_obj = hashlib.md5(key.encode(... | import os
import shutil
import urllib3
from datetime import datetime, timedelta
from typing import List
from ..console import console
from ..utils import con_hash
from .artifacts import Book, Chapter, Page, Shelf
from .client import LocalClient, RemoteClient
from .collectors.local import *
from .collectors.remote import *
from .constants import * | 2,469 |
class BookstackClient(RemoteClient):
    """Represents the remote Bookstack instance.

    On construction it pulls every shelf/book/page/chapter through the REST
    collectors and builds hash-keyed lookup maps so local artifacts can be
    matched to their remote counterparts in O(1).
    """

    def __init__(self, verbose: bool) -> None:
        # if verbose is set, will issue logs
        super().__init__()
        self.verbose = verbose
        if self.verbose:
            console.log("Building remote client...")
        self.__set_collectors()
        self.__set_artifacts()
        self.__set_maps()

    def __set_collectors(self):
        # One collector per remote artifact type.
        self.shelf_collector = RemoteShelfCollector(self.verbose, self)
        self.book_collector = RemoteBookCollector(self.verbose, self)
        self.page_collector = RemotePageCollector(self.verbose, self)
        self.chapter_collector = RemoteChapterCollector(self.verbose, self)

    def __set_artifacts(self):
        # Order matters: books need shelves; pages and chapters need books.
        self.shelves: List[Shelf] = self.shelf_collector.get_shelves()
        self.books: List[Book] = self.book_collector.get_books(self.shelves)
        self.pages: List[Page] = self.page_collector.get_pages(self.books)
        self.chapters: List[Chapter] = self.chapter_collector.get_chapters(self.books)

    def __set_maps(self):
        self.shelf_map = self._build_shelf_map()
        self.book_map = self._build_book_map()
        self.page_map = self._build_page_map()
        self.chapter_map = self._build_chapter_map()

    def _refresh(self):
        """Simply update the client (new pool manager, collectors, artifacts, maps)."""
        self.http = urllib3.PoolManager()
        self.__set_collectors()
        self.__set_artifacts()
        self.__set_maps()

    def _build_shelf_map(self):
        """Build a map of all client shelves, keyed by hashed shelf name."""
        return {con_hash(shelf.name): shelf for shelf in self.shelves}

    def _build_book_map(self):
        """Build a map of all client books.

        The key includes the shelf name when present so same-named books on
        different shelves do not collide.
        """
        book_map = {}
        for book in self.books:
            if book.shelf:
                book_map[con_hash(book.name + book.shelf.name)] = book
            else:
                book_map[con_hash(book.name)] = book
        return book_map

    def _build_page_map(self):
        """Build a map of all client pages.

        Key composition (page + book [+ chapter] names) mirrors
        _retrieve_from_client_map so lookups use identical hashes.
        """
        page_map = {}
        for page in self.pages:
            if page.chapter and page.book:
                page_map[
                    con_hash(page.name + page.book.name + page.chapter.name)
                ] = page
            elif page.book:
                page_map[con_hash(page.name + page.book.name)] = page
            else:
                page_map[con_hash(page.name)] = page
        return page_map

    def _build_chapter_map(self):
        """Build a map of all client chapters, keyed by chapter + book name.

        Chapters without a parent book are not indexed.
        """
        # Local renamed from `page_map` (copy-paste leftover) to `chapter_map`.
        chapter_map = {}
        for chapter in self.chapters:
            if chapter.book:
                chapter_map[con_hash(chapter.name + chapter.book.name)] = chapter
        return chapter_map

    def _get_temp_book_map(self):
        """Get books from the client, but don't add to the client."""
        books = self._get_from_client(BookstackAPIEndpoints.BOOKS)
        return {book["name"]: book["id"] for book in books}

    def _retrieve_from_client_map(self, obj: Page | Shelf | Book | Chapter):
        """Retrieve the client version of the local object."""
        if isinstance(obj, Page):
            # Local page names carry a file extension; strip it to match remote names.
            name = os.path.splitext(obj.name)[0]
            if obj.chapter and obj.book:
                return self.page_map[con_hash(name + obj.book.name + obj.chapter.name)]
            return (
                self.page_map[con_hash(name + obj.book.name)]
                if obj.book
                else self.page_map[con_hash(name)]
            )

        if isinstance(obj, Book):
            return (
                self.book_map[con_hash(obj.name + obj.shelf.name)]
                if obj.shelf
                else self.book_map[con_hash(obj.name)]
            )

        if isinstance(obj, Shelf):
            return self.shelf_map[con_hash(obj.name)]

        if isinstance(obj, Chapter):
            # Only chapters attached to a book are indexed (see _build_chapter_map).
            return self.chapter_map[con_hash(obj.name + obj.book.name)]
|
class BookstackClient(RemoteClient):
    """Represents the remote Bookstack instance.

    On construction it pulls every shelf/book/page/chapter through the REST
    collectors and builds hash-keyed lookup maps for O(1) matching of local
    artifacts to their remote counterparts.
    """
    def __init__(self, verbose: bool) -> None:
        # if verbose is set, will issue logs
        super().__init__()
        self.verbose = verbose
        if self.verbose:
            console.log("Building remote client...")
        self.__set_collectors()
        self.__set_artifacts()
        self.__set_maps()
    def __set_collectors(self):
        # One collector per remote artifact type.
        self.shelf_collector = RemoteShelfCollector(self.verbose, self)
        self.book_collector = RemoteBookCollector(self.verbose, self)
        self.page_collector = RemotePageCollector(self.verbose, self)
        self.chapter_collector = RemoteChapterCollector(self.verbose, self)
    def __set_artifacts(self):
        # Order matters: books need shelves; pages and chapters need books.
        self.shelves: List[Shelf] = self.shelf_collector.get_shelves()
        self.books: List[Book] = self.book_collector.get_books(self.shelves)
        self.pages: List[Page] = self.page_collector.get_pages(self.books)
        self.chapters: List[Chapter] = self.chapter_collector.get_chapters(self.books)
    def __set_maps(self):
        self.shelf_map = self._build_shelf_map()
        self.book_map = self._build_book_map()
        self.page_map = self._build_page_map()
        self.chapter_map = self._build_chapter_map()
    def _refresh(self):
        """Simply update the client (new pool manager, collectors, artifacts, maps)."""
        self.http = urllib3.PoolManager()
        self.__set_collectors()
        self.__set_artifacts()
        self.__set_maps()
    def _build_shelf_map(self):
        """Build a map of all client shelves, keyed by hashed shelf name."""
        return {con_hash(shelf.name): shelf for shelf in self.shelves}
    def _build_book_map(self):
        """Build a map of all client books.

        Key includes the shelf name when present so same-named books on
        different shelves do not collide.
        """
        book_map = {}
        for book in self.books:
            if book.shelf:
                book_map[con_hash(book.name + book.shelf.name)] = book
            else:
                book_map[con_hash(book.name)] = book
        return book_map
    def _build_page_map(self):
        """Build a map of all client pages.

        Key composition (page + book [+ chapter] names) mirrors
        _retrieve_from_client_map so lookups use identical hashes.
        """
        page_map = {}
        for page in self.pages:
            if page.chapter and page.book:
                page_map[
                    con_hash(page.name + page.book.name + page.chapter.name)
                ] = page
            elif page.book:
                page_map[con_hash(page.name + page.book.name)] = page
            else:
                page_map[con_hash(page.name)] = page
        return page_map
    def _build_chapter_map(self):
        """Build a map of all client chapters, keyed by chapter + book name."""
        # NOTE(review): local is named `page_map` but holds chapters (copy-paste).
        page_map = {}
        for chapter in self.chapters:
            if chapter.book:
                page_map[con_hash(chapter.name + chapter.book.name)] = chapter
        return page_map
    def _get_temp_book_map(self):
        """Get books from the client, but don't add to the client."""
        books = self._get_from_client(BookstackAPIEndpoints.BOOKS)
        return {book["name"]: book["id"] for book in books}
    def _retrieve_from_client_map(self, obj: Page | Shelf | Book | Chapter):
        """Retrieve the client version of the local object."""
        if isinstance(obj, Page):
            # Local page names carry a file extension; strip it to match remote names.
            name = os.path.splitext(obj.name)[0]
            if obj.chapter and obj.book:
                return self.page_map[con_hash(name + obj.book.name + obj.chapter.name)]
            return (
                self.page_map[con_hash(name + obj.book.name)]
                if obj.book
                else self.page_map[con_hash(name)]
            )
        if isinstance(obj, Book):
            return (
                self.book_map[con_hash(obj.name + obj.shelf.name)]
                if obj.shelf
                else self.book_map[con_hash(obj.name)]
            )
        if isinstance(obj, Shelf):
            return self.shelf_map[con_hash(obj.name)]
        if isinstance(obj, Chapter):
            # Only chapters attached to a book are indexed (see _build_chapter_map).
            return self.chapter_map[con_hash(obj.name + obj.book.name)]
| class Bookstack(LocalClient): | 6 | 2023-12-20 02:22:33+00:00 | 4k |
lipku/metahuman-stream | nerf_triplane/renderer.py | [
{
"identifier": "custom_meshgrid",
"path": "nerf_triplane/utils.py",
"snippet": "def custom_meshgrid(*args):\n # ref: https://pytorch.org/docs/stable/generated/torch.meshgrid.html?highlight=meshgrid#torch.meshgrid\n if pver.parse(torch.__version__) < pver.parse('1.10'):\n return torch.meshg... | import math
import trimesh
import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import raymarching
from .utils import custom_meshgrid, get_audio_features, euler_angles_to_matrix, convert_poses | 2,708 | # color: [N, 3/4]
print('[visualize points]', pc.shape, pc.dtype, pc.min(0), pc.max(0))
pc = trimesh.PointCloud(pc, color)
# axis
axes = trimesh.creation.axis(axis_length=4)
# sphere
sphere = trimesh.creation.icosphere(radius=1)
trimesh.Scene([pc, axes, sphere]).show()
class NeRFRenderer(nn.Module):
    def __init__(self, opt):
        """Set up bounds/AABB buffers, optional per-frame latent codes,
        optional trainable camera offsets, and CUDA-raymarching occupancy-grid
        state, all driven by the ``opt`` namespace.
        """
        super().__init__()
        self.opt = opt
        self.bound = opt.bound
        # Number of multi-resolution density-grid cascades covering [-bound, bound].
        self.cascade = 1 + math.ceil(math.log2(opt.bound))
        self.grid_size = 128
        self.density_scale = 1
        self.min_near = opt.min_near
        self.density_thresh = opt.density_thresh
        self.density_thresh_torso = opt.density_thresh_torso
        self.exp_eye = opt.exp_eye
        self.test_train = opt.test_train
        self.smooth_lips = opt.smooth_lips
        self.torso = opt.torso
        self.cuda_ray = opt.cuda_ray
        # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)
        # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.
        aabb_train = torch.FloatTensor([-opt.bound, -opt.bound/2, -opt.bound, opt.bound, opt.bound/2, opt.bound])
        aabb_infer = aabb_train.clone()
        self.register_buffer('aabb_train', aabb_train)
        self.register_buffer('aabb_infer', aabb_infer)
        # individual codes (per-frame learnable latents, only when dim > 0)
        self.individual_num = opt.ind_num
        self.individual_dim = opt.ind_dim
        if self.individual_dim > 0:
            self.individual_codes = nn.Parameter(torch.randn(self.individual_num, self.individual_dim) * 0.1)
        if self.torso:
            self.individual_dim_torso = opt.ind_dim_torso
            if self.individual_dim_torso > 0:
                self.individual_codes_torso = nn.Parameter(torch.randn(self.individual_num, self.individual_dim_torso) * 0.1)
        # optimize camera pose (per-frame rotation/translation corrections)
        self.train_camera = self.opt.train_camera
        if self.train_camera:
            self.camera_dR = nn.Parameter(torch.zeros(self.individual_num, 3)) # euler angle
            self.camera_dT = nn.Parameter(torch.zeros(self.individual_num, 3)) # xyz offset
        # extra state for cuda raymarching
        # 3D head density grid
        density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]
        density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]
        self.register_buffer('density_grid', density_grid)
        self.register_buffer('density_bitfield', density_bitfield)
        self.mean_density = 0
        self.iter_density = 0
        # 2D torso density grid
        if self.torso:
            density_grid_torso = torch.zeros([self.grid_size ** 2]) # [H * H]
            self.register_buffer('density_grid_torso', density_grid_torso)
            self.mean_density_torso = 0
        # step counter
        step_counter = torch.zeros(16, 2, dtype=torch.int32) # 16 is hardcoded for averaging...
        self.register_buffer('step_counter', step_counter)
        self.mean_count = 0
        self.local_step = 0
        # decay for enc_a (audio-feature smoothing cache; see smooth_lips)
        if self.smooth_lips:
            self.enc_a = None
    def forward(self, x, d):
        """Abstract: subclasses implement the joint color/density query for positions x and directions d."""
        raise NotImplementedError()
# separated density and color query (can accelerate non-cuda-ray mode.)
    def density(self, x):
        """Abstract: density-only query at positions x."""
        raise NotImplementedError()
    def color(self, x, d, mask=None, **kwargs):
        """Abstract: color-only query (optional mask and extra kwargs)."""
        raise NotImplementedError()
    def reset_extra_state(self):
        """Zero the occupancy-grid statistics and the raymarching step counter.

        No-op unless CUDA raymarching is enabled.
        """
        if not self.cuda_ray:
            return
        # density grid
        self.density_grid.zero_()
        self.mean_density = 0
        self.iter_density = 0
        # step counter
        self.step_counter.zero_()
        self.mean_count = 0
        self.local_step = 0
def run_cuda(self, rays_o, rays_d, auds, bg_coords, poses, eye=None, index=0, dt_gamma=0, bg_color=None, perturb=False, force_all_rays=False, max_steps=1024, T_thresh=1e-4, **kwargs):
# rays_o, rays_d: [B, N, 3], assumes B == 1
# auds: [B, 16]
# index: [B]
# return: image: [B, N, 3], depth: [B, N]
prefix = rays_o.shape[:-1]
rays_o = rays_o.contiguous().view(-1, 3)
rays_d = rays_d.contiguous().view(-1, 3)
bg_coords = bg_coords.contiguous().view(-1, 2)
# only add camera offset at training!
if self.train_camera and (self.training or self.test_train):
dT = self.camera_dT[index] # [1, 3]
|
def sample_pdf(bins, weights, n_samples, det=False):
    """Inverse-transform sampling of bin positions (NeRF hierarchical sampling).

    bins: [B, T] existing z-values; weights: [B, T-1] per-bin weights.
    Returns [B, n_samples] new z-values drawn from the implied PDF.
    When ``det`` is True, uses evenly spaced CDF quantiles instead of random ones.
    """
    # Normalise the weights into a PDF, then a CDF with a leading zero.
    # The epsilon keeps the normalisation finite for all-zero weights.
    w = weights + 1e-5
    pdf = w / torch.sum(w, -1, keepdim=True)
    cdf = torch.cumsum(pdf, -1)
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)

    # Quantiles in [0, 1): stratified midpoints when deterministic, uniform otherwise.
    if det:
        u = torch.linspace(0. + 0.5 / n_samples, 1. - 0.5 / n_samples, steps=n_samples).to(weights.device)
        u = u.expand(list(cdf.shape[:-1]) + [n_samples])
    else:
        u = torch.rand(list(cdf.shape[:-1]) + [n_samples]).to(weights.device)
    u = u.contiguous()

    # Locate each quantile in the CDF and clamp to valid neighbouring indices.
    idx = torch.searchsorted(cdf, u, right=True)
    lo = (idx - 1).clamp(min=0)
    hi = idx.clamp(max=cdf.shape[-1] - 1)
    pair = torch.stack([lo, hi], -1)  # (B, n_samples, 2)

    expanded = [pair.shape[0], pair.shape[1], cdf.shape[-1]]
    cdf_pair = torch.gather(cdf.unsqueeze(1).expand(expanded), 2, pair)
    bin_pair = torch.gather(bins.unsqueeze(1).expand(expanded), 2, pair)

    # Linearly interpolate inside the chosen bin; guard degenerate CDF spans.
    span = cdf_pair[..., 1] - cdf_pair[..., 0]
    span = torch.where(span < 1e-5, torch.ones_like(span), span)
    frac = (u - cdf_pair[..., 0]) / span
    return bin_pair[..., 0] + frac * (bin_pair[..., 1] - bin_pair[..., 0])
def plot_pointcloud(pc, color=None):
    """Debug helper: open an interactive trimesh viewer showing the point cloud
    together with reference axes and a radius-1 sphere for scale.

    Blocking GUI side effect only; returns nothing.
    """
    # pc: [N, 3]
    # color: [N, 3/4]
    print('[visualize points]', pc.shape, pc.dtype, pc.min(0), pc.max(0))
    pc = trimesh.PointCloud(pc, color)
    # axis
    axes = trimesh.creation.axis(axis_length=4)
    # sphere
    sphere = trimesh.creation.icosphere(radius=1)
    trimesh.Scene([pc, axes, sphere]).show()
class NeRFRenderer(nn.Module):
    def __init__(self, opt):
        """Set up bounds/AABB buffers, optional per-frame latent codes,
        optional trainable camera offsets, and CUDA-raymarching occupancy-grid
        state, all driven by the ``opt`` namespace.
        """
        super().__init__()
        self.opt = opt
        self.bound = opt.bound
        # Number of multi-resolution density-grid cascades covering [-bound, bound].
        self.cascade = 1 + math.ceil(math.log2(opt.bound))
        self.grid_size = 128
        self.density_scale = 1
        self.min_near = opt.min_near
        self.density_thresh = opt.density_thresh
        self.density_thresh_torso = opt.density_thresh_torso
        self.exp_eye = opt.exp_eye
        self.test_train = opt.test_train
        self.smooth_lips = opt.smooth_lips
        self.torso = opt.torso
        self.cuda_ray = opt.cuda_ray
        # prepare aabb with a 6D tensor (xmin, ymin, zmin, xmax, ymax, zmax)
        # NOTE: aabb (can be rectangular) is only used to generate points, we still rely on bound (always cubic) to calculate density grid and hashing.
        aabb_train = torch.FloatTensor([-opt.bound, -opt.bound/2, -opt.bound, opt.bound, opt.bound/2, opt.bound])
        aabb_infer = aabb_train.clone()
        self.register_buffer('aabb_train', aabb_train)
        self.register_buffer('aabb_infer', aabb_infer)
        # individual codes (per-frame learnable latents, only when dim > 0)
        self.individual_num = opt.ind_num
        self.individual_dim = opt.ind_dim
        if self.individual_dim > 0:
            self.individual_codes = nn.Parameter(torch.randn(self.individual_num, self.individual_dim) * 0.1)
        if self.torso:
            self.individual_dim_torso = opt.ind_dim_torso
            if self.individual_dim_torso > 0:
                self.individual_codes_torso = nn.Parameter(torch.randn(self.individual_num, self.individual_dim_torso) * 0.1)
        # optimize camera pose (per-frame rotation/translation corrections)
        self.train_camera = self.opt.train_camera
        if self.train_camera:
            self.camera_dR = nn.Parameter(torch.zeros(self.individual_num, 3)) # euler angle
            self.camera_dT = nn.Parameter(torch.zeros(self.individual_num, 3)) # xyz offset
        # extra state for cuda raymarching
        # 3D head density grid
        density_grid = torch.zeros([self.cascade, self.grid_size ** 3]) # [CAS, H * H * H]
        density_bitfield = torch.zeros(self.cascade * self.grid_size ** 3 // 8, dtype=torch.uint8) # [CAS * H * H * H // 8]
        self.register_buffer('density_grid', density_grid)
        self.register_buffer('density_bitfield', density_bitfield)
        self.mean_density = 0
        self.iter_density = 0
        # 2D torso density grid
        if self.torso:
            density_grid_torso = torch.zeros([self.grid_size ** 2]) # [H * H]
            self.register_buffer('density_grid_torso', density_grid_torso)
            self.mean_density_torso = 0
        # step counter
        step_counter = torch.zeros(16, 2, dtype=torch.int32) # 16 is hardcoded for averaging...
        self.register_buffer('step_counter', step_counter)
        self.mean_count = 0
        self.local_step = 0
        # decay for enc_a (audio-feature smoothing cache; see smooth_lips)
        if self.smooth_lips:
            self.enc_a = None
    def forward(self, x, d):
        """Abstract: subclasses implement the joint color/density query for positions x and directions d."""
        raise NotImplementedError()
# separated density and color query (can accelerate non-cuda-ray mode.)
    def density(self, x):
        """Abstract: density-only query at positions x."""
        raise NotImplementedError()
    def color(self, x, d, mask=None, **kwargs):
        """Abstract: color-only query (optional mask and extra kwargs)."""
        raise NotImplementedError()
    def reset_extra_state(self):
        """Zero the occupancy-grid statistics and the raymarching step counter.

        No-op unless CUDA raymarching is enabled.
        """
        if not self.cuda_ray:
            return
        # density grid
        self.density_grid.zero_()
        self.mean_density = 0
        self.iter_density = 0
        # step counter
        self.step_counter.zero_()
        self.mean_count = 0
        self.local_step = 0
def run_cuda(self, rays_o, rays_d, auds, bg_coords, poses, eye=None, index=0, dt_gamma=0, bg_color=None, perturb=False, force_all_rays=False, max_steps=1024, T_thresh=1e-4, **kwargs):
# rays_o, rays_d: [B, N, 3], assumes B == 1
# auds: [B, 16]
# index: [B]
# return: image: [B, N, 3], depth: [B, N]
prefix = rays_o.shape[:-1]
rays_o = rays_o.contiguous().view(-1, 3)
rays_d = rays_d.contiguous().view(-1, 3)
bg_coords = bg_coords.contiguous().view(-1, 2)
# only add camera offset at training!
if self.train_camera and (self.training or self.test_train):
dT = self.camera_dT[index] # [1, 3] | dR = euler_angles_to_matrix(self.camera_dR[index] / 180 * np.pi + 1e-8).squeeze(0) # [1, 3] --> [3, 3] | 2 | 2023-12-19 01:32:46+00:00 | 4k |
MingtaoGuo/AnimateAnyone_unofficial | aldm/ddim_hacked.py | [
{
"identifier": "make_ddim_sampling_parameters",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev ... | import torch
import numpy as np
from tqdm import tqdm
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor | 3,145 | mask=mask, x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
ucg_schedule=ucg_schedule
)
return samples, intermediates
    @torch.no_grad()
    def ddim_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
                      ucg_schedule=None):
        """Core DDIM loop: iteratively denoise from x_T (or random noise).

        Returns (img, intermediates); intermediates holds snapshots of x_t and
        the predicted x_0 every ``log_every_t`` steps.
        """
        device = self.model.betas.device
        b = shape[0]
        # Start from pure noise unless an explicit x_T is supplied.
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            # Caller asked for a truncated schedule: keep a proportional prefix.
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]
        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        # Iterate from high noise level down to zero.
        time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running DDIM Sampling with {total_steps} timesteps")
        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            # Inpainting: pin the masked region to the appropriately-noised x0.
            if mask is not None:
                assert x0 is not None
                img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
                img = img_orig * mask + (1. - mask) * img
            # Optional per-step schedule for the classifier-free guidance scale.
            if ucg_schedule is not None:
                assert len(ucg_schedule) == len(time_range)
                unconditional_guidance_scale = ucg_schedule[i]
            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      dynamic_threshold=dynamic_threshold)
            img, pred_x0 = outs
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)
            if index % log_every_t == 0 or index == total_steps - 1:
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)
        return img, intermediates
@torch.no_grad()
def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None,
dynamic_threshold=None):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
model_output = self.model.apply_model(x, t, c)
else:
model_t = self.model.apply_model(x, t, c)
model_uncond = self.model.apply_model(x, t, unconditional_conditioning)
model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)
if self.model.parameterization == "v":
e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
else:
e_t = model_output
if score_corrector is not None:
assert self.model.parameterization == "eps", 'not implemented'
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
if self.model.parameterization != "v":
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
else:
pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None:
raise NotImplementedError()
# direction pointing to x_t
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
| """SAMPLING ONLY."""
class DDIMSampler(object):
    def __init__(self, model, schedule="linear", **kwargs):
        """Wrap a trained diffusion *model* for DDIM sampling.

        Extra kwargs are accepted and ignored for call-site compatibility.
        """
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule
    def register_buffer(self, name, attr):
        """Store *attr* as a plain attribute, moving tensors to CUDA first.

        NOTE(review): the device is hard-coded to "cuda", so this fails on
        CPU-only hosts — confirm before running without a GPU.
        """
        if type(attr) == torch.Tensor:
            if attr.device != torch.device("cuda"):
                attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)
    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        """Precompute the DDIM timestep subset and all alpha/sigma buffers.

        ``ddim_eta=0`` yields deterministic DDIM updates; larger eta injects
        noise. Buffers go through self.register_buffer (which moves them to CUDA).
        """
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta,verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
@torch.no_grad()
def sample(self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.,
mask=None,
x0=None,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.,
unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
dynamic_threshold=None,
ucg_schedule=None,
**kwargs
):
if conditioning is not None:
if isinstance(conditioning, dict):
ctmp = conditioning[list(conditioning.keys())[0]]
while isinstance(ctmp, list): ctmp = ctmp[0]
cbs = ctmp.shape[0]
if cbs != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
elif isinstance(conditioning, list):
for ctmp in conditioning:
if ctmp.shape[0] != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
else:
if conditioning.shape[0] != batch_size:
print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
print(f'Data shape for DDIM sampling is {size}, eta {eta}')
samples, intermediates = self.ddim_sampling(conditioning, size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask, x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
dynamic_threshold=dynamic_threshold,
ucg_schedule=ucg_schedule
)
return samples, intermediates
    @torch.no_grad()
    def ddim_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
                      ucg_schedule=None):
        """Run the reverse DDIM loop from x_T (or fresh noise) down to x_0.

        Returns ``(img, intermediates)`` where ``intermediates`` logs ``x_inter``
        and ``pred_x0`` every ``log_every_t`` indices. If ``mask``/``x0`` are
        given, known regions are re-imposed each step (inpainting-style).
        """
        device = self.model.betas.device
        b = shape[0]
        # Start from caller-provided latents when given, otherwise Gaussian noise.
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        # Resolve which timesteps to visit: the full DDPM chain, the precomputed
        # DDIM subsequence, or (if `timesteps` is an int) a prefix of the latter.
        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]
        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        # Iterate from the noisiest timestep down to the cleanest.
        time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running DDIM Sampling with {total_steps} timesteps")
        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
        for i, step in enumerate(iterator):
            # `index` addresses the schedule buffers (they are stored low -> high).
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)
            if mask is not None:
                assert x0 is not None
                # Inpainting: pin known (mask==1) regions to the noised ground truth x0.
                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
                img = img_orig * mask + (1. - mask) * img
            if ucg_schedule is not None:
                # Per-step override of the classifier-free guidance scale.
                assert len(ucg_schedule) == len(time_range)
                unconditional_guidance_scale = ucg_schedule[i]
            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      dynamic_threshold=dynamic_threshold)
            img, pred_x0 = outs
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)
            if index % log_every_t == 0 or index == total_steps - 1:
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)
        return img, intermediates
@torch.no_grad()
def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None,
dynamic_threshold=None):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
model_output = self.model.apply_model(x, t, c)
else:
model_t = self.model.apply_model(x, t, c)
model_uncond = self.model.apply_model(x, t, unconditional_conditioning)
model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)
if self.model.parameterization == "v":
e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
else:
e_t = model_output
if score_corrector is not None:
assert self.model.parameterization == "eps", 'not implemented'
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
if self.model.parameterization != "v":
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
else:
pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
if dynamic_threshold is not None:
raise NotImplementedError()
# direction pointing to x_t
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t | noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature | 2 | 2023-12-16 03:31:33+00:00 | 4k |
yasserben/CLOUDS | clouds/modeling/pixel_decoder/msdeformattn.py | [
{
"identifier": "PositionEmbeddingSine",
"path": "clouds/modeling/transformer_decoder/position_encoding.py",
"snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"\n This is a more standard version of the position embedding, very similar to the one\n used by the Attention is all you need ... | import logging
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
import copy
from typing import Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.position_encoding import PositionEmbeddingSine
from .ops.modules import MSDeformAttn | 3,030 | """
Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved.
Licensed under the Apache License, Version 2.0
Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/pixel_decoder/msdeformattn.py
"""
def _get_clones(module, N):
    """Build a ModuleList of N independent deep copies of *module*."""
    clones = []
    for _ in range(N):
        clones.append(copy.deepcopy(module))
    return nn.ModuleList(clones)
def _get_activation_fn(activation):
    """Return an activation function given a string"""
    lookup = {"relu": F.relu, "gelu": F.gelu, "glu": F.glu}
    if activation in lookup:
        return lookup[activation]
    raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
def build_pixel_decoder(cfg, input_shape):
    """
    Build a pixel decoder from `cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME`.
    """
    name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME
    model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
    # A pixel decoder must expose forward_features() returning mask features.
    if not callable(getattr(model, "forward_features", None)):
        raise ValueError(
            "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
            f"Please implement forward_features for {name} to only return mask features."
        )
    return model
# MSDeformAttn Transformer encoder in deformable detr
class MSDeformAttnTransformerEncoderOnly(nn.Module):
def __init__(
self,
d_model=256,
nhead=8,
num_encoder_layers=6,
dim_feedforward=1024,
dropout=0.1,
activation="relu",
num_feature_levels=4,
enc_n_points=4,
):
super().__init__()
self.d_model = d_model
self.nhead = nhead
encoder_layer = MSDeformAttnTransformerEncoderLayer(
d_model,
dim_feedforward,
dropout,
activation,
num_feature_levels,
nhead,
enc_n_points,
)
self.encoder = MSDeformAttnTransformerEncoder(encoder_layer, num_encoder_layers)
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
| """
Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved.
Licensed under the Apache License, Version 2.0
Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/modeling/pixel_decoder/msdeformattn.py
"""
def _get_clones(module, N):
    """Build a ModuleList of N independent deep copies of *module*."""
    clones = []
    for _ in range(N):
        clones.append(copy.deepcopy(module))
    return nn.ModuleList(clones)
def _get_activation_fn(activation):
    """Return an activation function given a string"""
    lookup = {"relu": F.relu, "gelu": F.gelu, "glu": F.glu}
    if activation in lookup:
        return lookup[activation]
    raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
def build_pixel_decoder(cfg, input_shape):
    """
    Build a pixel decoder from `cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME`.
    """
    name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME
    model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape)
    # A pixel decoder must expose forward_features() returning mask features.
    if not callable(getattr(model, "forward_features", None)):
        raise ValueError(
            "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
            f"Please implement forward_features for {name} to only return mask features."
        )
    return model
# MSDeformAttn Transformer encoder in deformable detr
class MSDeformAttnTransformerEncoderOnly(nn.Module):
def __init__(
self,
d_model=256,
nhead=8,
num_encoder_layers=6,
dim_feedforward=1024,
dropout=0.1,
activation="relu",
num_feature_levels=4,
enc_n_points=4,
):
super().__init__()
self.d_model = d_model
self.nhead = nhead
encoder_layer = MSDeformAttnTransformerEncoderLayer(
d_model,
dim_feedforward,
dropout,
activation,
num_feature_levels,
nhead,
enc_n_points,
)
self.encoder = MSDeformAttnTransformerEncoder(encoder_layer, num_encoder_layers)
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules(): | if isinstance(m, MSDeformAttn): | 1 | 2023-12-15 15:40:58+00:00 | 4k |
ASK-03/Reverse-Chain | main.py | [
{
"identifier": "FinalAPISelector",
"path": "modules.py",
"snippet": "class FinalAPISelector(ReverseChainBaseClass):\n def __init__(self, model: str, temperature: float) -> None:\n super(FinalAPISelector, self).__init__(model, temperature)\n self.template = \"\"\"\n We have below... | from modules import FinalAPISelector, ArgumentExtractor, SubAPISelector
from retriever import VectorDataBase
from executor import Executor
from result_formatter import ResultFormatter
from configparser import ConfigParser
from collections import deque
import os
import json
import logging
import warnings | 2,223 |
warnings.filterwarnings("ignore")
config = ConfigParser()
config.read("config.ini")
DATA_PATH = config["faiss"]["data"]
OPENAI_SECRET_KEY = config["openai"]["secret_key"]
MODEL = config["openai"]["model"]
TEMPERATURE = float(config["openai"]["temperature"])
QUERY = config["query"]["query"]
os.environ["OPENAI_API_KEY"] = OPENAI_SECRET_KEY
if __name__ == "__main__":
|
warnings.filterwarnings("ignore")
config = ConfigParser()
config.read("config.ini")
DATA_PATH = config["faiss"]["data"]
OPENAI_SECRET_KEY = config["openai"]["secret_key"]
MODEL = config["openai"]["model"]
TEMPERATURE = float(config["openai"]["temperature"])
QUERY = config["query"]["query"]
os.environ["OPENAI_API_KEY"] = OPENAI_SECRET_KEY
if __name__ == "__main__": | vector_db = VectorDataBase() | 3 | 2023-12-15 19:19:01+00:00 | 4k |
linyq2117/TagCLIP | classify.py | [
{
"identifier": "scoremap2bbox",
"path": "utils.py",
"snippet": "def scoremap2bbox(scoremap, threshold, multi_contour_eval=False):\n height, width = scoremap.shape\n scoremap_image = np.expand_dims((scoremap * 255).astype(np.uint8), 2)\n _, thr_gray_heatmap = cv2.threshold(\n src=scorema... | import clip
import torch
import cv2
import numpy as np
import pickle
import os
import math
import torch.nn.functional as F
import os
import argparse
import warnings
from PIL import Image
from tqdm import tqdm
from lxml import etree
from utils import scoremap2bbox, parse_xml_to_dict, _convert_image_to_rgb, compute_AP, compute_F1, _transform_resize
from clip_text import class_names_voc, BACKGROUND_CATEGORY_VOC, class_names_coco, BACKGROUND_CATEGORY_COCO, class_names_coco_stuff182_dict, coco_stuff_182_to_27 | 3,150 | candidate_cls_list = []
logits_refined = logits.clone()
logits_max = torch.max(logits, dim=0)[0]
for tempid,tempv in enumerate(logits_max):
if tempv > 0:
candidate_cls_list.append(tempid)
for ccls in candidate_cls_list:
temp_logits = logits[:,ccls]
temp_logits = temp_logits - temp_logits.min()
temp_logits = temp_logits / temp_logits.max()
mask = temp_logits
mask = mask.reshape(h // patch_size, w // patch_size)
box, cnt = scoremap2bbox(mask.detach().cpu().numpy(), threshold=temp_logits.mean(), multi_contour_eval=True)
aff_mask = torch.zeros((mask.shape[0],mask.shape[1])).to(device)
for i_ in range(cnt):
x0_, y0_, x1_, y1_ = box[i_]
aff_mask[y0_:y1_, x0_:x1_] = 1
aff_mask = aff_mask.view(1,mask.shape[0] * mask.shape[1])
trans_mat = attn_weight * aff_mask
logits_refined_ccls = torch.matmul(trans_mat, logits_coarse[:,ccls:ccls+1])
logits_refined[:, ccls] = logits_refined_ccls.squeeze()
return logits_refined
def cwr(logits, logits_max, h, w, image, text_features):
    """Re-score each confident class by re-encoding its cropped image region.

    For every class whose peak patch logit clears a relative threshold, the image
    is cropped to the bounding box of that class's activated patches, re-encoded
    with the CLIP model (CLS attention suppressed toward patches outside the
    activation mask), and the resulting class probability is blended 50/50 into
    ``logits_max``.

    NOTE(review): depends on module-level globals ``args`` (dataset name) and
    ``model`` (exposing ``encode_image_tagclip``/``logit_scale``) -- confirm at
    call site. ``logits_max`` is modified in place and also returned.
    """
    patch_size = 16
    input_size = 224
    stride = input_size // patch_size  # 224 // 16 = 14 patches per side for the crop
    candidate_cls_list = []
    ma = logits.max()
    mi = logits.min()
    step = ma - mi
    # Looser candidate threshold for COCO-Stuff, stricter for other datasets.
    if args.dataset == 'cocostuff':
        thres_abs = 0.1
    else:
        thres_abs = 0.5
    thres = mi + thres_abs*step
    # Candidate classes: peak patch logit above the relative threshold.
    for tempid,tempv in enumerate(logits_max):
        if tempv > thres:
            candidate_cls_list.append(tempid)
    for ccls in candidate_cls_list:
        # Min-max normalize this class's patch logits to [0, 1], binarize at 0.5.
        temp_logits = logits[:,ccls]
        temp_logits = temp_logits - temp_logits.min()
        temp_logits = temp_logits / temp_logits.max()
        mask = temp_logits > 0.5
        mask = mask.reshape(h // patch_size, w // patch_size)
        # Tight bounding box (patch coordinates) around the activated patches.
        horizontal_indicies = np.where(np.any(mask.cpu().numpy(), axis=0))[0]
        vertical_indicies = np.where(np.any(mask.cpu().numpy(), axis=1))[0]
        if horizontal_indicies.shape[0]:
            x1, x2 = horizontal_indicies[[0, -1]]
            y1, y2 = vertical_indicies[[0, -1]]
            x2 += 1
            y2 += 1
        else:
            x1, x2, y1, y2 = 0, 0, 0, 0
        y1 = max(y1, 0)
        x1 = max(x1, 0)
        y2 = min(y2, mask.shape[-2] - 1)
        x2 = min(x2, mask.shape[-1] - 1)
        # NOTE(review): a degenerate box aborts the WHOLE loop, skipping any
        # remaining candidate classes -- possibly meant to be `continue`; confirm.
        if x1 == x2 or y1 == y2:
            return logits_max
        # Resize the cropped activation mask to the crop's stride x stride patch grid.
        mask = mask[y1:y2, x1:x2]
        mask = mask.float()
        mask = mask[None, None, :, :]
        mask = F.interpolate(mask, size=(stride, stride), mode="nearest")
        mask = mask.squeeze()
        mask = mask.reshape(-1).bool()
        # Crop the image to the box (patch coords -> pixels) and resize to 224x224.
        image_cut = image[:, :, int(y1*patch_size):int(y2*patch_size), int(x1*patch_size):int(x2*patch_size)]
        image_cut = F.interpolate(image_cut, size=(input_size, input_size), mode="bilinear", align_corners=False)
        # CLS-row attention bias: start from all zeros (1 - ones), then strongly
        # suppress CLS attention to patches outside the activation mask.
        cls_attn = 1 - torch.ones((stride*stride+1, stride*stride+1))
        for j in range(1, cls_attn.shape[1]):
            if not mask[j - 1]:
                cls_attn[0, j] = -1000
        image_features = model.encode_image_tagclip(image_cut, input_size, input_size, attn_mask=cls_attn)[0]
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        logit_scale = model.logit_scale.exp()
        cur_logits = logit_scale * image_features @ text_features.t()
        cur_logits = cur_logits[:, 0, :]
        cur_logits = cur_logits.softmax(dim=-1).squeeze()
        cur_logits_norm = cur_logits[ccls]
        # Blend the crop-level probability into the image-level score (equal weights).
        logits_max[ccls] = 0.5 * logits_max[ccls] + (1 - 0.5) * cur_logits_norm
    return logits_max
def classify():
pred_label_id = []
gt_label_id = []
with torch.no_grad():
text_features = clip.encode_text_with_prompt_ensemble(model, class_names, device)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
for im_idx, im in enumerate(tqdm(image_list)):
image_path = os.path.join(args.img_root, im)
label_id_list = all_label_list[im_idx]
label_id_list = [int(lid) for lid in label_id_list]
if args.dataset == 'cocostuff':
label_id_list = [coco_stuff_182_to_171[int(lid)] for lid in label_id_list]
gt_label_id.append(label_id_list)
pil_img = Image.open(image_path)
array_img = np.array(pil_img)
ori_height, ori_width = array_img.shape[:2]
if len(array_img.shape) == 2:
array_img = np.stack([array_img, array_img, array_img], axis=2)
pil_img = Image.fromarray(np.uint8(array_img))
if model_type == 'clip':
patch_size = 16
|
warnings.filterwarnings("ignore")
def mask_attn(logits_coarse, logits, h, w, attn_weight):
    """Refine patch-level class logits by propagating them through box-masked attention.

    For each class with a positive peak logit, bounding boxes are derived from its
    normalized score map and used to zero out ``attn_weight`` outside the boxes;
    the coarse logits are then propagated through the masked attention (matmul)
    and written back into that class's column of a copy of ``logits``.

    NOTE(review): reads the module-level global ``device`` -- confirm at call site.
    """
    patch_size = 16
    candidate_cls_list = []
    logits_refined = logits.clone()
    # Peak logit per class over all patches.
    logits_max = torch.max(logits, dim=0)[0]
    # Only refine classes whose peak logit is positive.
    for tempid,tempv in enumerate(logits_max):
        if tempv > 0:
            candidate_cls_list.append(tempid)
    for ccls in candidate_cls_list:
        # Min-max normalize this class's patch scores to [0, 1].
        temp_logits = logits[:,ccls]
        temp_logits = temp_logits - temp_logits.min()
        temp_logits = temp_logits / temp_logits.max()
        mask = temp_logits
        mask = mask.reshape(h // patch_size, w // patch_size)
        # Bounding boxes of high-score regions (thresholded at the mean score).
        box, cnt = scoremap2bbox(mask.detach().cpu().numpy(), threshold=temp_logits.mean(), multi_contour_eval=True)
        # Binary mask that is 1 inside every detected box.
        aff_mask = torch.zeros((mask.shape[0],mask.shape[1])).to(device)
        for i_ in range(cnt):
            x0_, y0_, x1_, y1_ = box[i_]
            aff_mask[y0_:y1_, x0_:x1_] = 1
        aff_mask = aff_mask.view(1,mask.shape[0] * mask.shape[1])
        # Zero attention toward patches outside the boxes, then propagate logits.
        trans_mat = attn_weight * aff_mask
        logits_refined_ccls = torch.matmul(trans_mat, logits_coarse[:,ccls:ccls+1])
        logits_refined[:, ccls] = logits_refined_ccls.squeeze()
    return logits_refined
def cwr(logits, logits_max, h, w, image, text_features):
    """Re-score each confident class by re-encoding its cropped image region.

    For every class whose peak patch logit clears a relative threshold, the image
    is cropped to the bounding box of that class's activated patches, re-encoded
    with the CLIP model (CLS attention suppressed toward patches outside the
    activation mask), and the resulting class probability is blended 50/50 into
    ``logits_max``.

    NOTE(review): depends on module-level globals ``args`` (dataset name) and
    ``model`` (exposing ``encode_image_tagclip``/``logit_scale``) -- confirm at
    call site. ``logits_max`` is modified in place and also returned.
    """
    patch_size = 16
    input_size = 224
    stride = input_size // patch_size  # 224 // 16 = 14 patches per side for the crop
    candidate_cls_list = []
    ma = logits.max()
    mi = logits.min()
    step = ma - mi
    # Looser candidate threshold for COCO-Stuff, stricter for other datasets.
    if args.dataset == 'cocostuff':
        thres_abs = 0.1
    else:
        thres_abs = 0.5
    thres = mi + thres_abs*step
    # Candidate classes: peak patch logit above the relative threshold.
    for tempid,tempv in enumerate(logits_max):
        if tempv > thres:
            candidate_cls_list.append(tempid)
    for ccls in candidate_cls_list:
        # Min-max normalize this class's patch logits to [0, 1], binarize at 0.5.
        temp_logits = logits[:,ccls]
        temp_logits = temp_logits - temp_logits.min()
        temp_logits = temp_logits / temp_logits.max()
        mask = temp_logits > 0.5
        mask = mask.reshape(h // patch_size, w // patch_size)
        # Tight bounding box (patch coordinates) around the activated patches.
        horizontal_indicies = np.where(np.any(mask.cpu().numpy(), axis=0))[0]
        vertical_indicies = np.where(np.any(mask.cpu().numpy(), axis=1))[0]
        if horizontal_indicies.shape[0]:
            x1, x2 = horizontal_indicies[[0, -1]]
            y1, y2 = vertical_indicies[[0, -1]]
            x2 += 1
            y2 += 1
        else:
            x1, x2, y1, y2 = 0, 0, 0, 0
        y1 = max(y1, 0)
        x1 = max(x1, 0)
        y2 = min(y2, mask.shape[-2] - 1)
        x2 = min(x2, mask.shape[-1] - 1)
        # NOTE(review): a degenerate box aborts the WHOLE loop, skipping any
        # remaining candidate classes -- possibly meant to be `continue`; confirm.
        if x1 == x2 or y1 == y2:
            return logits_max
        # Resize the cropped activation mask to the crop's stride x stride patch grid.
        mask = mask[y1:y2, x1:x2]
        mask = mask.float()
        mask = mask[None, None, :, :]
        mask = F.interpolate(mask, size=(stride, stride), mode="nearest")
        mask = mask.squeeze()
        mask = mask.reshape(-1).bool()
        # Crop the image to the box (patch coords -> pixels) and resize to 224x224.
        image_cut = image[:, :, int(y1*patch_size):int(y2*patch_size), int(x1*patch_size):int(x2*patch_size)]
        image_cut = F.interpolate(image_cut, size=(input_size, input_size), mode="bilinear", align_corners=False)
        # CLS-row attention bias: start from all zeros (1 - ones), then strongly
        # suppress CLS attention to patches outside the activation mask.
        cls_attn = 1 - torch.ones((stride*stride+1, stride*stride+1))
        for j in range(1, cls_attn.shape[1]):
            if not mask[j - 1]:
                cls_attn[0, j] = -1000
        image_features = model.encode_image_tagclip(image_cut, input_size, input_size, attn_mask=cls_attn)[0]
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        logit_scale = model.logit_scale.exp()
        cur_logits = logit_scale * image_features @ text_features.t()
        cur_logits = cur_logits[:, 0, :]
        cur_logits = cur_logits.softmax(dim=-1).squeeze()
        cur_logits_norm = cur_logits[ccls]
        # Blend the crop-level probability into the image-level score (equal weights).
        logits_max[ccls] = 0.5 * logits_max[ccls] + (1 - 0.5) * cur_logits_norm
    return logits_max
def classify():
pred_label_id = []
gt_label_id = []
with torch.no_grad():
text_features = clip.encode_text_with_prompt_ensemble(model, class_names, device)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
for im_idx, im in enumerate(tqdm(image_list)):
image_path = os.path.join(args.img_root, im)
label_id_list = all_label_list[im_idx]
label_id_list = [int(lid) for lid in label_id_list]
if args.dataset == 'cocostuff':
label_id_list = [coco_stuff_182_to_171[int(lid)] for lid in label_id_list]
gt_label_id.append(label_id_list)
pil_img = Image.open(image_path)
array_img = np.array(pil_img)
ori_height, ori_width = array_img.shape[:2]
if len(array_img.shape) == 2:
array_img = np.stack([array_img, array_img, array_img], axis=2)
pil_img = Image.fromarray(np.uint8(array_img))
if model_type == 'clip':
patch_size = 16 | preprocess = _transform_resize(int(np.ceil(int(ori_height) / patch_size) * patch_size), int(np.ceil(int(ori_width) / patch_size) * patch_size)) | 5 | 2023-12-21 03:20:47+00:00 | 4k |
cypypccpy/dynamic_handover | dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/backbones/mlp.py | [
{
"identifier": "BACKBONES",
"path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/builder.py",
"snippet": "BACKBONES = Registry('backbone')"
},
{
"identifier": "ConvModule",
"path": "dexteroushandenvs/algorithms/utils/mani_skill_learn/networks/modules/conv_module.py",
"s... | import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm
from algorithms.utils.mani_skill_learn.utils.meta import get_root_logger
from algorithms.utils.mani_skill_learn.utils.torch import load_checkpoint
from ..builder import BACKBONES
from ..modules import ConvModule, build_init
from ..modules import build_activation_layer, build_norm_layer | 3,334 |
@BACKBONES.register_module()
class LinearMLP(nn.Module):
    """MLP made of ``nn.Linear`` layers with optional norm/activation per layer.

    :param mlp_spec: list of layer widths; layer i maps mlp_spec[i] -> mlp_spec[i+1].
    :param norm_cfg: normalization config applied after each hidden linear layer.
    :param bias: per-layer bias, or 'auto' to enable bias exactly when no norm follows.
    :param inactivated_output: if True, the last layer gets no norm/activation.
    :param pretrained: optional checkpoint path loaded in ``init_weights``.
    """

    def __init__(self, mlp_spec, norm_cfg=dict(type='BN1d'), bias='auto', inactivated_output=True,
                 pretrained=None, linear_init_cfg=None, norm_init_cfg=None):
        super(LinearMLP, self).__init__()
        self.mlp = nn.Sequential()
        for i in range(len(mlp_spec) - 1):
            if i == len(mlp_spec) - 2 and inactivated_output:
                # Final layer: disable activation and normalization when requested.
                act_cfg = None
                norm_cfg = None
            else:
                act_cfg = dict(type='ReLU')
            # 'auto': bias is used exactly when no normalization follows the layer.
            bias_i = norm_cfg is None if bias == 'auto' else bias
            self.mlp.add_module(f'linear{i}', nn.Linear(mlp_spec[i], mlp_spec[i + 1], bias=bias_i))
            if norm_cfg:
                self.mlp.add_module(f'norm{i}', build_norm_layer(norm_cfg, mlp_spec[i + 1])[1])
            if act_cfg:
                self.mlp.add_module(f'act{i}', build_activation_layer(act_cfg))
        self.init_weights(pretrained, linear_init_cfg, norm_init_cfg)

    def forward(self, input):
        """Apply the MLP; last dim of ``input`` must equal ``mlp_spec[0]``."""
        # Removed a no-op `input = input` statement and dead debug print.
        return self.mlp(input)

    def init_weights(self, pretrained=None, linear_init_cfg=None, norm_init_cfg=None):
        """Load weights from ``pretrained`` checkpoint, or apply init configs."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            linear_init = build_init(linear_init_cfg) if linear_init_cfg else None
            norm_init = build_init(norm_init_cfg) if norm_init_cfg else None
            for m in self.modules():
                if isinstance(m, nn.Linear) and linear_init:
                    linear_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)) and norm_init:
                    norm_init(m)
        else:
            raise TypeError('pretrained must be a str or None')
@BACKBONES.register_module()
class ConvMLP(nn.Module):
def __init__(self, mlp_spec, norm_cfg=dict(type='BN1d'), bias='auto', inactivated_output=True,
pretrained=None, conv_init_cfg=None, norm_init_cfg=None):
super(ConvMLP, self).__init__()
self.mlp = nn.Sequential()
for i in range(len(mlp_spec) - 1):
if i == len(mlp_spec) - 2 and inactivated_output:
act_cfg = None
else:
act_cfg = dict(type='ReLU')
self.mlp.add_module(
f'layer{i}',
|
@BACKBONES.register_module()
class LinearMLP(nn.Module):
    """MLP made of ``nn.Linear`` layers with optional norm/activation per layer.

    :param mlp_spec: list of layer widths; layer i maps mlp_spec[i] -> mlp_spec[i+1].
    :param norm_cfg: normalization config applied after each hidden linear layer.
    :param bias: per-layer bias, or 'auto' to enable bias exactly when no norm follows.
    :param inactivated_output: if True, the last layer gets no norm/activation.
    :param pretrained: optional checkpoint path loaded in ``init_weights``.
    """

    def __init__(self, mlp_spec, norm_cfg=dict(type='BN1d'), bias='auto', inactivated_output=True,
                 pretrained=None, linear_init_cfg=None, norm_init_cfg=None):
        super(LinearMLP, self).__init__()
        self.mlp = nn.Sequential()
        for i in range(len(mlp_spec) - 1):
            if i == len(mlp_spec) - 2 and inactivated_output:
                # Final layer: disable activation and normalization when requested.
                act_cfg = None
                norm_cfg = None
            else:
                act_cfg = dict(type='ReLU')
            # 'auto': bias is used exactly when no normalization follows the layer.
            bias_i = norm_cfg is None if bias == 'auto' else bias
            self.mlp.add_module(f'linear{i}', nn.Linear(mlp_spec[i], mlp_spec[i + 1], bias=bias_i))
            if norm_cfg:
                self.mlp.add_module(f'norm{i}', build_norm_layer(norm_cfg, mlp_spec[i + 1])[1])
            if act_cfg:
                self.mlp.add_module(f'act{i}', build_activation_layer(act_cfg))
        self.init_weights(pretrained, linear_init_cfg, norm_init_cfg)

    def forward(self, input):
        """Apply the MLP; last dim of ``input`` must equal ``mlp_spec[0]``."""
        # Removed a no-op `input = input` statement and dead debug print.
        return self.mlp(input)

    def init_weights(self, pretrained=None, linear_init_cfg=None, norm_init_cfg=None):
        """Load weights from ``pretrained`` checkpoint, or apply init configs."""
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            linear_init = build_init(linear_init_cfg) if linear_init_cfg else None
            norm_init = build_init(norm_init_cfg) if norm_init_cfg else None
            for m in self.modules():
                if isinstance(m, nn.Linear) and linear_init:
                    linear_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)) and norm_init:
                    norm_init(m)
        else:
            raise TypeError('pretrained must be a str or None')
@BACKBONES.register_module()
class ConvMLP(nn.Module):
def __init__(self, mlp_spec, norm_cfg=dict(type='BN1d'), bias='auto', inactivated_output=True,
pretrained=None, conv_init_cfg=None, norm_init_cfg=None):
super(ConvMLP, self).__init__()
self.mlp = nn.Sequential()
for i in range(len(mlp_spec) - 1):
if i == len(mlp_spec) - 2 and inactivated_output:
act_cfg = None
else:
act_cfg = dict(type='ReLU')
self.mlp.add_module(
f'layer{i}', | ConvModule( | 1 | 2023-12-16 16:49:38+00:00 | 4k |
video-db/videodb-python | videodb/video.py | [
{
"identifier": "play_stream",
"path": "videodb/_utils/_video.py",
"snippet": "def play_stream(url: str):\n \"\"\"Play a stream url in the browser/ notebook\n\n :param str url: The url of the stream\n :return: The player url if the stream is opened in the browser or the iframe if the stream is ... | from typing import Optional
from videodb._utils._video import play_stream
from videodb._constants import (
ApiPath,
SearchType,
IndexType,
Workflows,
)
from videodb.search import SearchFactory, SearchResult
from videodb.shot import Shot | 1,948 |
class Video:
    def __init__(self, _connection, id: str, collection_id: str, **kwargs) -> None:
        """Wrap an uploaded video identified by ``id`` within ``collection_id``.

        Extra metadata may arrive via **kwargs; missing keys default to None
        (``length`` defaults to 0.0 and is coerced to float).
        """
        # Connection object used for all subsequent API calls on this video.
        self._connection = _connection
        self.id = id
        self.collection_id = collection_id
        self.stream_url = kwargs.get("stream_url", None)
        self.player_url = kwargs.get("player_url", None)
        self.name = kwargs.get("name", None)
        self.description = kwargs.get("description", None)
        self.thumbnail_url = kwargs.get("thumbnail_url", None)
        self.length = float(kwargs.get("length", 0.0))
        self.transcript = kwargs.get("transcript", None)
        self.transcript_text = kwargs.get("transcript_text", None)
    def __repr__(self) -> str:
        """Return a debug-friendly summary of the video's main attributes."""
        return (
            f"Video("
            f"id={self.id}, "
            f"collection_id={self.collection_id}, "
            f"stream_url={self.stream_url}, "
            f"player_url={self.player_url}, "
            f"name={self.name}, "
            f"description={self.description}, "
            f"thumbnail_url={self.thumbnail_url}, "
            f"length={self.length})"
        )
    def __getitem__(self, key):
        # Dict-style access to instance attributes, e.g. video["stream_url"].
        # Raises KeyError (not AttributeError) for unknown keys.
        return self.__dict__[key]
def search(
self,
query: str,
search_type: Optional[str] = SearchType.semantic,
result_threshold: Optional[int] = None,
score_threshold: Optional[int] = None,
dynamic_score_percentage: Optional[int] = None,
) -> SearchResult:
|
class Video:
    def __init__(self, _connection, id: str, collection_id: str, **kwargs) -> None:
        """Wrap an uploaded video identified by ``id`` within ``collection_id``.

        Extra metadata may arrive via **kwargs; missing keys default to None
        (``length`` defaults to 0.0 and is coerced to float).
        """
        # Connection object used for all subsequent API calls on this video.
        self._connection = _connection
        self.id = id
        self.collection_id = collection_id
        self.stream_url = kwargs.get("stream_url", None)
        self.player_url = kwargs.get("player_url", None)
        self.name = kwargs.get("name", None)
        self.description = kwargs.get("description", None)
        self.thumbnail_url = kwargs.get("thumbnail_url", None)
        self.length = float(kwargs.get("length", 0.0))
        self.transcript = kwargs.get("transcript", None)
        self.transcript_text = kwargs.get("transcript_text", None)
    def __repr__(self) -> str:
        """Return a debug-friendly summary of the video's main attributes."""
        return (
            f"Video("
            f"id={self.id}, "
            f"collection_id={self.collection_id}, "
            f"stream_url={self.stream_url}, "
            f"player_url={self.player_url}, "
            f"name={self.name}, "
            f"description={self.description}, "
            f"thumbnail_url={self.thumbnail_url}, "
            f"length={self.length})"
        )
    def __getitem__(self, key):
        # Dict-style access to instance attributes, e.g. video["stream_url"].
        # Raises KeyError (not AttributeError) for unknown keys.
        return self.__dict__[key]
def search(
self,
query: str,
search_type: Optional[str] = SearchType.semantic,
result_threshold: Optional[int] = None,
score_threshold: Optional[int] = None,
dynamic_score_percentage: Optional[int] = None,
) -> SearchResult: | search = SearchFactory(self._connection).get_search(search_type) | 5 | 2023-12-18 15:20:04+00:00 | 4k |
IDEA-CCNL/Real-Gemini | test/test_tool.py | [
{
"identifier": "Text2MusicTool",
"path": "real_gemini/tools/music_tool.py",
"snippet": "class Text2MusicTool(object):\n _name_ = \"Text2Music\"\n _description_ = \"这个工具是从文本生成音乐的调用接口,它可以根据一段文字,生成符合这段文字内容的音乐风格。本工具的输入是一段文本指令。This tool is an API that generates music from text. It can create music tha... | import os
import sys
import json
import argparse
from real_gemini.tools.music_tool import Text2MusicTool
from real_gemini.tools.image_generation_tool import TaiyiGeneralTool
from real_gemini.tools.weather_tool import WeatherTool
from real_gemini.tools.tts_tool import TTSTool
from dotenv import load_dotenv | 2,979 | #encoding=utf8
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
TOOL_DICT = {
"music": Text2MusicTool,
"image": TaiyiGeneralTool,
"weather": WeatherTool,
| #encoding=utf8
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
TOOL_DICT = {
"music": Text2MusicTool,
"image": TaiyiGeneralTool,
"weather": WeatherTool, | "tts": TTSTool, | 3 | 2023-12-15 04:09:37+00:00 | 4k |
aiim-research/GRETEL | src/explainer/ensemble/aggregators/top_select.py | [
{
"identifier": "Explainer",
"path": "src/core/explainer_base.py",
"snippet": "class Explainer(Configurable, metaclass=ABCMeta):\n \n def __init__(self, context: Context, local_config):\n self.dataset = retake_dataset(local_config)\n self.oracle = retake_oracle(local_config)\n ... | import copy
import sys
import numpy as np
from abc import ABC
from typing import List
from src.core.explainer_base import Explainer
from src.dataset.instances.graph import GraphInstance
from src.explainer.ensemble.aggregators.base import ExplanationAggregator
from src.evaluation.evaluation_metric_ged import GraphEditDistanceMetric
from src.core.factory_base import get_instance_kvargs
from src.utils.cfg_utils import get_dflts_to_of, init_dflts_to_of, inject_dataset, inject_oracle, retake_oracle, retake_dataset | 2,781 |
class ExplanationTopSelect(ExplanationAggregator):
    def init(self):
        """Initialize the aggregator, then build its configured distance metric."""
        super().init()
        # Instantiate the metric from the class path + kwargs in the local config.
        self.distance_metric = get_instance_kvargs(self.local_config['parameters']['distance_metric']['class'],
                                                   self.local_config['parameters']['distance_metric']['parameters'])
|
class ExplanationTopSelect(ExplanationAggregator):
    def init(self):
        """Initialize the aggregator, then build its configured distance metric."""
        super().init()
        # Instantiate the metric from the class path + kwargs in the local config.
        self.distance_metric = get_instance_kvargs(self.local_config['parameters']['distance_metric']['class'],
                                                   self.local_config['parameters']['distance_metric']['parameters'])
| def real_aggregate(self, org_instance: GraphInstance, explanations: List[GraphInstance]): | 1 | 2023-12-15 16:34:16+00:00 | 4k |
modelscope/scepter | scepter/modules/annotator/midas_op.py | [
{
"identifier": "BaseAnnotator",
"path": "scepter/modules/annotator/base_annotator.py",
"snippet": "class BaseAnnotator(BaseModel, metaclass=ABCMeta):\n para_dict = {}\n\n def __init__(self, cfg, logger=None):\n super().__init__(cfg, logger=logger)\n\n @torch.no_grad()\n @torch.infere... | from abc import ABCMeta
from einops import rearrange
from PIL import Image
from scepter.modules.annotator.base_annotator import BaseAnnotator
from scepter.modules.annotator.midas.api import MiDaSInference
from scepter.modules.annotator.registry import ANNOTATORS
from scepter.modules.annotator.utils import resize_image, resize_image_ori
from scepter.modules.utils.config import dict_to_yaml
from scepter.modules.utils.distribute import we
from scepter.modules.utils.file_system import FS
import numpy as np
import torch | 2,873 | # -*- coding: utf-8 -*-
# Midas Depth Estimation
# From https://github.com/isl-org/MiDaS
# MIT LICENSE
@ANNOTATORS.register_class()
class MidasDetector(BaseAnnotator, metaclass=ABCMeta):
def __init__(self, cfg, logger=None):
super().__init__(cfg, logger=logger)
pretrained_model = cfg.get('PRETRAINED_MODEL', None)
if pretrained_model:
with FS.get_from(pretrained_model, wait_finish=True) as local_path:
self.model = MiDaSInference(model_type='dpt_hybrid',
model_path=local_path)
self.a = cfg.get('A', np.pi * 2.0)
self.bg_th = cfg.get('BG_TH', 0.1)
@torch.no_grad()
@torch.inference_mode()
@torch.autocast('cuda', enabled=False)
def forward(self, image):
if isinstance(image, Image.Image):
image = np.array(image)
elif isinstance(image, torch.Tensor):
image = image.detach().cpu().numpy()
elif isinstance(image, np.ndarray):
image = image.copy()
else:
raise f'Unsurpport datatype{type(image)}, only surpport np.ndarray, torch.Tensor, Pillow Image.'
image_depth = image
h, w, c = image.shape
| # -*- coding: utf-8 -*-
# Midas Depth Estimation
# From https://github.com/isl-org/MiDaS
# MIT LICENSE
@ANNOTATORS.register_class()
class MidasDetector(BaseAnnotator, metaclass=ABCMeta):
def __init__(self, cfg, logger=None):
super().__init__(cfg, logger=logger)
pretrained_model = cfg.get('PRETRAINED_MODEL', None)
if pretrained_model:
with FS.get_from(pretrained_model, wait_finish=True) as local_path:
self.model = MiDaSInference(model_type='dpt_hybrid',
model_path=local_path)
self.a = cfg.get('A', np.pi * 2.0)
self.bg_th = cfg.get('BG_TH', 0.1)
@torch.no_grad()
@torch.inference_mode()
@torch.autocast('cuda', enabled=False)
def forward(self, image):
if isinstance(image, Image.Image):
image = np.array(image)
elif isinstance(image, torch.Tensor):
image = image.detach().cpu().numpy()
elif isinstance(image, np.ndarray):
image = image.copy()
else:
raise f'Unsurpport datatype{type(image)}, only surpport np.ndarray, torch.Tensor, Pillow Image.'
image_depth = image
h, w, c = image.shape | image_depth, k = resize_image(image_depth, | 3 | 2023-12-21 02:01:48+00:00 | 4k |
YyzHarry/shortcut-ood-fairness | learning/algorithms.py | [
{
"identifier": "networks",
"path": "models/networks.py",
"snippet": "class Identity(nn.Module):\nclass MLP(nn.Module):\nclass PretrainedImageModel(torch.nn.Module):\nclass ResNet(PretrainedImageModel):\nclass TimmModel(PretrainedImageModel):\nclass HubModel(PretrainedImageModel):\nclass ImportedModel(P... | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.autograd as autograd
import copy
import numpy as np
from transformers import get_scheduler
from models import networks
from learning import joint_dro
from learning.optimizers import get_optimizers
from utils.misc import mixup_data | 1,675 | 'ERM',
'StratifiedERM',
# subgroup methods
'GroupDRO',
'IRM',
'CVaRDRO',
'JTT',
'LISA',
'DFR',
# data augmentation
'Mixup',
# domain generalization methods
'MMD',
'CORAL',
'DANN',
'CDANN',
# imbalanced learning methods
'ReSample',
'ReWeight',
'SqrtReWeight',
'CBLoss',
'Focal',
'LDAM',
'BSoftmax',
'CRT',
'ReWeightCRT',
'VanillaCRT',
# flat minima optimizer
'MA',
'SAM',
# attribute balancing
'GroupDROAttr',
'ReSampleAttr',
'ReWeightAttr',
]
def get_algorithm_class(algorithm_name):
"""Return the algorithm class with the given name."""
if algorithm_name not in globals():
raise NotImplementedError("Algorithm not found: {}".format(algorithm_name))
return globals()[algorithm_name]
class Algorithm(torch.nn.Module):
"""
A subclass of Algorithm implements a subgroup robustness algorithm.
Subclasses should implement the following:
- _init_model()
- _compute_loss()
- update()
- return_feats()
- predict()
"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(Algorithm, self).__init__()
self.hparams = hparams
self.data_type = data_type
self.num_classes = num_classes
self.num_attributes = num_attributes
self.num_examples = num_examples
def _init_model(self):
raise NotImplementedError
def _compute_loss(self, i, x, y, a, step):
raise NotImplementedError
def update(self, minibatch, step):
"""Perform one update step."""
raise NotImplementedError
def return_feats(self, x):
raise NotImplementedError
def predict(self, x):
raise NotImplementedError
def return_groups(self, y, a):
"""Given a list of (y, a) tuples, return indexes of samples belonging to each subgroup"""
idx_g, idx_samples = [], []
all_g = y * self.num_attributes + a
for g in all_g.unique():
idx_g.append(g)
idx_samples.append(all_g == g)
return zip(idx_g, idx_samples)
@staticmethod
def return_attributes(all_a):
"""Given a list of attributes, return indexes of samples belonging to each attribute"""
idx_a, idx_samples = [], []
for a in all_a.unique():
idx_a.append(a)
idx_samples.append(all_a == a)
return zip(idx_a, idx_samples)
class ERM(Algorithm):
"""Empirical Risk Minimization (ERM)"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(ERM, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
self.featurizer = networks.Featurizer(data_type, input_shape, self.hparams)
self.classifier = networks.Classifier(
self.featurizer.n_outputs,
num_classes,
self.hparams['nonlinear_classifier']
)
self.network = nn.Sequential(self.featurizer, self.classifier)
self._init_model()
def _init_model(self):
self.clip_grad = (self.data_type == "text" and self.hparams["optimizer"] == "adamw")
if self.data_type in ["images", "tabular"]:
|
ALGORITHMS = [
'ERM',
'StratifiedERM',
# subgroup methods
'GroupDRO',
'IRM',
'CVaRDRO',
'JTT',
'LISA',
'DFR',
# data augmentation
'Mixup',
# domain generalization methods
'MMD',
'CORAL',
'DANN',
'CDANN',
# imbalanced learning methods
'ReSample',
'ReWeight',
'SqrtReWeight',
'CBLoss',
'Focal',
'LDAM',
'BSoftmax',
'CRT',
'ReWeightCRT',
'VanillaCRT',
# flat minima optimizer
'MA',
'SAM',
# attribute balancing
'GroupDROAttr',
'ReSampleAttr',
'ReWeightAttr',
]
def get_algorithm_class(algorithm_name):
"""Return the algorithm class with the given name."""
if algorithm_name not in globals():
raise NotImplementedError("Algorithm not found: {}".format(algorithm_name))
return globals()[algorithm_name]
class Algorithm(torch.nn.Module):
"""
A subclass of Algorithm implements a subgroup robustness algorithm.
Subclasses should implement the following:
- _init_model()
- _compute_loss()
- update()
- return_feats()
- predict()
"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(Algorithm, self).__init__()
self.hparams = hparams
self.data_type = data_type
self.num_classes = num_classes
self.num_attributes = num_attributes
self.num_examples = num_examples
def _init_model(self):
raise NotImplementedError
def _compute_loss(self, i, x, y, a, step):
raise NotImplementedError
def update(self, minibatch, step):
"""Perform one update step."""
raise NotImplementedError
def return_feats(self, x):
raise NotImplementedError
def predict(self, x):
raise NotImplementedError
def return_groups(self, y, a):
"""Given a list of (y, a) tuples, return indexes of samples belonging to each subgroup"""
idx_g, idx_samples = [], []
all_g = y * self.num_attributes + a
for g in all_g.unique():
idx_g.append(g)
idx_samples.append(all_g == g)
return zip(idx_g, idx_samples)
@staticmethod
def return_attributes(all_a):
"""Given a list of attributes, return indexes of samples belonging to each attribute"""
idx_a, idx_samples = [], []
for a in all_a.unique():
idx_a.append(a)
idx_samples.append(all_a == a)
return zip(idx_a, idx_samples)
class ERM(Algorithm):
"""Empirical Risk Minimization (ERM)"""
def __init__(self, data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes=None, attr_sizes=None):
super(ERM, self).__init__(
data_type, input_shape, num_classes, num_attributes, num_examples, hparams, grp_sizes, attr_sizes)
self.featurizer = networks.Featurizer(data_type, input_shape, self.hparams)
self.classifier = networks.Classifier(
self.featurizer.n_outputs,
num_classes,
self.hparams['nonlinear_classifier']
)
self.network = nn.Sequential(self.featurizer, self.classifier)
self._init_model()
def _init_model(self):
self.clip_grad = (self.data_type == "text" and self.hparams["optimizer"] == "adamw")
if self.data_type in ["images", "tabular"]: | self.optimizer = get_optimizers[self.hparams['optimizer']]( | 2 | 2023-12-15 04:10:31+00:00 | 4k |
RomGai/BrainVis | dc_ldm/modules/diffusionmodules/openaimodel.py | [
{
"identifier": "checkpoint",
"path": "dc_ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n... | from abc import abstractmethod
from functools import partial
from typing import Iterable
from dc_ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from dc_ldm.modules.attention import SpatialTransformer
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch | 2,860 | if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
|
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0].contiguous()
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential( | normalization(channels), | 5 | 2023-12-16 12:52:14+00:00 | 4k |
Rajeshwaran2001/DRM-Media-Tool | decrypter.py | [
{
"identifier": "FileMergerDialog",
"path": "file_merger_dialog.py",
"snippet": "class FileMergerDialog(QDialog):\n def __init__(self, debug_logger, info_logger, folder_path, parent=None):\n super().__init__(parent)\n\n self.folder_path = folder_path\n\n self.setWindowTitle(\"Fil... | from PyQt5.QtWidgets import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QPushButton, QFileDialog, QListWidget
from file_merger_dialog import FileMergerDialog
from helper.message import show_error_message, show_success_message
import os
import sqlite3
import subprocess | 3,468 |
class Decrypter(QWidget):
def __init__(self, debug_logger, info_logger):
super().__init__()
self.init_ui()
self.conn = None # Database connection
self.cursor = None # Database cursor
self.debug_logger = debug_logger
self.info_logger = info_logger
self.create_database()
def init_ui(self):
layout = QVBoxLayout()
# Create a horizontal layout for the "Select Folder" and folder path
select_folder_layout = QHBoxLayout()
select_folder_label = QLabel("Select Folder:")
select_button = QPushButton("Select Folder")
select_button.clicked.connect(self.browse_folder)
self.folder_path_lineedit = QLineEdit()
select_folder_layout.addWidget(select_folder_label)
select_folder_layout.addWidget(select_button)
select_folder_layout.addWidget(self.folder_path_lineedit)
layout.addLayout(select_folder_layout)
# Create horizontal layout for buttons (Check Folder, GetKeys, Decrypt)
buttons_layout = QHBoxLayout()
check_folder_button = QPushButton("Check Folder")
check_folder_button.clicked.connect(self.check_folder_existence)
buttons_layout.addWidget(check_folder_button)
get_keys_button = QPushButton("Get Keys from DB")
get_keys_button.clicked.connect(self.get_keys_from_db)
buttons_layout.addWidget(get_keys_button)
decrypt_button = QPushButton("Decrypt")
decrypt_button.clicked.connect(self.decrypt_files)
buttons_layout.addWidget(decrypt_button)
merge_button = QPushButton("Media Merger")
merge_button.clicked.connect(self.merger)
buttons_layout.addWidget(merge_button)
layout.addLayout(buttons_layout)
# Create a QListWidget for displaying search results
layout.addWidget(QLabel("Search Results:"))
self.search_result_list = QListWidget()
layout.addWidget(self.search_result_list)
self.setLayout(layout)
# Add these methods to handle button clicks
def browse_folder(self):
folder_path = QFileDialog.getExistingDirectory(self, "Select Folder")
if folder_path:
self.folder_path_lineedit.setText(folder_path)
# self.search_database(folder_path)
def check_folder_existence(self):
folder_path = self.folder_path_lineedit.text()
if os.path.exists(folder_path):
show_success_message(self, "Folder exists.")
self.info_logger.info("Folder exists.")
else:
|
class Decrypter(QWidget):
def __init__(self, debug_logger, info_logger):
super().__init__()
self.init_ui()
self.conn = None # Database connection
self.cursor = None # Database cursor
self.debug_logger = debug_logger
self.info_logger = info_logger
self.create_database()
def init_ui(self):
layout = QVBoxLayout()
# Create a horizontal layout for the "Select Folder" and folder path
select_folder_layout = QHBoxLayout()
select_folder_label = QLabel("Select Folder:")
select_button = QPushButton("Select Folder")
select_button.clicked.connect(self.browse_folder)
self.folder_path_lineedit = QLineEdit()
select_folder_layout.addWidget(select_folder_label)
select_folder_layout.addWidget(select_button)
select_folder_layout.addWidget(self.folder_path_lineedit)
layout.addLayout(select_folder_layout)
# Create horizontal layout for buttons (Check Folder, GetKeys, Decrypt)
buttons_layout = QHBoxLayout()
check_folder_button = QPushButton("Check Folder")
check_folder_button.clicked.connect(self.check_folder_existence)
buttons_layout.addWidget(check_folder_button)
get_keys_button = QPushButton("Get Keys from DB")
get_keys_button.clicked.connect(self.get_keys_from_db)
buttons_layout.addWidget(get_keys_button)
decrypt_button = QPushButton("Decrypt")
decrypt_button.clicked.connect(self.decrypt_files)
buttons_layout.addWidget(decrypt_button)
merge_button = QPushButton("Media Merger")
merge_button.clicked.connect(self.merger)
buttons_layout.addWidget(merge_button)
layout.addLayout(buttons_layout)
# Create a QListWidget for displaying search results
layout.addWidget(QLabel("Search Results:"))
self.search_result_list = QListWidget()
layout.addWidget(self.search_result_list)
self.setLayout(layout)
# Add these methods to handle button clicks
def browse_folder(self):
folder_path = QFileDialog.getExistingDirectory(self, "Select Folder")
if folder_path:
self.folder_path_lineedit.setText(folder_path)
# self.search_database(folder_path)
def check_folder_existence(self):
folder_path = self.folder_path_lineedit.text()
if os.path.exists(folder_path):
show_success_message(self, "Folder exists.")
self.info_logger.info("Folder exists.")
else: | show_error_message(self, "Folder does not exist.") | 1 | 2023-12-18 11:50:40+00:00 | 4k |
gmum/ViewingDirectionGaussianSplatting | scene/gaussian_model.py | [
{
"identifier": "inverse_sigmoid",
"path": "utils/general_utils.py",
"snippet": "def inverse_sigmoid(x):\n return torch.log(x/(1-x))"
},
{
"identifier": "get_expon_lr_func",
"path": "utils/general_utils.py",
"snippet": "def get_expon_lr_func(\n lr_init, lr_final, lr_delay_steps=0, ... | import torch
import numpy as np
import os
from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
from torch import nn
from utils.system_utils import mkdir_p
from plyfile import PlyData, PlyElement
from utils.sh_utils import RGB2SH
from simple_knn._C import distCUDA2
from utils.graphics_utils import BasicPointCloud
from utils.general_utils import strip_symmetric, build_scaling_rotation
from scene.nerf_model import MLP, Embedder | 2,609 | #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
actual_covariance = L @ L.transpose(1, 2)
symm = strip_symmetric(actual_covariance)
return symm
self.scaling_activation = torch.exp
self.scaling_inverse_activation = torch.log
self.covariance_activation = build_covariance_from_scaling_rotation
self.opacity_activation = torch.sigmoid
self.inverse_opacity_activation = inverse_sigmoid
self.rotation_activation = torch.nn.functional.normalize
def __init__(self, sh_degree : int):
self.active_sh_degree = 0
self.max_sh_degree = sh_degree
self._xyz = torch.empty(0)
self._features_dc = torch.empty(0)
self._features_rest = torch.empty(0)
self._scaling = torch.empty(0)
self._rotation = torch.empty(0)
self._opacity = torch.empty(0)
self.max_radii2D = torch.empty(0)
self.xyz_gradient_accum = torch.empty(0)
self.denom = torch.empty(0)
#self._mlp_r: MLP = None
self._mlp: MLP = None
self.optimizer = None
self.nn_optimizer = None
self.percent_dense = 0
self.spatial_lr_scale = 0
self.setup_functions()
def capture(self):
return (
self.active_sh_degree,
self._xyz,
self._features_dc,
self._features_rest,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
self.xyz_gradient_accum,
self.denom,
self.optimizer.state_dict(),
self._mlp.state_dict(),
self.mlp_optimizer.state_dict(),
self.spatial_lr_scale,
)
def restore(self, model_args, training_args):
(self.active_sh_degree,
self._xyz,
self._features_dc,
self._features_rest,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
xyz_gradient_accum,
denom,
opt_dict,
self.spatial_lr_scale) = model_args
self.training_setup(training_args)
self.xyz_gradient_accum = xyz_gradient_accum
self.denom = denom
self.optimizer.load_state_dict(opt_dict)
self._mlp.load_state_dict(opt_dict)
@property
def get_scaling(self):
return self.scaling_activation(self._scaling)
@property
def get_rotation(self):
return self.rotation_activation(self._rotation)
@property
def get_xyz(self):
return self._xyz
@property
def get_features(self):
features_dc = self._features_dc
features_rest = self._features_rest
return torch.cat((features_dc, features_rest), dim=1)
@property
def get_opacity(self):
return self.opacity_activation(self._opacity)
def get_covariance(self, scaling_modifier = 1):
return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
def oneupSHdegree(self):
if self.active_sh_degree < self.max_sh_degree:
self.active_sh_degree += 1
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
actual_covariance = L @ L.transpose(1, 2)
symm = strip_symmetric(actual_covariance)
return symm
self.scaling_activation = torch.exp
self.scaling_inverse_activation = torch.log
self.covariance_activation = build_covariance_from_scaling_rotation
self.opacity_activation = torch.sigmoid
self.inverse_opacity_activation = inverse_sigmoid
self.rotation_activation = torch.nn.functional.normalize
def __init__(self, sh_degree : int):
self.active_sh_degree = 0
self.max_sh_degree = sh_degree
self._xyz = torch.empty(0)
self._features_dc = torch.empty(0)
self._features_rest = torch.empty(0)
self._scaling = torch.empty(0)
self._rotation = torch.empty(0)
self._opacity = torch.empty(0)
self.max_radii2D = torch.empty(0)
self.xyz_gradient_accum = torch.empty(0)
self.denom = torch.empty(0)
#self._mlp_r: MLP = None
self._mlp: MLP = None
self.optimizer = None
self.nn_optimizer = None
self.percent_dense = 0
self.spatial_lr_scale = 0
self.setup_functions()
def capture(self):
return (
self.active_sh_degree,
self._xyz,
self._features_dc,
self._features_rest,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
self.xyz_gradient_accum,
self.denom,
self.optimizer.state_dict(),
self._mlp.state_dict(),
self.mlp_optimizer.state_dict(),
self.spatial_lr_scale,
)
def restore(self, model_args, training_args):
(self.active_sh_degree,
self._xyz,
self._features_dc,
self._features_rest,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
xyz_gradient_accum,
denom,
opt_dict,
self.spatial_lr_scale) = model_args
self.training_setup(training_args)
self.xyz_gradient_accum = xyz_gradient_accum
self.denom = denom
self.optimizer.load_state_dict(opt_dict)
self._mlp.load_state_dict(opt_dict)
@property
def get_scaling(self):
return self.scaling_activation(self._scaling)
@property
def get_rotation(self):
return self.rotation_activation(self._rotation)
@property
def get_xyz(self):
return self._xyz
@property
def get_features(self):
features_dc = self._features_dc
features_rest = self._features_rest
return torch.cat((features_dc, features_rest), dim=1)
@property
def get_opacity(self):
return self.opacity_activation(self._opacity)
def get_covariance(self, scaling_modifier = 1):
return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
def oneupSHdegree(self):
if self.active_sh_degree < self.max_sh_degree:
self.active_sh_degree += 1
| def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float): | 5 | 2023-12-21 10:09:17+00:00 | 4k |
tonnetonne814/PL-Bert-VITS2 | models.py | [
{
"identifier": "get_padding",
"path": "commons.py",
"snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)"
},
{
"identifier": "init_weights",
"path": "commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classnam... | import copy
import math
import torch
import attentions
import commons
import modules
import monotonic_align
from torch import nn
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from commons import get_padding, init_weights | 2,274 | ):
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.n_flows = n_flows
self.gin_channels = gin_channels
self.flows = nn.ModuleList()
for i in range(n_flows):
self.flows.append(
modules.ResidualCouplingLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
def forward(self, x, x_mask, g=None, reverse=False):
if not reverse:
for flow in self.flows:
x, _ = flow(x, x_mask, g=g, reverse=reverse)
else:
for flow in reversed(self.flows):
x = flow(x, x_mask, g=g, reverse=reverse)
return x
class PosteriorEncoder(nn.Module):
def __init__(
self,
in_channels,
out_channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
self.enc = modules.WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths, g=None):
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
x.dtype
)
x = self.pre(x) * x_mask
x = self.enc(x, x_mask, g=g)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
return z, m, logs, x_mask
class Generator(torch.nn.Module):
def __init__(
self,
initial_channel,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=0,
):
super(Generator, self).__init__()
self.num_kernels = len(resblock_kernel_sizes)
self.num_upsamples = len(upsample_rates)
self.conv_pre = Conv1d(
initial_channel, upsample_initial_channel, 7, 1, padding=3
)
resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
self.ups.append(
weight_norm(
ConvTranspose1d(
upsample_initial_channel // (2**i),
upsample_initial_channel // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(
zip(resblock_kernel_sizes, resblock_dilation_sizes)
):
self.resblocks.append(resblock(ch, k, d))
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
|
AVAILABLE_FLOW_TYPES = [
"pre_conv",
"pre_conv2",
"fft",
"mono_layer_inter_residual",
"mono_layer_post_residual",
]
AVAILABLE_DURATION_DISCRIMINATOR_TYPES = [
"dur_disc_1",
"dur_disc_2",
]
class StochasticDurationPredictor(nn.Module):
def __init__(
self,
in_channels,
filter_channels,
kernel_size,
p_dropout,
n_flows=4,
gin_channels=0,
):
super().__init__()
filter_channels = in_channels # it needs to be removed from future version.
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.n_flows = n_flows
self.gin_channels = gin_channels
self.log_flow = modules.Log()
self.flows = nn.ModuleList()
self.flows.append(modules.ElementwiseAffine(2))
for i in range(n_flows):
self.flows.append(
modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)
)
self.flows.append(modules.Flip())
self.post_pre = nn.Conv1d(1, filter_channels, 1)
self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
self.post_convs = modules.DDSConv(
filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout
)
self.post_flows = nn.ModuleList()
self.post_flows.append(modules.ElementwiseAffine(2))
for i in range(4):
self.post_flows.append(
modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)
)
self.post_flows.append(modules.Flip())
self.pre = nn.Conv1d(in_channels, filter_channels, 1)
self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
self.convs = modules.DDSConv(
filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout
)
if gin_channels != 0:
self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
x = torch.detach(x)
x = self.pre(x)
if g is not None:
g = torch.detach(g)
x = x + self.cond(g)
x = self.convs(x, x_mask)
x = self.proj(x) * x_mask
if not reverse:
flows = self.flows
assert w is not None
logdet_tot_q = 0
h_w = self.post_pre(w)
h_w = self.post_convs(h_w, x_mask)
h_w = self.post_proj(h_w) * x_mask
e_q = (
torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype)
* x_mask
)
z_q = e_q
for flow in self.post_flows:
z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
logdet_tot_q += logdet_q
z_u, z1 = torch.split(z_q, [1, 1], 1)
u = torch.sigmoid(z_u) * x_mask
z0 = (w - u) * x_mask
logdet_tot_q += torch.sum(
(F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]
)
logq = (
torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q**2)) * x_mask, [1, 2])
- logdet_tot_q
)
logdet_tot = 0
z0, logdet = self.log_flow(z0, x_mask)
logdet_tot += logdet
z = torch.cat([z0, z1], 1)
for flow in flows:
z, logdet = flow(z, x_mask, g=x, reverse=reverse)
logdet_tot = logdet_tot + logdet
nll = (
torch.sum(0.5 * (math.log(2 * math.pi) + (z**2)) * x_mask, [1, 2])
- logdet_tot
)
return nll + logq # [b]
else:
flows = list(reversed(self.flows))
flows = flows[:-2] + [flows[-1]] # remove a useless vflow
z = (
torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype)
* noise_scale
)
for flow in flows:
z = flow(z, x_mask, g=x, reverse=reverse)
z0, z1 = torch.split(z, [1, 1], 1)
logw = z0
return logw
class DurationPredictor(nn.Module):
def __init__(
self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.gin_channels = gin_channels
self.drop = nn.Dropout(p_dropout)
self.conv_1 = nn.Conv1d(
in_channels, filter_channels, kernel_size, padding=kernel_size // 2
)
self.norm_1 = modules.LayerNorm(filter_channels)
self.conv_2 = nn.Conv1d(
filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
)
self.norm_2 = modules.LayerNorm(filter_channels)
self.proj = nn.Conv1d(filter_channels, 1, 1)
if gin_channels != 0:
self.cond = nn.Conv1d(gin_channels, in_channels, 1)
def forward(self, x, x_mask, g=None):
x = torch.detach(x)
if g is not None:
g = torch.detach(g)
x = x + self.cond(g)
x = self.conv_1(x * x_mask)
x = torch.relu(x)
x = self.norm_1(x)
x = self.drop(x)
x = self.conv_2(x * x_mask)
x = torch.relu(x)
x = self.norm_2(x)
x = self.drop(x)
x = self.proj(x * x_mask)
return x * x_mask
class DurationDiscriminatorV1(nn.Module): # vits2
# TODO : not using "spk conditioning" for now according to the paper.
# Can be a better discriminator if we use it.
def __init__(
self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.gin_channels = gin_channels
self.drop = nn.Dropout(p_dropout)
self.conv_1 = nn.Conv1d(
in_channels, filter_channels, kernel_size, padding=kernel_size // 2
)
# self.norm_1 = modules.LayerNorm(filter_channels)
self.conv_2 = nn.Conv1d(
filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
)
# self.norm_2 = modules.LayerNorm(filter_channels)
self.dur_proj = nn.Conv1d(1, filter_channels, 1)
self.pre_out_conv_1 = nn.Conv1d(
2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
)
self.pre_out_norm_1 = modules.LayerNorm(filter_channels)
self.pre_out_conv_2 = nn.Conv1d(
filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
)
self.pre_out_norm_2 = modules.LayerNorm(filter_channels)
# if gin_channels != 0:
# self.cond = nn.Conv1d(gin_channels, in_channels, 1)
self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())
def forward_probability(self, x, x_mask, dur, g=None):
dur = self.dur_proj(dur)
x = torch.cat([x, dur], dim=1)
x = self.pre_out_conv_1(x * x_mask)
# x = torch.relu(x)
# x = self.pre_out_norm_1(x)
# x = self.drop(x)
x = self.pre_out_conv_2(x * x_mask)
# x = torch.relu(x)
# x = self.pre_out_norm_2(x)
# x = self.drop(x)
x = x * x_mask
x = x.transpose(1, 2)
output_prob = self.output_layer(x)
return output_prob
def forward(self, x, x_mask, dur_r, dur_hat, g=None):
x = torch.detach(x)
# if g is not None:
# g = torch.detach(g)
# x = x + self.cond(g)
x = self.conv_1(x * x_mask)
# x = torch.relu(x)
# x = self.norm_1(x)
# x = self.drop(x)
x = self.conv_2(x * x_mask)
# x = torch.relu(x)
# x = self.norm_2(x)
# x = self.drop(x)
output_probs = []
for dur in [dur_r, dur_hat]:
output_prob = self.forward_probability(x, x_mask, dur, g)
output_probs.append(output_prob)
return output_probs
class DurationDiscriminatorV2(nn.Module): # vits2
# TODO : not using "spk conditioning" for now according to the paper.
# Can be a better discriminator if we use it.
def __init__(
self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0
):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.gin_channels = gin_channels
self.conv_1 = nn.Conv1d(
in_channels, filter_channels, kernel_size, padding=kernel_size // 2
)
self.norm_1 = modules.LayerNorm(filter_channels)
self.conv_2 = nn.Conv1d(
filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
)
self.norm_2 = modules.LayerNorm(filter_channels)
self.dur_proj = nn.Conv1d(1, filter_channels, 1)
self.pre_out_conv_1 = nn.Conv1d(
2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
)
self.pre_out_norm_1 = modules.LayerNorm(filter_channels)
self.pre_out_conv_2 = nn.Conv1d(
filter_channels, filter_channels, kernel_size, padding=kernel_size // 2
)
self.pre_out_norm_2 = modules.LayerNorm(filter_channels)
# if gin_channels != 0:
# self.cond = nn.Conv1d(gin_channels, in_channels, 1)
self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())
def forward_probability(self, x, x_mask, dur, g=None):
dur = self.dur_proj(dur)
x = torch.cat([x, dur], dim=1)
x = self.pre_out_conv_1(x * x_mask)
x = torch.relu(x)
x = self.pre_out_norm_1(x)
x = self.pre_out_conv_2(x * x_mask)
x = torch.relu(x)
x = self.pre_out_norm_2(x)
x = x * x_mask
x = x.transpose(1, 2)
output_prob = self.output_layer(x)
return output_prob
def forward(self, x, x_mask, dur_r, dur_hat, g=None):
x = torch.detach(x)
# if g is not None:
# g = torch.detach(g)
# x = x + self.cond(g)
x = self.conv_1(x * x_mask)
x = torch.relu(x)
x = self.norm_1(x)
x = self.conv_2(x * x_mask)
x = torch.relu(x)
x = self.norm_2(x)
output_probs = []
for dur in [dur_r, dur_hat]:
output_prob = self.forward_probability(x, x_mask, dur, g)
output_probs.append([output_prob])
return output_probs
class TextEncoder(nn.Module):
def __init__(
self,
n_vocab,
out_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
bert_emb_size = 768,
gin_channels=0,
):
super().__init__()
self.n_vocab = n_vocab
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.gin_channels = gin_channels
self.emb = nn.Embedding(n_vocab, hidden_channels)
nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
self.encoder = attentions.Encoder(
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
gin_channels=self.gin_channels,
)
self.bert_proj = nn.Conv1d(bert_emb_size, hidden_channels, 1)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths, bert, bert_lengths, g=None):
x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
x = torch.transpose(x, 1, -1) # [b, h, t]
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
x.dtype
)
bert = self.bert_proj(bert.permute(0,2,1))
x = x + bert
x = self.encoder(x * x_mask, x_mask, g=g)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
return x, m, logs, x_mask
class ResidualCouplingTransformersLayer2(nn.Module): # vits2
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False,
):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.pre_transformer = attentions.Encoder(
hidden_channels,
hidden_channels,
n_heads=2,
n_layers=1,
kernel_size=kernel_size,
p_dropout=p_dropout,
# window_size=None,
)
self.enc = modules.WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=p_dropout,
gin_channels=gin_channels,
)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0) * x_mask
h = h + self.pre_transformer(h * x_mask, x_mask) # vits2 residual connection
h = self.enc(h, x_mask, g=g)
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1, 2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
class ResidualCouplingTransformersLayer(nn.Module): # vits2
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False,
):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
# vits2
self.pre_transformer = attentions.Encoder(
self.half_channels,
self.half_channels,
n_heads=2,
n_layers=2,
kernel_size=3,
p_dropout=0.1,
window_size=None,
)
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = modules.WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=p_dropout,
gin_channels=gin_channels,
)
# vits2
self.post_transformer = attentions.Encoder(
self.hidden_channels,
self.hidden_channels,
n_heads=2,
n_layers=2,
kernel_size=3,
p_dropout=0.1,
window_size=None,
)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
x0_ = self.pre_transformer(x0 * x_mask, x_mask) # vits2
x0_ = x0_ + x0 # vits2 residual connection
h = self.pre(x0_) * x_mask # changed from x0 to x0_ to retain x0 for the flow
h = self.enc(h, x_mask, g=g)
# vits2 - (experimental;uncomment the following 2 line to use)
# h_ = self.post_transformer(h, x_mask)
# h = h + h_ #vits2 residual connection
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1, 2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
class FFTransformerCouplingLayer(nn.Module): # vits2
def __init__(
self,
channels,
hidden_channels,
kernel_size,
n_layers,
n_heads,
p_dropout=0,
filter_channels=768,
mean_only=False,
gin_channels=0,
):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = attentions.FFT(
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
isflow=True,
gin_channels=gin_channels,
)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
h = self.pre(x0) * x_mask
h_ = self.enc(h, x_mask, g=g)
h = h_ + h
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1, 2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
class MonoTransformerFlowLayer(nn.Module): # vits2
def __init__(
self,
channels,
hidden_channels,
mean_only=False,
residual_connection=False,
# according to VITS-2 paper fig 1B set residual_connection=True
):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.half_channels = channels // 2
self.mean_only = mean_only
self.residual_connection = residual_connection
# vits2
self.pre_transformer = attentions.Encoder(
self.half_channels,
self.half_channels,
n_heads=2,
n_layers=2,
kernel_size=3,
p_dropout=0.1,
window_size=None,
)
self.post = nn.Conv1d(
self.half_channels, self.half_channels * (2 - mean_only), 1
)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
if self.residual_connection:
if not reverse:
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
x0_ = self.pre_transformer(x0, x_mask) # vits2
stats = self.post(x0_) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
x1 = m + x1 * torch.exp(logs) * x_mask
x_ = torch.cat([x0, x1], 1)
x = x + x_
logdet = torch.sum(torch.log(torch.exp(logs) + 1), [1, 2])
logdet = logdet + torch.log(torch.tensor(2)) * (
x0.shape[1] * x0.shape[2]
)
return x, logdet
else:
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
x0 = x0 / 2
x0_ = x0 * x_mask
x0_ = self.pre_transformer(x0, x_mask) # vits2
stats = self.post(x0_) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
x1_ = ((x1 - m) / (1 + torch.exp(-logs))) * x_mask
x = torch.cat([x0, x1_], 1)
return x
else:
x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
x0_ = self.pre_transformer(x0 * x_mask, x_mask) # vits2
h = x0_ + x0 # vits2
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels] * 2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1, 2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
class ResidualCouplingTransformersBlock(nn.Module): # vits2
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
n_flows=4,
gin_channels=0,
use_transformer_flows=False,
transformer_flow_type="pre_conv",
):
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.n_flows = n_flows
self.gin_channels = gin_channels
self.flows = nn.ModuleList()
if use_transformer_flows:
if transformer_flow_type == "pre_conv":
for i in range(n_flows):
self.flows.append(
ResidualCouplingTransformersLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
elif transformer_flow_type == "pre_conv2":
for i in range(n_flows):
self.flows.append(
ResidualCouplingTransformersLayer2(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
elif transformer_flow_type == "fft":
for i in range(n_flows):
self.flows.append(
FFTransformerCouplingLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
elif transformer_flow_type == "mono_layer_inter_residual":
for i in range(n_flows):
self.flows.append(
modules.ResidualCouplingLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
self.flows.append(
MonoTransformerFlowLayer(
channels, hidden_channels, mean_only=True
)
)
elif transformer_flow_type == "mono_layer_post_residual":
for i in range(n_flows):
self.flows.append(
modules.ResidualCouplingLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
self.flows.append(
MonoTransformerFlowLayer(
channels,
hidden_channels,
mean_only=True,
residual_connection=True,
)
)
else:
for i in range(n_flows):
self.flows.append(
modules.ResidualCouplingLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
def forward(self, x, x_mask, g=None, reverse=False):
if not reverse:
for flow in self.flows:
x, _ = flow(x, x_mask, g=g, reverse=reverse)
else:
for flow in reversed(self.flows):
x = flow(x, x_mask, g=g, reverse=reverse)
return x
class ResidualCouplingBlock(nn.Module):
def __init__(
self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
n_flows=4,
gin_channels=0,
):
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.n_flows = n_flows
self.gin_channels = gin_channels
self.flows = nn.ModuleList()
for i in range(n_flows):
self.flows.append(
modules.ResidualCouplingLayer(
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
mean_only=True,
)
)
self.flows.append(modules.Flip())
def forward(self, x, x_mask, g=None, reverse=False):
if not reverse:
for flow in self.flows:
x, _ = flow(x, x_mask, g=g, reverse=reverse)
else:
for flow in reversed(self.flows):
x = flow(x, x_mask, g=g, reverse=reverse)
return x
class PosteriorEncoder(nn.Module):
def __init__(
self,
in_channels,
out_channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
self.enc = modules.WN(
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=gin_channels,
)
self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
def forward(self, x, x_lengths, g=None):
x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
x.dtype
)
x = self.pre(x) * x_mask
x = self.enc(x, x_mask, g=g)
stats = self.proj(x) * x_mask
m, logs = torch.split(stats, self.out_channels, dim=1)
z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
return z, m, logs, x_mask
class Generator(torch.nn.Module):
def __init__(
self,
initial_channel,
resblock,
resblock_kernel_sizes,
resblock_dilation_sizes,
upsample_rates,
upsample_initial_channel,
upsample_kernel_sizes,
gin_channels=0,
):
super(Generator, self).__init__()
self.num_kernels = len(resblock_kernel_sizes)
self.num_upsamples = len(upsample_rates)
self.conv_pre = Conv1d(
initial_channel, upsample_initial_channel, 7, 1, padding=3
)
resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
self.ups.append(
weight_norm(
ConvTranspose1d(
upsample_initial_channel // (2**i),
upsample_initial_channel // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(
zip(resblock_kernel_sizes, resblock_dilation_sizes)
):
self.resblocks.append(resblock(ch, k, d))
self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) | self.ups.apply(init_weights) | 1 | 2023-12-16 05:34:02+00:00 | 4k |
Ruiyuan-Zhang/CCS | multi_part_assembly/models/modules/encoder/point_transformer/model.py | [
{
"identifier": "PointNetFeaturePropagation",
"path": "multi_part_assembly/models/modules/encoder/point_transformer/pointnet_util.py",
"snippet": "class PointNetFeaturePropagation(nn.Module):\n def __init__(self, in_channel, mlp):\n super(PointNetFeaturePropagation, self).__init__()\n s... | import torch
import torch.nn as nn
from multi_part_assembly.models.modules.encoder.point_transformer.pointnet_util import PointNetFeaturePropagation, PointNetSetAbstraction
from .transformer import TransformerBlock | 2,117 |
class TransitionDown(nn.Module):
def __init__(self, k, nneighbor, channels):
super().__init__()
# The objective of PointNetSetAbstraction is to downsample and aggregate the input point cloud dataset, generating more advanced feature representations.
self.sa = PointNetSetAbstraction(k, 0, nneighbor, channels[0], channels[1:], group_all=False, knn=True)
def forward(self, xyz, points):
return self.sa(xyz, points)
class TransitionUp(nn.Module):
def __init__(self, dim1, dim2, dim_out):
class SwapAxes(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.transpose(1, 2)
super().__init__()
self.fc1 = nn.Sequential(
nn.Linear(dim1, dim_out),
SwapAxes(),
nn.BatchNorm1d(dim_out), # TODO
SwapAxes(),
nn.ReLU(),
)
self.fc2 = nn.Sequential(
nn.Linear(dim2, dim_out),
SwapAxes(),
nn.BatchNorm1d(dim_out), # TODO
SwapAxes(),
nn.ReLU(),
)
self.fp = PointNetFeaturePropagation(-1, [])
def forward(self, xyz1, points1, xyz2, points2):
feats1 = self.fc1(points1)
feats2 = self.fc2(points2)
feats1 = self.fp(xyz2.transpose(1, 2), xyz1.transpose(1, 2), None, feats1.transpose(1, 2)).transpose(1, 2)
return feats1 + feats2
class Backbone(nn.Module):
def __init__(self, cfg):
super().__init__()
npoints, nblocks, nneighbor, n_c, d_points = cfg.num_point, cfg.model.nblocks, cfg.model.nneighbor, cfg.num_class, cfg.input_dim
self.fc1 = nn.Sequential(
nn.Linear(d_points, 32),
nn.ReLU(),
nn.Linear(32, 32)
)
|
class TransitionDown(nn.Module):
def __init__(self, k, nneighbor, channels):
super().__init__()
# The objective of PointNetSetAbstraction is to downsample and aggregate the input point cloud dataset, generating more advanced feature representations.
self.sa = PointNetSetAbstraction(k, 0, nneighbor, channels[0], channels[1:], group_all=False, knn=True)
def forward(self, xyz, points):
return self.sa(xyz, points)
class TransitionUp(nn.Module):
def __init__(self, dim1, dim2, dim_out):
class SwapAxes(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.transpose(1, 2)
super().__init__()
self.fc1 = nn.Sequential(
nn.Linear(dim1, dim_out),
SwapAxes(),
nn.BatchNorm1d(dim_out), # TODO
SwapAxes(),
nn.ReLU(),
)
self.fc2 = nn.Sequential(
nn.Linear(dim2, dim_out),
SwapAxes(),
nn.BatchNorm1d(dim_out), # TODO
SwapAxes(),
nn.ReLU(),
)
self.fp = PointNetFeaturePropagation(-1, [])
def forward(self, xyz1, points1, xyz2, points2):
feats1 = self.fc1(points1)
feats2 = self.fc2(points2)
feats1 = self.fp(xyz2.transpose(1, 2), xyz1.transpose(1, 2), None, feats1.transpose(1, 2)).transpose(1, 2)
return feats1 + feats2
class Backbone(nn.Module):
def __init__(self, cfg):
super().__init__()
npoints, nblocks, nneighbor, n_c, d_points = cfg.num_point, cfg.model.nblocks, cfg.model.nneighbor, cfg.num_class, cfg.input_dim
self.fc1 = nn.Sequential(
nn.Linear(d_points, 32),
nn.ReLU(),
nn.Linear(32, 32)
) | self.transformer1 = TransformerBlock(32, cfg.model.transformer_dim, nneighbor) | 2 | 2023-12-15 13:13:01+00:00 | 4k |
uc-vision/taichi-splatting | taichi_splatting/renderer.py | [
{
"identifier": "check_packed3d",
"path": "taichi_splatting/data_types.py",
"snippet": "def check_packed3d(packed_gaussians: torch.Tensor):\n assert len(packed_gaussians.shape) == 2 and packed_gaussians.shape[1] == 11, f\"Expected shape (N, 11), got {packed_gaussians.shape}\" "
},
{
"identifie... | from dataclasses import dataclass
from typing import Optional
from taichi_splatting.data_types import check_packed3d
from taichi_splatting.misc.depth_variance import compute_depth_variance
from taichi_splatting.misc.encode_depth import encode_depth
from taichi_splatting.rasterizer import rasterize, RasterConfig
from taichi_splatting.spherical_harmonics import evaluate_sh_at
from taichi_splatting.perspective import (
frustum_culling, project_to_image, CameraParams)
import torch | 2,295 |
@dataclass
class Rendering:
image: torch.Tensor # (H, W, C)
depth: Optional[torch.Tensor] = None # (H, W)
depth_var: Optional[torch.Tensor] = None # (H, W)
def render_gaussians(
packed_gaussians: torch.Tensor,
features: torch.Tensor,
|
@dataclass
class Rendering:
image: torch.Tensor # (H, W, C)
depth: Optional[torch.Tensor] = None # (H, W)
depth_var: Optional[torch.Tensor] = None # (H, W)
def render_gaussians(
packed_gaussians: torch.Tensor,
features: torch.Tensor, | camera_params: CameraParams, | 8 | 2023-12-17 15:26:52+00:00 | 4k |
smoores-dev/storyteller | storyteller/synchronize/sync.py | [
{
"identifier": "CACHE_DIR",
"path": "storyteller/synchronize/files.py",
"snippet": "CACHE_DIR = f\"{DATA_DIR}/cache\""
},
{
"identifier": "TEXT_DIR",
"path": "storyteller/synchronize/files.py",
"snippet": "TEXT_DIR = f\"{DATA_DIR}/assets/text\""
},
{
"identifier": "get_audio_cha... | from dataclasses import dataclass
from itertools import groupby
from pathlib import Path
from typing import Any, Callable, Dict, List, TypedDict, Union, cast
from fuzzysearch import Match, find_near_matches
from ebooklib import epub
from mutagen.mp4 import MP4
from mutagen.mp3 import MP3
from .files import CACHE_DIR, TEXT_DIR
from .audio import (
get_audio_chapter_filenames,
get_transcriptions,
)
from .epub import (
SentenceRange,
create_media_overlay,
get_chapter_sentences,
get_chapter_text,
get_epub_audio_filename,
get_sentences_with_offsets,
read_epub,
get_chapters,
tag_sentences,
)
import json
import math
import os
import sys
import whisperx.types | 2,619 |
OFFSET_SEARCH_WINDOW_SIZE = 5000
def find_best_offset(
epub_sentences: list[str], transcription_text: str, last_match_offset: int
):
i = 0
while i < len(transcription_text):
start_sentence = 0
start_index = (last_match_offset + i) % len(transcription_text)
end_index = (start_index + OFFSET_SEARCH_WINDOW_SIZE) % len(transcription_text)
if end_index > start_index:
transcription_text_slice = transcription_text[start_index:end_index]
else:
transcription_text_slice = (
transcription_text[start_index:] + transcription_text[:end_index]
)
while start_sentence < len(epub_sentences):
query_string = " ".join(epub_sentences[start_sentence : start_sentence + 6])
with NullIO():
matches = find_near_matches(
query_string.lower(),
transcription_text_slice.lower(),
max_l_dist=math.floor(0.1 * len(query_string)),
)
matches = cast(List[Match], matches)
if len(matches) > 0:
return (start_sentence, matches[0].start + start_index)
start_sentence += 3
i += OFFSET_SEARCH_WINDOW_SIZE // 2
return (0, None)
class StorytellerTranscriptionSegment(whisperx.types.SingleAlignedSegment):
audiofile: str
class StorytellerTranscription(TypedDict):
segments: List[StorytellerTranscriptionSegment]
word_segments: List[whisperx.types.SingleWordSegment]
def concat_transcriptions(
transcriptions: List[whisperx.types.AlignedTranscriptionResult],
audiofiles: List[str],
):
result = StorytellerTranscription(segments=[], word_segments=[])
for transcription, audiofile in zip(transcriptions, audiofiles):
result["word_segments"].extend(transcription["word_segments"])
result["segments"].extend(
[
StorytellerTranscriptionSegment(**segment, audiofile=audiofile)
for segment in transcription["segments"]
]
)
return result
def get_transcription_text(transcription: StorytellerTranscription):
return " ".join([segment["text"] for segment in transcription["segments"]])
def find_timestamps(match_start_index: int, transcription: StorytellerTranscription):
    """Map a character offset in the concatenated transcription text back to
    an audio timestamp.

    *match_start_index* is an offset into the string produced by
    get_transcription_text() (segment texts joined by single spaces).
    Returns ``(start_seconds, audiofile)`` for the word covering that offset,
    falling back to the segment's start when the word carries no timestamp.
    """
    s = 0
    position = 0
    while True:
        # Skip whole segments that end before the target offset; the +1
        # accounts for the space inserted between joined segment texts.
        while position + len(transcription["segments"][s]["text"]) < match_start_index:  # type: ignore
            position += len(transcription["segments"][s]["text"]) + 1  # type: ignore
            s += 1
        w = 0
        segment = transcription["segments"][s]
        # Walk words inside the segment until one covers the offset.
        while (
            w < len(segment["words"])
            and position + len(segment["words"][w]["word"]) <= match_start_index
        ):
            position += len(segment["words"][w]["word"]) + 1
            w += 1
        if w >= len(segment["words"]):
            # Offset falls past this segment's last word; continue scanning.
            s += 1
            continue
        break
    start_word = segment["words"][w]
    # If a segment only has one word, the start and
    # end timestamps are only placed on the segment
    if "start" in start_word:
        return start_word["start"], segment["audiofile"]
    return segment["start"], segment["audiofile"]
def get_window_index_from_offset(window: List[str], offset: int):
    """Return the index of the string in *window* that contains character
    *offset* of their concatenation."""
    index = 0
    remaining = offset
    while remaining >= len(window[index]):
        remaining -= len(window[index])
        index += 1
    return index
def get_sentence_ranges(
start_sentence: int,
transcription: StorytellerTranscription,
sentences: List[str],
chapter_offset: int,
last_sentence_range: Union[SentenceRange, None],
):
sentence_ranges: List[SentenceRange] = []
transcription_text = get_transcription_text(transcription).lower()[chapter_offset:]
|
class NullIO:
    """Context manager that temporarily silences stdout and stderr.

    On entry both streams are swapped for handles to ``os.devnull``; on exit
    the devnull handles are closed and the originals are restored.
    """

    def __enter__(self):
        self._original_stdout, self._original_stderr = sys.stdout, sys.stderr
        sys.stdout = open(os.devnull, "w")
        sys.stderr = open(os.devnull, "w")

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.stdout.close()
        sys.stderr.close()
        sys.stdout = self._original_stdout
        sys.stderr = self._original_stderr
# Size, in characters, of each transcription window scanned per iteration.
OFFSET_SEARCH_WINDOW_SIZE = 5000
def find_best_offset(
    epub_sentences: list[str], transcription_text: str, last_match_offset: int
):
    """Locate where the epub text resumes inside the transcription.

    Scans *transcription_text* in windows of OFFSET_SEARCH_WINDOW_SIZE
    characters, advancing half a window at a time and wrapping around the
    end, starting near *last_match_offset*. Within each window, fuzzy-matches
    a query built from six consecutive epub sentences, advancing the starting
    sentence by three on each miss.

    Returns ``(start_sentence, offset)`` where *offset* is the absolute
    character offset of the match in *transcription_text*, or ``(0, None)``
    when no window matched.
    """
    i = 0
    while i < len(transcription_text):
        start_sentence = 0
        start_index = (last_match_offset + i) % len(transcription_text)
        end_index = (start_index + OFFSET_SEARCH_WINDOW_SIZE) % len(transcription_text)
        if end_index > start_index:
            transcription_text_slice = transcription_text[start_index:end_index]
        else:
            # Window wraps past the end of the text: stitch the two pieces.
            transcription_text_slice = (
                transcription_text[start_index:] + transcription_text[:end_index]
            )
        while start_sentence < len(epub_sentences):
            query_string = " ".join(epub_sentences[start_sentence : start_sentence + 6])
            # NullIO suppresses console noise emitted by find_near_matches.
            with NullIO():
                matches = find_near_matches(
                    query_string.lower(),
                    transcription_text_slice.lower(),
                    # Tolerate edits up to 10% of the query length.
                    max_l_dist=math.floor(0.1 * len(query_string)),
                )
            matches = cast(List[Match], matches)
            if len(matches) > 0:
                # NOTE(review): for a wrapped window this offset can exceed
                # len(transcription_text) — confirm callers tolerate that.
                return (start_sentence, matches[0].start + start_index)
            start_sentence += 3
        i += OFFSET_SEARCH_WINDOW_SIZE // 2
    return (0, None)
class StorytellerTranscriptionSegment(whisperx.types.SingleAlignedSegment):
    """Aligned transcription segment tagged with its source audio file."""

    # Path/name of the audio file this segment was transcribed from.
    audiofile: str
class StorytellerTranscription(TypedDict):
    """Whole-book transcription: all segments plus word-level timings."""

    segments: List[StorytellerTranscriptionSegment]
    word_segments: List[whisperx.types.SingleWordSegment]
def concat_transcriptions(
    transcriptions: List[whisperx.types.AlignedTranscriptionResult],
    audiofiles: List[str],
):
    """Merge per-file aligned transcriptions into a single transcription,
    tagging every segment with the audio file it came from."""
    merged = StorytellerTranscription(segments=[], word_segments=[])
    for transcription, audiofile in zip(transcriptions, audiofiles):
        merged["word_segments"] += transcription["word_segments"]
        merged["segments"] += [
            StorytellerTranscriptionSegment(**segment, audiofile=audiofile)
            for segment in transcription["segments"]
        ]
    return merged
def get_transcription_text(transcription: StorytellerTranscription):
    """Concatenate all segment texts into one space-separated string."""
    segment_texts = (segment["text"] for segment in transcription["segments"])
    return " ".join(segment_texts)
def find_timestamps(match_start_index: int, transcription: StorytellerTranscription):
    """Map a character offset in the concatenated transcription text back to
    an audio timestamp.

    *match_start_index* is an offset into the string produced by
    get_transcription_text() (segment texts joined by single spaces).
    Returns ``(start_seconds, audiofile)`` for the word covering that offset,
    falling back to the segment's start when the word carries no timestamp.
    """
    s = 0
    position = 0
    while True:
        # Skip whole segments that end before the target offset; the +1
        # accounts for the space inserted between joined segment texts.
        while position + len(transcription["segments"][s]["text"]) < match_start_index:  # type: ignore
            position += len(transcription["segments"][s]["text"]) + 1  # type: ignore
            s += 1
        w = 0
        segment = transcription["segments"][s]
        # Walk words inside the segment until one covers the offset.
        while (
            w < len(segment["words"])
            and position + len(segment["words"][w]["word"]) <= match_start_index
        ):
            position += len(segment["words"][w]["word"]) + 1
            w += 1
        if w >= len(segment["words"]):
            # Offset falls past this segment's last word; continue scanning.
            s += 1
            continue
        break
    start_word = segment["words"][w]
    # If a segment only has one word, the start and
    # end timestamps are only placed on the segment
    if "start" in start_word:
        return start_word["start"], segment["audiofile"]
    return segment["start"], segment["audiofile"]
def get_window_index_from_offset(window: List[str], offset: int):
    """Return the index of the string in *window* that contains character
    *offset* of their concatenation."""
    index = 0
    remaining = offset
    while remaining >= len(window[index]):
        remaining -= len(window[index])
        index += 1
    return index
def get_sentence_ranges(
start_sentence: int,
transcription: StorytellerTranscription,
sentences: List[str],
chapter_offset: int,
last_sentence_range: Union[SentenceRange, None],
):
sentence_ranges: List[SentenceRange] = []
transcription_text = get_transcription_text(transcription).lower()[chapter_offset:] | transcription_sentences = get_sentences_with_offsets(transcription_text) | 9 | 2023-12-15 16:07:12+00:00 | 4k |
zyrant/SPGroup3D | tools/train.py | [
{
"identifier": "__version__",
"path": "mmdet3d/version.py",
"snippet": "def parse_version_info(version_str):"
},
{
"identifier": "__version__",
"path": "mmdet3d/version.py",
"snippet": "def parse_version_info(version_str):"
},
{
"identifier": "init_random_seed",
"path": "mmd... | import os
import argparse
import copy
import time
import warnings
import mmcv
import torch
import torch.distributed as dist
import importlib
from os import path as osp
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmdet import __version__ as mmdet_version
from mmdet3d import __version__ as mmdet3d_version
from mmdet3d.apis import init_random_seed, train_model
from mmdet3d.datasets import build_dataset
from mmdet3d.models import build_model
from mmdet3d.utils import collect_env, get_root_logger
from mmdet.apis import set_random_seed
from mmseg import __version__ as mmseg_version
from mmdet.utils import setup_multi_processes
from mmdet3d.utils import setup_multi_processes | 3,355 | if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both specified, '
'--options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from plguin/xx, registry will be updated
if hasattr(cfg, "plugin"):
if cfg.plugin:
if hasattr(cfg, "plugin_dir"):
plugin_dir = cfg.plugin_dir
_module_dir = os.path.dirname(plugin_dir)
_module_dir = _module_dir.split("/")
_module_path = _module_dir[0]
for m in _module_dir[1:]:
_module_path = _module_path + "." + m
print(_module_path)
plg_lib = importlib.import_module(_module_path)
else:
# import dir is the dirpath for the config file
_module_dir = os.path.dirname(args.config)
_module_dir = _module_dir.split("/")
_module_path = _module_dir[0]
for m in _module_dir[1:]:
_module_path = _module_path + "." + m
print(_module_path)
plg_lib = importlib.import_module(_module_path)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.auto_resume:
cfg.auto_resume = args.auto_resume
warnings.warn('`--auto-resume` is only supported when mmdet'
'version >= 2.20.0 for 3D detection model or'
'mmsegmentation verision >= 0.21.0 for 3D'
'segmentation model')
if args.gpus is not None:
cfg.gpu_ids = range(1)
warnings.warn('`--gpus` is deprecated because we only support '
'single GPU mode in non-distributed training. '
'Use `gpus=1` now.')
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids[0:1]
warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
'Because we only support single GPU mode in '
'non-distributed training. Use the first GPU '
'in `gpu_ids` now.')
if args.gpus is None and args.gpu_ids is None:
cfg.gpu_ids = [args.gpu_id]
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
# specify logger name, if we still use 'mmdet', the output info will be
# filtered and won't be saved in the log_file
# TODO: ugly workaround to judge whether we are training det or seg model
if cfg.model.type in ['EncoderDecoder3D']:
logger_name = 'mmseg'
else:
logger_name = 'mmdet'
logger = get_root_logger(
log_file=log_file, log_level=cfg.log_level, name=logger_name)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
| # Copyright (c) OpenMMLab. All rights reserved.
from __future__ import division
try:
# If mmdet version > 2.20.0, setup_multi_processes would be imported and
# used from mmdet instead of mmdet3d.
except ImportError:
def parse_args():
    """Build and parse the command-line arguments for training.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable for distributed launchers, and migrates the deprecated
    ``--options`` flag to ``--cfg-options``.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from',
        help='the checkpoint file to resume from')
    parser.add_argument(
        '--auto-resume',
        action='store_true',
        help='resume from the latest checkpoint automatically')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # --gpus / --gpu-ids / --gpu-id are mutually exclusive; the first two
    # are kept only for backward compatibility.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='(Deprecated, please use --gpu-id) number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument(
        '--diff-seed',
        action='store_true',
        help='Whether or not set different seeds for different ranks')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--autoscale-lr',
        action='store_true',
        help='automatically scale lr with the number of gpus')
    args = parser.parse_args()
    # Propagate --local_rank to the environment for torch.distributed tools.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both specified, '
            '--options is deprecated in favor of --cfg-options')
    if args.options:
        # Fold the deprecated --options into --cfg-options.
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from plguin/xx, registry will be updated
if hasattr(cfg, "plugin"):
if cfg.plugin:
if hasattr(cfg, "plugin_dir"):
plugin_dir = cfg.plugin_dir
_module_dir = os.path.dirname(plugin_dir)
_module_dir = _module_dir.split("/")
_module_path = _module_dir[0]
for m in _module_dir[1:]:
_module_path = _module_path + "." + m
print(_module_path)
plg_lib = importlib.import_module(_module_path)
else:
# import dir is the dirpath for the config file
_module_dir = os.path.dirname(args.config)
_module_dir = _module_dir.split("/")
_module_path = _module_dir[0]
for m in _module_dir[1:]:
_module_path = _module_path + "." + m
print(_module_path)
plg_lib = importlib.import_module(_module_path)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.auto_resume:
cfg.auto_resume = args.auto_resume
warnings.warn('`--auto-resume` is only supported when mmdet'
'version >= 2.20.0 for 3D detection model or'
'mmsegmentation verision >= 0.21.0 for 3D'
'segmentation model')
if args.gpus is not None:
cfg.gpu_ids = range(1)
warnings.warn('`--gpus` is deprecated because we only support '
'single GPU mode in non-distributed training. '
'Use `gpus=1` now.')
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids[0:1]
warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
'Because we only support single GPU mode in '
'non-distributed training. Use the first GPU '
'in `gpu_ids` now.')
if args.gpus is None and args.gpu_ids is None:
cfg.gpu_ids = [args.gpu_id]
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
# specify logger name, if we still use 'mmdet', the output info will be
# filtered and won't be saved in the log_file
# TODO: ugly workaround to judge whether we are training det or seg model
if cfg.model.type in ['EncoderDecoder3D']:
logger_name = 'mmseg'
else:
logger_name = 'mmdet'
logger = get_root_logger(
log_file=log_file, log_level=cfg.log_level, name=logger_name)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info | env_info_dict = collect_env() | 6 | 2023-12-21 12:50:35+00:00 | 4k |
jdejaegh/irm-kmi-ha | tests/conftest.py | [
{
"identifier": "IrmKmiApiError",
"path": "custom_components/irm_kmi/api.py",
"snippet": "class IrmKmiApiError(Exception):\n \"\"\"Exception to indicate a general API error.\"\"\""
},
{
"identifier": "IrmKmiApiParametersError",
"path": "custom_components/irm_kmi/api.py",
"snippet": "c... | import json
import pytest
from collections.abc import Generator
from unittest.mock import MagicMock, patch
from homeassistant.const import CONF_ZONE
from pytest_homeassistant_custom_component.common import (MockConfigEntry,
load_fixture)
from custom_components.irm_kmi.api import (IrmKmiApiError,
IrmKmiApiParametersError)
from custom_components.irm_kmi.const import (
CONF_DARK_MODE, CONF_STYLE, CONF_USE_DEPRECATED_FORECAST, DOMAIN,
OPTION_DEPRECATED_FORECAST_NOT_USED, OPTION_STYLE_STD) | 1,613 |
@pytest.fixture
def mock_config_entry() -> MockConfigEntry:
    """Return the default mocked config entry."""
    entry_data = {
        CONF_ZONE: "zone.home",
        CONF_STYLE: OPTION_STYLE_STD,
        CONF_DARK_MODE: True,
        CONF_USE_DEPRECATED_FORECAST: OPTION_DEPRECATED_FORECAST_NOT_USED,
    }
    return MockConfigEntry(
        title="Home",
        domain=DOMAIN,
        data=entry_data,
        unique_id="zone.home",
    )
@pytest.fixture
def mock_setup_entry() -> Generator[None, None, None]:
    """Patch async_setup_entry to succeed for the duration of a test."""
    with patch("custom_components.irm_kmi.async_setup_entry", return_value=True):
        yield
@pytest.fixture
def mock_get_forecast_in_benelux():
    """Make IrmKmiApiClient.get_forecasts_coord() report a Benelux city."""
    target = "custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord"
    with patch(target, return_value={'cityName': 'Brussels'}):
        yield
@pytest.fixture
def mock_get_forecast_out_benelux():
    """Make IrmKmiApiClient.get_forecasts_coord() report a non-Benelux city."""
    target = "custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord"
    with patch(target, return_value={'cityName': "Outside the Benelux (Brussels)"}):
        yield
@pytest.fixture
def mock_get_forecast_api_error():
    """Make IrmKmiApiClient.get_forecasts_coord() raise IrmKmiApiError.

    Fixes two defects in the original: the keyword was misspelled
    ``side_effet`` (which merely set an inert attribute on the mock instead
    of configuring the call to raise), and the fixture ``return``-ed inside
    the ``with`` block, tearing the patch down before the test body ran.
    """
    with patch("custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord",
               side_effect=IrmKmiApiError):
        yield
@pytest.fixture
def mock_get_forecast_api_error_repair():
    """Make the repairs-module IrmKmiApiClient.get_forecasts_coord() raise.

    Fixes two defects in the original: the keyword was misspelled
    ``side_effet`` (which merely set an inert attribute on the mock instead
    of configuring the call to raise), and the fixture ``return``-ed inside
    the ``with`` block, tearing the patch down before the test body ran.
    """
    with patch("custom_components.irm_kmi.repairs.IrmKmiApiClient.get_forecasts_coord",
               side_effect=IrmKmiApiError):
        yield
@pytest.fixture()
def mock_irm_kmi_api(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
    """Yield a mocked IrmKmiApiClient preloaded with the standard forecast."""
    forecast = json.loads(load_fixture("forecast.json"))
    with patch(
        "custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True
    ) as api_cls_mock:
        api_instance = api_cls_mock.return_value
        api_instance.get_forecasts_coord.return_value = forecast
        yield api_instance
@pytest.fixture()
def mock_irm_kmi_api_coordinator_out_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
    """Yield a mocked IrmKmiApiClient whose forecast lies outside the Benelux."""
    forecast = json.loads(load_fixture("forecast_out_of_benelux.json"))
    with patch(
        "custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True
    ) as api_cls_mock:
        api_instance = api_cls_mock.return_value
        api_instance.get_forecasts_coord.return_value = forecast
        yield api_instance
@pytest.fixture()
def mock_irm_kmi_api_repair_in_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
    """Yield a mocked repairs-module IrmKmiApiClient with a Benelux forecast."""
    forecast = json.loads(load_fixture("forecast.json"))
    with patch(
        "custom_components.irm_kmi.repairs.IrmKmiApiClient", autospec=True
    ) as api_cls_mock:
        api_instance = api_cls_mock.return_value
        api_instance.get_forecasts_coord.return_value = forecast
        yield api_instance
@pytest.fixture()
def mock_irm_kmi_api_repair_out_of_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
    """Yield a mocked repairs-module IrmKmiApiClient with a non-Benelux forecast."""
    forecast = json.loads(load_fixture("forecast_out_of_benelux.json"))
    with patch(
        "custom_components.irm_kmi.repairs.IrmKmiApiClient", autospec=True
    ) as api_cls_mock:
        api_instance = api_cls_mock.return_value
        api_instance.get_forecasts_coord.return_value = forecast
        yield api_instance
@pytest.fixture()
def mock_exception_irm_kmi_api(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
"""Return a mocked IrmKmi api client."""
with patch(
"custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True
) as irm_kmi_api_mock:
irm_kmi = irm_kmi_api_mock.return_value
| """Fixtures for the IRM KMI integration tests."""
from __future__ import annotations
def get_api_data(fixture: str) -> dict:
    """Load the named JSON fixture file and return its parsed content."""
    raw = load_fixture(fixture)
    return json.loads(raw)
async def patched(url: str, params: dict | None = None) -> bytes:
if "cdn.knmi.nl" in url:
file_name = "tests/fixtures/clouds_nl.png"
elif "app.meteo.be/services/appv4/?s=getIncaImage" in url:
file_name = "tests/fixtures/clouds_be.png"
elif "getLocalizationLayerBE" in url:
file_name = "tests/fixtures/loc_layer_be_n.png"
elif "getLocalizationLayerNL" in url:
file_name = "tests/fixtures/loc_layer_nl.png"
else:
raise ValueError("Not a valid parameter for the mock")
with open(file_name, "rb") as file:
return file.read()
@pytest.fixture(autouse=True)
def auto_enable_custom_integrations(enable_custom_integrations):
    """Enable loading of custom integrations for every test (autouse)."""
    yield
@pytest.fixture
def mock_config_entry() -> MockConfigEntry:
    """Return the default mocked config entry."""
    entry_data = {
        CONF_ZONE: "zone.home",
        CONF_STYLE: OPTION_STYLE_STD,
        CONF_DARK_MODE: True,
        CONF_USE_DEPRECATED_FORECAST: OPTION_DEPRECATED_FORECAST_NOT_USED,
    }
    return MockConfigEntry(
        title="Home",
        domain=DOMAIN,
        data=entry_data,
        unique_id="zone.home",
    )
@pytest.fixture
def mock_setup_entry() -> Generator[None, None, None]:
    """Patch async_setup_entry to succeed for the duration of a test."""
    with patch("custom_components.irm_kmi.async_setup_entry", return_value=True):
        yield
@pytest.fixture
def mock_get_forecast_in_benelux():
    """Make IrmKmiApiClient.get_forecasts_coord() report a Benelux city."""
    target = "custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord"
    with patch(target, return_value={'cityName': 'Brussels'}):
        yield
@pytest.fixture
def mock_get_forecast_out_benelux():
    """Make IrmKmiApiClient.get_forecasts_coord() report a non-Benelux city."""
    target = "custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord"
    with patch(target, return_value={'cityName': "Outside the Benelux (Brussels)"}):
        yield
@pytest.fixture
def mock_get_forecast_api_error():
    """Make IrmKmiApiClient.get_forecasts_coord() raise IrmKmiApiError.

    Fixes two defects in the original: the keyword was misspelled
    ``side_effet`` (which merely set an inert attribute on the mock instead
    of configuring the call to raise), and the fixture ``return``-ed inside
    the ``with`` block, tearing the patch down before the test body ran.
    """
    with patch("custom_components.irm_kmi.config_flow.IrmKmiApiClient.get_forecasts_coord",
               side_effect=IrmKmiApiError):
        yield
@pytest.fixture
def mock_get_forecast_api_error_repair():
    """Make the repairs-module IrmKmiApiClient.get_forecasts_coord() raise.

    Fixes two defects in the original: the keyword was misspelled
    ``side_effet`` (which merely set an inert attribute on the mock instead
    of configuring the call to raise), and the fixture ``return``-ed inside
    the ``with`` block, tearing the patch down before the test body ran.
    """
    with patch("custom_components.irm_kmi.repairs.IrmKmiApiClient.get_forecasts_coord",
               side_effect=IrmKmiApiError):
        yield
@pytest.fixture()
def mock_irm_kmi_api(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
    """Yield a mocked IrmKmiApiClient preloaded with the standard forecast."""
    forecast = json.loads(load_fixture("forecast.json"))
    with patch(
        "custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True
    ) as api_cls_mock:
        api_instance = api_cls_mock.return_value
        api_instance.get_forecasts_coord.return_value = forecast
        yield api_instance
@pytest.fixture()
def mock_irm_kmi_api_coordinator_out_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
    """Yield a mocked IrmKmiApiClient whose forecast lies outside the Benelux."""
    forecast = json.loads(load_fixture("forecast_out_of_benelux.json"))
    with patch(
        "custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True
    ) as api_cls_mock:
        api_instance = api_cls_mock.return_value
        api_instance.get_forecasts_coord.return_value = forecast
        yield api_instance
@pytest.fixture()
def mock_irm_kmi_api_repair_in_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
    """Yield a mocked repairs-module IrmKmiApiClient with a Benelux forecast."""
    forecast = json.loads(load_fixture("forecast.json"))
    with patch(
        "custom_components.irm_kmi.repairs.IrmKmiApiClient", autospec=True
    ) as api_cls_mock:
        api_instance = api_cls_mock.return_value
        api_instance.get_forecasts_coord.return_value = forecast
        yield api_instance
@pytest.fixture()
def mock_irm_kmi_api_repair_out_of_benelux(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
    """Yield a mocked repairs-module IrmKmiApiClient with a non-Benelux forecast."""
    forecast = json.loads(load_fixture("forecast_out_of_benelux.json"))
    with patch(
        "custom_components.irm_kmi.repairs.IrmKmiApiClient", autospec=True
    ) as api_cls_mock:
        api_instance = api_cls_mock.return_value
        api_instance.get_forecasts_coord.return_value = forecast
        yield api_instance
@pytest.fixture()
def mock_exception_irm_kmi_api(request: pytest.FixtureRequest) -> Generator[None, MagicMock, None]:
"""Return a mocked IrmKmi api client."""
with patch(
"custom_components.irm_kmi.coordinator.IrmKmiApiClient", autospec=True
) as irm_kmi_api_mock:
irm_kmi = irm_kmi_api_mock.return_value | irm_kmi.get_forecasts_coord.side_effect = IrmKmiApiParametersError | 1 | 2023-12-17 16:35:01+00:00 | 4k |
v3ucn/Bert-vits2-V2.2 | compress_model.py | [
{
"identifier": "symbols",
"path": "text/symbols.py",
"snippet": ""
},
{
"identifier": "logger",
"path": "tools/log.py",
"snippet": ""
},
{
"identifier": "SynthesizerTrn",
"path": "models.py",
"snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for T... | from collections import OrderedDict
from text.symbols import symbols
from tools.log import logger
from models import SynthesizerTrn
import torch
import utils
import os
import argparse
import os.path | 3,177 |
def copyStateDict(state_dict):
    """Return a copy of *state_dict* with any leading ``module`` component
    (added by DataParallel/DistributedDataParallel) stripped from the keys.

    Bug fix: keys were previously re-joined with ``","`` instead of ``"."``,
    which corrupted every nested key whenever the prefix was stripped.
    """
    if list(state_dict.keys())[0].startswith("module"):
        start_idx = 1
    else:
        start_idx = 0
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        # Re-join with "." so nested keys keep their original form.
        name = ".".join(k.split(".")[start_idx:])
        new_state_dict[name] = v
    return new_state_dict
def removeOptimizer(config: str, input_model: str, ishalf: bool, output_model: str):
    """Strip training-only state from a checkpoint to produce a release model.

    Rebuilds the generator and a fresh AdamW optimizer from *config*, copies
    only the generator weights from *input_model* — dropping every ``enc_q``
    key — optionally casts them to FP16 when *ishalf* is true, and writes the
    slimmed checkpoint to *output_model*.
    """
    hps = utils.get_hparams_from_file(config)
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    )
    # A freshly-initialized optimizer is saved only so the checkpoint keeps
    # the structure downstream loaders expect; it carries no training state.
    optim_g = torch.optim.AdamW(
        net_g.parameters(),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps,
    )
    state_dict_g = torch.load(input_model, map_location="cpu")
    new_dict_g = copyStateDict(state_dict_g)
    keys = []
    # Collect all model keys except the enc_q ones, which are dropped.
    for k, v in new_dict_g["model"].items():
        if "enc_q" in k:
            continue # noqa: E701
        keys.append(k)
    # Optionally cast the retained weights to half precision.
    new_dict_g = (
        {k: new_dict_g["model"][k].half() for k in keys}
        if ishalf
        else {k: new_dict_g["model"][k] for k in keys}
    )
    torch.save(
        {
            "model": new_dict_g,
            "iteration": 0,
            "optimizer": optim_g.state_dict(),
            "learning_rate": 0.0001,
        },
        output_model,
    )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str, default="configs/config.json")
parser.add_argument("-i", "--input", type=str)
parser.add_argument("-o", "--output", type=str, default=None)
parser.add_argument(
"-hf", "--half", action="store_true", default=False, help="Save as FP16"
)
args = parser.parse_args()
output = args.output
if output is None:
filename, ext = os.path.splitext(args.input)
half = "_half" if args.half else ""
output = filename + "_release" + half + ext
removeOptimizer(args.config, args.input, args.half, output)
|
def copyStateDict(state_dict):
    """Return a copy of *state_dict* with any leading ``module`` component
    (added by DataParallel/DistributedDataParallel) stripped from the keys.

    Bug fix: keys were previously re-joined with ``","`` instead of ``"."``,
    which corrupted every nested key whenever the prefix was stripped.
    """
    if list(state_dict.keys())[0].startswith("module"):
        start_idx = 1
    else:
        start_idx = 0
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        # Re-join with "." so nested keys keep their original form.
        name = ".".join(k.split(".")[start_idx:])
        new_state_dict[name] = v
    return new_state_dict
def removeOptimizer(config: str, input_model: str, ishalf: bool, output_model: str):
    """Strip training-only state from a checkpoint to produce a release model.

    Rebuilds the generator and a fresh AdamW optimizer from *config*, copies
    only the generator weights from *input_model* — dropping every ``enc_q``
    key — optionally casts them to FP16 when *ishalf* is true, and writes the
    slimmed checkpoint to *output_model*.
    """
    hps = utils.get_hparams_from_file(config)
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model,
    )
    # A freshly-initialized optimizer is saved only so the checkpoint keeps
    # the structure downstream loaders expect; it carries no training state.
    optim_g = torch.optim.AdamW(
        net_g.parameters(),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps,
    )
    state_dict_g = torch.load(input_model, map_location="cpu")
    new_dict_g = copyStateDict(state_dict_g)
    keys = []
    # Collect all model keys except the enc_q ones, which are dropped.
    for k, v in new_dict_g["model"].items():
        if "enc_q" in k:
            continue # noqa: E701
        keys.append(k)
    # Optionally cast the retained weights to half precision.
    new_dict_g = (
        {k: new_dict_g["model"][k].half() for k in keys}
        if ishalf
        else {k: new_dict_g["model"][k] for k in keys}
    )
    torch.save(
        {
            "model": new_dict_g,
            "iteration": 0,
            "optimizer": optim_g.state_dict(),
            "learning_rate": 0.0001,
        },
        output_model,
    )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", type=str, default="configs/config.json")
parser.add_argument("-i", "--input", type=str)
parser.add_argument("-o", "--output", type=str, default=None)
parser.add_argument(
"-hf", "--half", action="store_true", default=False, help="Save as FP16"
)
args = parser.parse_args()
output = args.output
if output is None:
filename, ext = os.path.splitext(args.input)
half = "_half" if args.half else ""
output = filename + "_release" + half + ext
removeOptimizer(args.config, args.input, args.half, output) | logger.info(f"压缩模型成功, 输出模型: {os.path.abspath(output)}") | 1 | 2023-12-18 04:54:46+00:00 | 4k |
mjunaidca/travel-ai-service | backend/app/service/openai_travel_agent_call.py | [
{
"identifier": "TravelAIChat",
"path": "backend/app/utils/chat_assistant.py",
"snippet": "class TravelAIChat():\n def __init__(self, client: OpenAI, assistant: Assistant, thread: Thread):\n if (client is None):\n raise Exception(\"OpenAI Client is not initialized\")\n self.c... | from ..utils.chat_assistant import TravelAIChat
from ..utils.get_assistant import GetAssistant
from ..utils.thread_manager import CreateThread
from openai.types.beta.threads import ThreadMessage, Run
from openai.types.beta.thread import Thread
from openai.types.beta.assistant import Assistant
from openai import OpenAI
from dotenv import load_dotenv, find_dotenv
import os | 1,803 |
_: bool = load_dotenv(find_dotenv()) # read local .env file
client: OpenAI = OpenAI()
# TODO: If Assistant is present in env no need to retrive & verify it.
TRAVEL_ASSISTANT_ID = os.environ.get("TRAVEL_ASSISTANT_ID")
# Initialize Travel Assistant Class
|
_: bool = load_dotenv(find_dotenv()) # read local .env file
client: OpenAI = OpenAI()
# TODO: If Assistant is present in env no need to retrive & verify it.
TRAVEL_ASSISTANT_ID = os.environ.get("TRAVEL_ASSISTANT_ID")
# Initialize Travel Assistant Class | travel_agent_call: GetAssistant = GetAssistant( | 1 | 2023-12-17 05:57:21+00:00 | 4k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.