repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9 values |
|---|---|---|---|---|---|---|---|---|---|---|
Q-Future/Q-Align | q_align/evaluate/iaa_eval.py | [
{
"identifier": "IMAGE_TOKEN_INDEX",
"path": "q_align/constants.py",
"snippet": "IMAGE_TOKEN_INDEX = -200"
},
{
"identifier": "DEFAULT_IMAGE_TOKEN",
"path": "q_align/constants.py",
"snippet": "DEFAULT_IMAGE_TOKEN = \"<|image|>\""
},
{
"identifier": "conv_templates",
"path": "... | import argparse
import torch
import requests
import json
import os
import numpy as np
import torch
import json
from q_align.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from q_align.conversation import conv_templates, SeparatorStyle
from q_align.model.builder import load_pretrained_model
from q_align.mm_utils import process_images, tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
from PIL import Image
from PIL import ImageFile
from PIL import Image
from io import BytesIO
from transformers import TextStreamer
from scipy.stats import spearmanr, pearsonr
from tqdm import tqdm
from collections import defaultdict | 2,875 |
ImageFile.LOAD_TRUNCATED_IMAGES = True
def wa5(logits):
logprobs = np.array([logits["excellent"], logits["good"], logits["fair"], logits["poor"], logits["bad"]])
probs = np.exp(logprobs) / np.sum(np.exp(logprobs))
return np.inner(probs, np.array([1,0.75,0.5,0.25,0.]))
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
def load_image(image_file):
if image_file.startswith('http://') or image_file.startswith('https://'):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert('RGB')
else:
image = Image.open(image_file).convert('RGB')
return image
def main(args):
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path)
|
ImageFile.LOAD_TRUNCATED_IMAGES = True
def wa5(logits):
logprobs = np.array([logits["excellent"], logits["good"], logits["fair"], logits["poor"], logits["bad"]])
probs = np.exp(logprobs) / np.sum(np.exp(logprobs))
return np.inner(probs, np.array([1,0.75,0.5,0.25,0.]))
def disable_torch_init():
"""
Disable the redundant torch default initialization to accelerate model creation.
"""
setattr(torch.nn.Linear, "reset_parameters", lambda self: None)
setattr(torch.nn.LayerNorm, "reset_parameters", lambda self: None)
def load_image(image_file):
if image_file.startswith('http://') or image_file.startswith('https://'):
response = requests.get(image_file)
image = Image.open(BytesIO(response.content)).convert('RGB')
else:
image = Image.open(image_file).convert('RGB')
return image
def main(args):
# Model
disable_torch_init()
model_name = get_model_name_from_path(args.model_path) | tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit, device=args.device) | 3 | 2023-12-14 03:36:30+00:00 | 4k |
open-compass/T-Eval | teval/evaluators/instruct_evaluator.py | [
{
"identifier": "parse_string",
"path": "teval/utils/template.py",
"snippet": "def parse_string(template: str, input_string: str, allow_newline: bool=False) -> dict:\n \"\"\"Return a dictionary whose keys are from input template and value is\n responding content from input_string.\n\n Args:\n ... | from collections import defaultdict
from mmengine import load
from teval.utils.template import parse_string
from teval.utils.format_load import format_load
from teval.schema import ResponseDataSample
import json
import ast
import numpy as np | 1,927 |
class InstructEvaluator:
"""Instruct Following Evaluation
Args:
dataset_path(str): File path of evaluation dataset.
"""
def __init__(
self,
dataset_path: str,
**kwargs,
) -> None:
self.dataset_path = dataset_path
def _load_dataset(self):
self.dataset = []
dataset = load(self.dataset_path)
for key in dataset.keys():
datum = dataset[key]
data_sample = self._process_response(datum)
self.dataset.append(
dict(
origin_prompt=datum["origin_prompt"],
response_data_sample=data_sample))
self.num_samples = len(self.dataset)
def _process_response(
self,
datum: dict,
) -> ResponseDataSample:
"""Process the response to needed format.
Args:
datum(dict): inputs.
Returns:
dict: Processed response data sample.
"""
# Dict with keyword-only arguments.
template = datum['template']
# Generated response.
pred_data = datum['prediction']
# Response of ground truth.
gt_data = datum['ground_truth']
meta_data = datum['meta_data']
return ResponseDataSample(
template=template, pred=pred_data, gt=gt_data, meta_data=meta_data)
def _evaluate(self, data_sample: dict) -> dict:
metrics_result = dict()
response_format = data_sample.meta_data['response_format']
if response_format == 'json':
pred_data = self.json_format_parse(data_sample)
else:
pred_data = self.string_format_parse(data_sample)
if pred_data is None:
# directly set to 0 for all metrics
metrics_result[f'{response_format}_format_metric'] = 0
metrics_result[f'{response_format}_args_em_metric'] = 0
return metrics_result
# Exact matching
metrics_result[f'{response_format}_format_metric'] = 1
metrics_result[f'{response_format}_args_em_metric'] = self.compute_args_em_metric(
gt_action=data_sample.gt['action'], pred_action=pred_data['action'],
gt_args=data_sample.gt['args'], pred_args=pred_data['args']
)
return metrics_result
def compute_args_em_metric(self, gt_action, pred_action, gt_args, pred_args):
cnt = 0.
if gt_action == pred_action:
cnt += 1.
num_args = len(gt_args) + 1 # 1 means action name match
for gt_key in gt_args:
pred_val = pred_args.get(gt_key, "")
if pred_val == gt_args[gt_key]:
cnt += 1.
return cnt / num_args
def string_format_parse(self, data_sample):
pred_data = data_sample.pred
template = data_sample.template
thought_start = template['thought_start']
thought_end = template['thought_end']
action_start = template['action_start']
action_end = template['action_end']
args_start = template['args_start']
args_end = template['args_end']
parse_template = thought_start + "{thought}" + thought_end \
+ action_start + "{action}" + action_end \
+ args_start + "{args}" + args_end
res = parse_string(parse_template, pred_data, allow_newline=True)
try:
if res is not None:
args = ast.literal_eval(res['args'].strip())
res['args'] = args if isinstance(args, dict) else {}
res['action'] = res['action'].strip()
return res
except:
return dict(thought=res['thought'], action=res['action'].strip(), args=dict())
def json_format_parse(self, data_sample):
try:
|
class InstructEvaluator:
"""Instruct Following Evaluation
Args:
dataset_path(str): File path of evaluation dataset.
"""
def __init__(
self,
dataset_path: str,
**kwargs,
) -> None:
self.dataset_path = dataset_path
def _load_dataset(self):
self.dataset = []
dataset = load(self.dataset_path)
for key in dataset.keys():
datum = dataset[key]
data_sample = self._process_response(datum)
self.dataset.append(
dict(
origin_prompt=datum["origin_prompt"],
response_data_sample=data_sample))
self.num_samples = len(self.dataset)
def _process_response(
self,
datum: dict,
) -> ResponseDataSample:
"""Process the response to needed format.
Args:
datum(dict): inputs.
Returns:
dict: Processed response data sample.
"""
# Dict with keyword-only arguments.
template = datum['template']
# Generated response.
pred_data = datum['prediction']
# Response of ground truth.
gt_data = datum['ground_truth']
meta_data = datum['meta_data']
return ResponseDataSample(
template=template, pred=pred_data, gt=gt_data, meta_data=meta_data)
def _evaluate(self, data_sample: dict) -> dict:
metrics_result = dict()
response_format = data_sample.meta_data['response_format']
if response_format == 'json':
pred_data = self.json_format_parse(data_sample)
else:
pred_data = self.string_format_parse(data_sample)
if pred_data is None:
# directly set to 0 for all metrics
metrics_result[f'{response_format}_format_metric'] = 0
metrics_result[f'{response_format}_args_em_metric'] = 0
return metrics_result
# Exact matching
metrics_result[f'{response_format}_format_metric'] = 1
metrics_result[f'{response_format}_args_em_metric'] = self.compute_args_em_metric(
gt_action=data_sample.gt['action'], pred_action=pred_data['action'],
gt_args=data_sample.gt['args'], pred_args=pred_data['args']
)
return metrics_result
def compute_args_em_metric(self, gt_action, pred_action, gt_args, pred_args):
cnt = 0.
if gt_action == pred_action:
cnt += 1.
num_args = len(gt_args) + 1 # 1 means action name match
for gt_key in gt_args:
pred_val = pred_args.get(gt_key, "")
if pred_val == gt_args[gt_key]:
cnt += 1.
return cnt / num_args
def string_format_parse(self, data_sample):
pred_data = data_sample.pred
template = data_sample.template
thought_start = template['thought_start']
thought_end = template['thought_end']
action_start = template['action_start']
action_end = template['action_end']
args_start = template['args_start']
args_end = template['args_end']
parse_template = thought_start + "{thought}" + thought_end \
+ action_start + "{action}" + action_end \
+ args_start + "{args}" + args_end
res = parse_string(parse_template, pred_data, allow_newline=True)
try:
if res is not None:
args = ast.literal_eval(res['args'].strip())
res['args'] = args if isinstance(args, dict) else {}
res['action'] = res['action'].strip()
return res
except:
return dict(thought=res['thought'], action=res['action'].strip(), args=dict())
def json_format_parse(self, data_sample):
try: | pred_data = format_load(data_sample.pred) | 1 | 2023-12-10 05:18:46+00:00 | 4k |
rabilrbl/gemini-pro-bot | gemini_pro_bot/bot.py | [
{
"identifier": "AuthFilter",
"path": "gemini_pro_bot/filters.py",
"snippet": "_AUTHORIZED_USERS = [\n i.strip() for i in os.getenv(\"AUTHORIZED_USERS\", \"\").split(\",\") if i.strip()\n]\nclass AuthorizedUserFilter(UpdateFilter):\n def filter(self, update: Update):"
},
{
"identifier": "s... | import os
from telegram import Update
from telegram.ext import (
CommandHandler,
MessageHandler,
Application,
)
from gemini_pro_bot.filters import AuthFilter, MessageFilter, PhotoFilter
from dotenv import load_dotenv
from gemini_pro_bot.handlers import (
start,
help_command,
newchat_command,
handle_message,
handle_image,
) | 1,865 |
load_dotenv()
def start_bot() -> None:
"""Start the bot."""
# Create the Application and pass it your bot's token.
application = Application.builder().token(os.getenv("BOT_TOKEN")).build()
# on different commands - answer in Telegram
application.add_handler(CommandHandler("start", start, filters=AuthFilter))
application.add_handler(CommandHandler("help", help_command, filters=AuthFilter))
application.add_handler(CommandHandler("new", newchat_command, filters=AuthFilter))
# Any text message is sent to LLM to generate a response
|
load_dotenv()
def start_bot() -> None:
"""Start the bot."""
# Create the Application and pass it your bot's token.
application = Application.builder().token(os.getenv("BOT_TOKEN")).build()
# on different commands - answer in Telegram
application.add_handler(CommandHandler("start", start, filters=AuthFilter))
application.add_handler(CommandHandler("help", help_command, filters=AuthFilter))
application.add_handler(CommandHandler("new", newchat_command, filters=AuthFilter))
# Any text message is sent to LLM to generate a response | application.add_handler(MessageHandler(MessageFilter, handle_message)) | 0 | 2023-12-14 16:57:14+00:00 | 4k |
nox-410/tvm.tl | tests/python/contrib/test_hexagon/test_relay_simplify_conv_pat.py | [
{
"identifier": "build_module",
"path": "tests/python/contrib/test_hexagon/infrastructure.py",
"snippet": "def build_module(relay_mod, target):\n \"\"\"builds a relay module for a specified target\"\"\"\n params = {}\n executor = Executor(\"aot\", {\"link-params\": True})\n lowered = tvm.rel... | import numpy as np
import tvm
from tvm.runtime import ndarray as nd
from tvm import relay, testing
from tvm.contrib.hexagon.transform import simplify_conv_pat
from tvm.topi.utils import get_const_tuple
from tvm.contrib.hexagon.session import Session
from tvm.contrib.hexagon.pytest_plugin import HEXAGON_AOT_LLVM_TARGET
from .infrastructure import build_module, run_module | 1,811 | relay_mul_factor = relay.const(0.00392151, dtype="float32")
else:
relay_mul_factor = np.random.rand(*get_const_tuple(act_shape))
relay_mul_factor = relay.Constant(
nd.array(np.full(relay_mul_factor.shape, relay_mul_factor, dtype="float32"))
)
relay_sub_term = relay.const(0.5, dtype="float32")
relay_weights = relay.Constant(nd.array(np.full(weights.shape, weights, dtype="float32")))
relay_bias = relay.Constant(nd.array(np.full(bias.shape, bias, dtype="float32")))
return (relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias, data_in_float32)
def get_test_module_graph(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias):
"""Creates a test relay graph with the specified relay expressions"""
v1 = relay.multiply(relay_act, relay_mul_factor)
v2 = relay.subtract(v1, relay_sub_term)
v3 = relay.transpose(v2, axes=[0, 3, 1, 2])
weights_type_info = tvm.relay.transform.InferTypeLocal(relay_weights)
v4 = relay.nn.conv2d(
v3,
relay_weights,
padding=[1, 1, 1, 1],
channels=weights_type_info.shape[0],
kernel_size=[3, 3],
)
graph = relay.nn.bias_add(v4, relay_bias)
return graph
def get_test_module(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias):
"""Creates a test relay module and returns it."""
graph = get_test_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
func = relay.Function(relay.analysis.free_vars(graph), graph)
mod = tvm.IRModule.from_expr(func)
return mod
def get_expected_output_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
):
"""Creates the relay graph for expected output"""
v1 = relay.transpose(relay_act, axes=[0, 3, 1, 2])
v2 = relay.multiply(relay_mul_factor, relay_weights)
weights_type_info = tvm.relay.transform.InferTypeLocal(relay_weights)
v3 = relay.nn.conv2d(
v1, v2, padding=[1, 1, 1, 1], channels=weights_type_info.shape[0], kernel_size=[3, 3]
)
type_info = tvm.relay.transform.InferTypeLocal(v1)
relay_zero_act = relay.Constant(
nd.array(np.zeros(get_const_tuple(type_info.shape), dtype="float32"))
)
v4 = relay.subtract(relay_zero_act, relay_sub_term)
v5 = relay.nn.bias_add(v3, relay_bias)
v6 = relay.nn.conv2d(
v4,
relay_weights,
padding=[1, 1, 1, 1],
channels=weights_type_info.shape[0],
kernel_size=[3, 3],
)
return relay.add(v5, v6)
def get_expected_output_module(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
):
"""Returns manually created expected output module."""
graph = get_expected_output_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
out_func = relay.Function(relay.analysis.free_vars(graph), graph)
return tvm.IRModule.from_expr(out_func)
def get_test_modules():
"""generates test, expected modules and their inputs"""
(
relay_act,
relay_mul_factor,
relay_sub_term,
relay_weights,
relay_bias,
data_in_float32,
) = get_test_module_relay_exprs()
mod = get_test_module(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias)
exp_relay_mod = get_expected_output_module(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
return mod, exp_relay_mod, {"q1": data_in_float32}
@tvm.testing.requires_hexagon
def test_simplify_conv_pat(hexagon_session: Session):
"""A positive test case"""
(mod, exp_relay_mod, inputs) = get_test_modules()
with tvm.transform.PassContext(opt_level=3):
mod = tvm.relay.transform.InferType()(mod)
hexagon_lowered = build_module(
mod, tvm.target.Target(HEXAGON_AOT_LLVM_TARGET, host=HEXAGON_AOT_LLVM_TARGET)
)
with tvm.transform.PassContext(opt_level=3):
mod = simplify_conv_pat(mod)
mod = tvm.relay.transform.InferType()(mod)
exp_relay_mod = tvm.relay.transform.InferType()(exp_relay_mod)
assert tvm.ir.structural_equal(mod["main"], exp_relay_mod["main"], map_free_vars=True)
mod = tvm.relay.transform.FoldConstant()(mod)
hexagon_lowered_opt = build_module(
mod, tvm.target.Target(HEXAGON_AOT_LLVM_TARGET, host=HEXAGON_AOT_LLVM_TARGET)
)
# Run unoptimized llvm module
hexagon_mod = hexagon_session.get_executor_from_factory(hexagon_lowered)
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-wildcard-import, invalid-name
"""
Test hexagon relay transform - qnn.concat optimization
"""
def get_test_module_relay_exprs(isConstScalarMultiplier=True):
"""
Creates relay expressions that can be used both by
test module and expected output module
"""
act_shape = (1, 32, 32, 3)
data_in = np.random.rand(*get_const_tuple(act_shape))
data_in_float32 = np.full(data_in.shape, data_in, dtype="float32")
kernel_shape = (16, 3, 3, 3)
weights = np.random.rand(*get_const_tuple(kernel_shape))
bias = np.random.rand(get_const_tuple(kernel_shape)[0])
relay_act = relay.var("q1", shape=act_shape, dtype="float32")
if isConstScalarMultiplier:
relay_mul_factor = relay.const(0.00392151, dtype="float32")
else:
relay_mul_factor = np.random.rand(*get_const_tuple(act_shape))
relay_mul_factor = relay.Constant(
nd.array(np.full(relay_mul_factor.shape, relay_mul_factor, dtype="float32"))
)
relay_sub_term = relay.const(0.5, dtype="float32")
relay_weights = relay.Constant(nd.array(np.full(weights.shape, weights, dtype="float32")))
relay_bias = relay.Constant(nd.array(np.full(bias.shape, bias, dtype="float32")))
return (relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias, data_in_float32)
def get_test_module_graph(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias):
"""Creates a test relay graph with the specified relay expressions"""
v1 = relay.multiply(relay_act, relay_mul_factor)
v2 = relay.subtract(v1, relay_sub_term)
v3 = relay.transpose(v2, axes=[0, 3, 1, 2])
weights_type_info = tvm.relay.transform.InferTypeLocal(relay_weights)
v4 = relay.nn.conv2d(
v3,
relay_weights,
padding=[1, 1, 1, 1],
channels=weights_type_info.shape[0],
kernel_size=[3, 3],
)
graph = relay.nn.bias_add(v4, relay_bias)
return graph
def get_test_module(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias):
"""Creates a test relay module and returns it."""
graph = get_test_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
func = relay.Function(relay.analysis.free_vars(graph), graph)
mod = tvm.IRModule.from_expr(func)
return mod
def get_expected_output_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
):
"""Creates the relay graph for expected output"""
v1 = relay.transpose(relay_act, axes=[0, 3, 1, 2])
v2 = relay.multiply(relay_mul_factor, relay_weights)
weights_type_info = tvm.relay.transform.InferTypeLocal(relay_weights)
v3 = relay.nn.conv2d(
v1, v2, padding=[1, 1, 1, 1], channels=weights_type_info.shape[0], kernel_size=[3, 3]
)
type_info = tvm.relay.transform.InferTypeLocal(v1)
relay_zero_act = relay.Constant(
nd.array(np.zeros(get_const_tuple(type_info.shape), dtype="float32"))
)
v4 = relay.subtract(relay_zero_act, relay_sub_term)
v5 = relay.nn.bias_add(v3, relay_bias)
v6 = relay.nn.conv2d(
v4,
relay_weights,
padding=[1, 1, 1, 1],
channels=weights_type_info.shape[0],
kernel_size=[3, 3],
)
return relay.add(v5, v6)
def get_expected_output_module(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
):
"""Returns manually created expected output module."""
graph = get_expected_output_module_graph(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
out_func = relay.Function(relay.analysis.free_vars(graph), graph)
return tvm.IRModule.from_expr(out_func)
def get_test_modules():
"""generates test, expected modules and their inputs"""
(
relay_act,
relay_mul_factor,
relay_sub_term,
relay_weights,
relay_bias,
data_in_float32,
) = get_test_module_relay_exprs()
mod = get_test_module(relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias)
exp_relay_mod = get_expected_output_module(
relay_act, relay_mul_factor, relay_sub_term, relay_weights, relay_bias
)
return mod, exp_relay_mod, {"q1": data_in_float32}
@tvm.testing.requires_hexagon
def test_simplify_conv_pat(hexagon_session: Session):
"""A positive test case"""
(mod, exp_relay_mod, inputs) = get_test_modules()
with tvm.transform.PassContext(opt_level=3):
mod = tvm.relay.transform.InferType()(mod)
hexagon_lowered = build_module(
mod, tvm.target.Target(HEXAGON_AOT_LLVM_TARGET, host=HEXAGON_AOT_LLVM_TARGET)
)
with tvm.transform.PassContext(opt_level=3):
mod = simplify_conv_pat(mod)
mod = tvm.relay.transform.InferType()(mod)
exp_relay_mod = tvm.relay.transform.InferType()(exp_relay_mod)
assert tvm.ir.structural_equal(mod["main"], exp_relay_mod["main"], map_free_vars=True)
mod = tvm.relay.transform.FoldConstant()(mod)
hexagon_lowered_opt = build_module(
mod, tvm.target.Target(HEXAGON_AOT_LLVM_TARGET, host=HEXAGON_AOT_LLVM_TARGET)
)
# Run unoptimized llvm module
hexagon_mod = hexagon_session.get_executor_from_factory(hexagon_lowered) | expected_output = run_module(hexagon_mod, inputs) | 1 | 2023-12-14 02:37:47+00:00 | 4k |
berlino/gated_linear_attention | kernels/intra_chunk_contribution/fn.py | [
{
"identifier": "FlashGRet",
"path": "kernels/intra_chunk_contribution/fn_only_gk.py",
"snippet": "class FlashGRet(torch.autograd.Function):\n @staticmethod\n def forward(ctx, q, k, gk):\n q = q.contiguous()\n k = k.contiguous()\n gk = gk.contiguous()\n \n # asse... | import torch
import time
import math
import torch
import torch.nn.functional as F
import torch
import triton
import triton.language as tl
import numpy as np
import math
from typing import Tuple, Union, Optional
from einops import rearrange
from .fn_only_gk import FlashGRet
from .fn_only_gv import FlashGRet_O | 1,924 |
def intra_chunk_onc(q, k, v, gk, gv):
assert q.is_contiguous()
assert k.is_contiguous()
assert v.is_contiguous()
if gk is not None:
assert gk.is_contiguous()
if gv is not None:
assert gv.is_contiguous()
# q = q.float()
# k = k.float()
# v = v.float()
origin_chunk_size = k.shape[-2]
assert k.shape[-2] % 16 == 0
if gk is not None:
|
def intra_chunk_onc(q, k, v, gk, gv):
assert q.is_contiguous()
assert k.is_contiguous()
assert v.is_contiguous()
if gk is not None:
assert gk.is_contiguous()
if gv is not None:
assert gv.is_contiguous()
# q = q.float()
# k = k.float()
# v = v.float()
origin_chunk_size = k.shape[-2]
assert k.shape[-2] % 16 == 0
if gk is not None: | A = FlashGRet.apply(q, k, gk) | 0 | 2023-12-11 18:13:44+00:00 | 4k |
kakaobrain/honeybee | eval_tasks.py | [
{
"identifier": "get_model",
"path": "pipeline/interface.py",
"snippet": "def get_model(pretrained_ckpt, use_bf16=True, load_in_8bit=False):\n \"\"\"Model Provider with tokenizer and processor.\n\n Args:\n pretrained_ckpt (string): The path to pre-trained checkpoint.\n use_bf16 (bool... | import argparse
import os
import torch
import utils
from sconf import Config
from torch.distributed import destroy_process_group, init_process_group
from pipeline.interface import get_model
from tasks import build_task
from utils.logging import get_logger | 1,791 |
parser = argparse.ArgumentParser()
parser.add_argument(
"--ckpt_path",
type=str,
help="Path to the trained checkpoint.",
)
parser.add_argument(
"--result_dir",
type=str,
default="eval_results/",
help="Path to the result files.",
)
parser.add_argument("--config", nargs="+", required=True, help="Task configs.")
parser.add_argument(
"--load_results",
action="store_true",
help="Load saved results without model inference. Only for the results without re-formatted.",
)
parser.add_argument(
"--dump_submission_file",
action="store_true",
help="Dump a submission file with a specific format to evaluate on a evaluation server.",
)
parser.add_argument(
"--batch_size", "-B",
type=int,
default=None,
help="Per-device batch size for evaluation. (default: use the value in the config)",
)
logger = get_logger()
def dist_setup():
# Expected to use torchrun
init_process_group(backend="nccl")
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
def init(ckpt_path, load_results=False):
if load_results:
logger.info("Skip init model in load_results mode.")
return None, None, None
logger.info("Init (load model, tokenizer, processor) ...")
# create model
model, tokenizer, processor = get_model(ckpt_path)
model.cuda()
logger.info(" -- Init done.")
return model, tokenizer, processor
def eval_single(
model,
tokenizer,
processor,
config_path,
result_dir,
load_results=False,
dump_submission_file=False,
):
task_config = Config(config_path)
task_config = next(iter(task_config.values())) # get first child
if args.batch_size is not None:
task_config.dataloader.batch_size = args.batch_size
if utils.is_main_process():
print("=" * 80)
print(Config(task_config).dumps())
print("=" * 80)
task_name = task_config.name
|
parser = argparse.ArgumentParser()
parser.add_argument(
"--ckpt_path",
type=str,
help="Path to the trained checkpoint.",
)
parser.add_argument(
"--result_dir",
type=str,
default="eval_results/",
help="Path to the result files.",
)
parser.add_argument("--config", nargs="+", required=True, help="Task configs.")
parser.add_argument(
"--load_results",
action="store_true",
help="Load saved results without model inference. Only for the results without re-formatted.",
)
parser.add_argument(
"--dump_submission_file",
action="store_true",
help="Dump a submission file with a specific format to evaluate on a evaluation server.",
)
parser.add_argument(
"--batch_size", "-B",
type=int,
default=None,
help="Per-device batch size for evaluation. (default: use the value in the config)",
)
logger = get_logger()
def dist_setup():
# Expected to use torchrun
init_process_group(backend="nccl")
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))
def init(ckpt_path, load_results=False):
if load_results:
logger.info("Skip init model in load_results mode.")
return None, None, None
logger.info("Init (load model, tokenizer, processor) ...")
# create model
model, tokenizer, processor = get_model(ckpt_path)
model.cuda()
logger.info(" -- Init done.")
return model, tokenizer, processor
def eval_single(
model,
tokenizer,
processor,
config_path,
result_dir,
load_results=False,
dump_submission_file=False,
):
task_config = Config(config_path)
task_config = next(iter(task_config.values())) # get first child
if args.batch_size is not None:
task_config.dataloader.batch_size = args.batch_size
if utils.is_main_process():
print("=" * 80)
print(Config(task_config).dumps())
print("=" * 80)
task_name = task_config.name | task = build_task(model, tokenizer, processor, task_config) | 1 | 2023-12-06 14:48:41+00:00 | 4k |
NVlabs/RADIO | radio/hf_model.py | [
{
"identifier": "eradio",
"path": "radio/eradio_model.py",
"snippet": "@register_model\ndef eradio(pretrained=False, **kwargs):\n return fastervit2_large_fullres_ws16(pretrained=pretrained, **kwargs)"
},
{
"identifier": "create_model_from_args",
"path": "radio/radio_model.py",
"snippe... | from collections import namedtuple
from typing import Optional
from timm.models import VisionTransformer
from transformers import PretrainedConfig, PreTrainedModel
from .eradio_model import eradio
from .radio_model import create_model_from_args
from .radio_model import RADIOModel as RADIOModelBase
from .input_conditioner import get_default_conditioner, InputConditioner
import torch | 1,776 | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for RADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class RADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for RADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = RADIOConfig
def __init__(self, config):
super().__init__(config)
RADIOArgs = namedtuple("RADIOArgs", config.args.keys())
args = RADIOArgs(**config.args)
self.config = config
model = create_model_from_args(args)
input_conditioner: InputConditioner = get_default_conditioner()
self.radio_model = RADIOModelBase(
model,
input_conditioner,
config.return_summary,
config.return_spatial_features,
)
@property
def model(self) -> VisionTransformer:
return self.radio_model.model
@property
def input_conditioner(self) -> InputConditioner:
return self.radio_model.input_conditioner
def forward(self, x: torch.Tensor):
return self.radio_model.forward(x)
class ERADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for ERADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class ERADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for ERADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = ERADIOConfig
def __init__(self, config):
super().__init__(config)
config.args["in_chans"] = 3
config.args["num_classes"] = 0
config.args["return_full_features"] = config.return_spatial_features
self.config = config
| # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for RADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class RADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for RADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = RADIOConfig
def __init__(self, config):
super().__init__(config)
RADIOArgs = namedtuple("RADIOArgs", config.args.keys())
args = RADIOArgs(**config.args)
self.config = config
model = create_model_from_args(args)
input_conditioner: InputConditioner = get_default_conditioner()
self.radio_model = RADIOModelBase(
model,
input_conditioner,
config.return_summary,
config.return_spatial_features,
)
@property
def model(self) -> VisionTransformer:
return self.radio_model.model
@property
def input_conditioner(self) -> InputConditioner:
return self.radio_model.input_conditioner
def forward(self, x: torch.Tensor):
return self.radio_model.forward(x)
class ERADIOConfig(PretrainedConfig):
"""Pretrained Hugging Face configuration for ERADIO models."""
def __init__(
self,
args: Optional[dict] = None,
version: Optional[str] = "v1",
return_summary: Optional[bool] = True,
return_spatial_features: Optional[bool] = True,
**kwargs,
):
self.args = args
self.version = version
self.return_summary = return_summary
self.return_spatial_features = return_spatial_features
super().__init__(**kwargs)
class ERADIOModel(PreTrainedModel):
"""Pretrained Hugging Face model for ERADIO.
This class inherits from PreTrainedModel, which provides
HuggingFace's functionality for loading and saving models.
"""
config_class = ERADIOConfig
def __init__(self, config):
super().__init__(config)
config.args["in_chans"] = 3
config.args["num_classes"] = 0
config.args["return_full_features"] = config.return_spatial_features
self.config = config | model = eradio(**config.args) | 0 | 2023-12-08 19:53:01+00:00 | 4k |
taikinman/langrila | src/langrila/utils.py | [
{
"identifier": "_TILE_SIZE",
"path": "src/langrila/model_config.py",
"snippet": "_TILE_SIZE = 512"
},
{
"identifier": "_TOKENS_PER_TILE",
"path": "src/langrila/model_config.py",
"snippet": "_TOKENS_PER_TILE = 170"
},
{
"identifier": "MODEL_CONFIG",
"path": "src/langrila/mode... | import base64
import io
import math
import os
import numpy as np
import openai
import tiktoken
from typing import Optional, Union
from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI
from PIL import Image
from .model_config import _TILE_SIZE, _TOKENS_PER_TILE, MODEL_CONFIG | 2,200 |
def get_n_tokens(message: dict[str, str | list[dict[str, str|dict[str, str]]]] , model_name: str) -> int:
"""
Return the number of tokens used by a list of messages.
Forked and edited from : https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
"""
try:
encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
# print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model_name in MODEL_ZOO:
if model_name == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
else:
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_n_tokens() is not implemented for model {model_name}. Please choose from following model : {', '.join(sorted(list(MODEL_ZOO)))}."
)
n_content_tokens = 0
n_other_tokens = tokens_per_message
# num_tokens += tokens_per_message
for key, value in message.items():
if key == "content":
if "vision" in model_name and isinstance(value, list):
for item in value: # value type is list[dict[str, str|dict[str, str]]
if item["type"] == "text":
n_content_tokens += len(encoding.encode(item["text"]))
elif item["type"] == "image_url":
n_content_tokens += 85 # Base tokens
if item["image_url"]["detail"] == "high":
if item["image_url"]["url"].startswith("data:image/jpeg;base64,"):
img_encoded = item["image_url"]["url"].replace(
"data:image/jpeg;base64,", ""
)
n_content_tokens += calculate_high_resolution_image_tokens(
decode_image(img_encoded).size
)
elif item["image_url"]["url"].startswith("https://"):
raise NotImplementedError(
"Image URL is not acceptable. Please use base64 encoded image."
)
else:
raise ValueError(f"Unknown type {item['type']} in message['content'].")
else:
n_content_tokens += len(encoding.encode(value))
elif key == "name":
n_other_tokens += tokens_per_name
else:
n_other_tokens += len(encoding.encode(value))
n_other_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
total_tokens = n_content_tokens + n_other_tokens
return {"total": total_tokens, "content": n_content_tokens, "other": n_other_tokens}
def get_token_limit(model_name: str):
if model_name in MODEL_ZOO:
return MODEL_CONFIG[model_name]["max_tokens"]
else:
raise NotImplementedError(
f"get_token_limit() is not implemented for model {model_name}. Please choose from following model : {', '.join(sorted(list(MODEL_ZOO)))}."
)
def make_batch(iterable, batch_size=1):
length = len(iterable)
for ndx in range(0, length, batch_size):
yield iterable[ndx : min(ndx + batch_size, length)]
def pil2bytes(image: Image.Image) -> bytes:
num_byteio = io.BytesIO()
image.save(num_byteio, format="jpeg")
image_bytes = num_byteio.getvalue()
return image_bytes
def encode_image(image):
if isinstance(image, Image.Image):
image_bytes = pil2bytes(image)
return base64.b64encode(image_bytes).decode("utf-8")
elif isinstance(image, np.ndarray):
image_pil = Image.fromarray(image)
image_bytes = pil2bytes(image_pil)
return base64.b64encode(image_bytes).decode("utf-8")
elif isinstance(image, bytes):
return base64.b64encode(image).decode("utf-8")
else:
raise ValueError(f"Type of {type(image)} is not supported for image.")
def decode_image(image_encoded):
image_encoded_utf = image_encoded.encode("utf-8")
image_bytes = base64.b64decode(image_encoded_utf)
byteio = io.BytesIO(image_bytes)
return Image.open(byteio)
def calculate_high_resolution_image_tokens(image_size: tuple[int, int] | list[int, int]):
h, w = image_size
short = min(h, w)
long = max(h, w)
if long > 2048:
short = int(short * 2048 / long)
long = 2048
if short > 768:
long = int(long * 768 / short)
short = 768
n_bins_long = math.ceil(long / _TILE_SIZE)
n_bins_short = math.ceil(short / _TILE_SIZE)
n_tiles = n_bins_long * n_bins_short
|
MODEL_ZOO = set(MODEL_CONFIG.keys())
def get_client(
api_key_env_name: str,
api_version: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
deployment_id_env_name: Optional[str] = None,
api_type: Optional[str] = "openai",
timeout: int = 60,
max_retries: int = 5,
):
if api_type == "azure":
assert (
api_version and endpoint_env_name and deployment_id_env_name
), "api_version, endpoint_env_name, and deployment_id_env_name must be specified when api_type is 'azure'."
return AzureOpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
api_version=api_version,
endpoint_env_name=endpoint_env_name,
deployment_id_env_name=deployment_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
elif api_type == "openai":
return OpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
else:
raise ValueError(f"api_type must be 'azure' or 'openai'. Got {api_type}.")
def get_async_client(
api_key_env_name: str,
api_version: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
deployment_id_env_name: Optional[str] = None,
api_type: Optional[str] = "openai",
timeout: int = 60,
max_retries: int = 5,
):
if api_type == "azure":
return AsyncAzureOpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
api_version=api_version,
endpoint_env_name=endpoint_env_name,
deployment_id_env_name=deployment_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
elif api_type == "openai":
return AsyncOpenAI(
**get_openai_client_settings(
api_key_env_name=api_key_env_name,
organization_id_env_name=organization_id_env_name,
max_retries=max_retries,
timeout=timeout,
)
)
else:
raise ValueError(f"api_type must be 'azure' or 'openai'. Got {api_type}.")
def get_openai_client_settings(
api_key_env_name: str,
api_version: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
deployment_id_env_name: Optional[str] = None,
timeout: int = 60,
max_retries: int = 5,
) -> None:
outputs = {}
outputs["api_key"] = os.getenv(api_key_env_name)
if isinstance(api_version, str):
outputs["api_version"] = api_version
if isinstance(endpoint_env_name, str):
outputs["azure_endpoint"] = os.getenv(endpoint_env_name)
if isinstance(organization_id_env_name, str):
outputs["organization"] = os.getenv(organization_id_env_name)
if isinstance(deployment_id_env_name, str):
outputs["azure_deployment"] = os.getenv(deployment_id_env_name)
outputs["timeout"] = timeout
outputs["max_retries"] = max_retries
return outputs
def set_openai_envs(
api_key_env_name: str,
api_version: Optional[str] = None,
api_type: Optional[str] = None,
endpoint_env_name: Optional[str] = None,
organization_id_env_name: Optional[str] = None,
) -> None:
openai.api_key = os.getenv(api_key_env_name)
if isinstance(api_version, str):
openai.api_version = api_version
if isinstance(api_type, str):
openai.api_type = api_type
if isinstance(endpoint_env_name, str):
openai.api_base = os.getenv(endpoint_env_name)
if isinstance(organization_id_env_name, str):
openai.organization = os.getenv(organization_id_env_name)
def get_n_tokens(message: dict[str, str | list[dict[str, str|dict[str, str]]]] , model_name: str) -> int:
"""
Return the number of tokens used by a list of messages.
Forked and edited from : https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
"""
try:
encoding = tiktoken.encoding_for_model(model_name)
except KeyError:
# print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model_name in MODEL_ZOO:
if model_name == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
else:
tokens_per_message = 3
tokens_per_name = 1
else:
raise NotImplementedError(
f"get_n_tokens() is not implemented for model {model_name}. Please choose from following model : {', '.join(sorted(list(MODEL_ZOO)))}."
)
n_content_tokens = 0
n_other_tokens = tokens_per_message
# num_tokens += tokens_per_message
for key, value in message.items():
if key == "content":
if "vision" in model_name and isinstance(value, list):
for item in value: # value type is list[dict[str, str|dict[str, str]]
if item["type"] == "text":
n_content_tokens += len(encoding.encode(item["text"]))
elif item["type"] == "image_url":
n_content_tokens += 85 # Base tokens
if item["image_url"]["detail"] == "high":
if item["image_url"]["url"].startswith("data:image/jpeg;base64,"):
img_encoded = item["image_url"]["url"].replace(
"data:image/jpeg;base64,", ""
)
n_content_tokens += calculate_high_resolution_image_tokens(
decode_image(img_encoded).size
)
elif item["image_url"]["url"].startswith("https://"):
raise NotImplementedError(
"Image URL is not acceptable. Please use base64 encoded image."
)
else:
raise ValueError(f"Unknown type {item['type']} in message['content'].")
else:
n_content_tokens += len(encoding.encode(value))
elif key == "name":
n_other_tokens += tokens_per_name
else:
n_other_tokens += len(encoding.encode(value))
n_other_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
total_tokens = n_content_tokens + n_other_tokens
return {"total": total_tokens, "content": n_content_tokens, "other": n_other_tokens}
def get_token_limit(model_name: str):
if model_name in MODEL_ZOO:
return MODEL_CONFIG[model_name]["max_tokens"]
else:
raise NotImplementedError(
f"get_token_limit() is not implemented for model {model_name}. Please choose from following model : {', '.join(sorted(list(MODEL_ZOO)))}."
)
def make_batch(iterable, batch_size=1):
length = len(iterable)
for ndx in range(0, length, batch_size):
yield iterable[ndx : min(ndx + batch_size, length)]
def pil2bytes(image: Image.Image) -> bytes:
num_byteio = io.BytesIO()
image.save(num_byteio, format="jpeg")
image_bytes = num_byteio.getvalue()
return image_bytes
def encode_image(image):
if isinstance(image, Image.Image):
image_bytes = pil2bytes(image)
return base64.b64encode(image_bytes).decode("utf-8")
elif isinstance(image, np.ndarray):
image_pil = Image.fromarray(image)
image_bytes = pil2bytes(image_pil)
return base64.b64encode(image_bytes).decode("utf-8")
elif isinstance(image, bytes):
return base64.b64encode(image).decode("utf-8")
else:
raise ValueError(f"Type of {type(image)} is not supported for image.")
def decode_image(image_encoded):
image_encoded_utf = image_encoded.encode("utf-8")
image_bytes = base64.b64decode(image_encoded_utf)
byteio = io.BytesIO(image_bytes)
return Image.open(byteio)
def calculate_high_resolution_image_tokens(image_size: tuple[int, int] | list[int, int]):
h, w = image_size
short = min(h, w)
long = max(h, w)
if long > 2048:
short = int(short * 2048 / long)
long = 2048
if short > 768:
long = int(long * 768 / short)
short = 768
n_bins_long = math.ceil(long / _TILE_SIZE)
n_bins_short = math.ceil(short / _TILE_SIZE)
n_tiles = n_bins_long * n_bins_short | return _TOKENS_PER_TILE * n_tiles | 1 | 2023-12-10 09:42:35+00:00 | 4k |
Open-All-Scale-Causal-Engine/OpenASCE | openasce/attribution/attribution_model.py | [
{
"identifier": "Runtime",
"path": "openasce/core/runtime.py",
"snippet": "class Runtime:\n \"\"\"Runtime Class\n\n Provide the runtime layer to support different running environment, including the single machine or multiple machines.\n\n Attributes:\n\n \"\"\"\n\n def __init__(self) -> N... | import copy
import random
import numpy as np
from typing import Iterable, List
from openasce.core.runtime import Runtime
from openasce.inference.inference_model import InferenceModel
from openasce.utils.logger import logger | 1,936 | # Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class Attribution(Runtime):
"""Attribution Class
Attributes:
"""
def __init__(
self, *, threshold: float, max_step: int = 2, top_num: int = None
) -> None:
"""Constructor
Argument:
threshold: the score threshold
max_step: the maximal step. For the attribution based on causal graph, that is the maximal node number.
top_num: the accepted number of best options in each step, which is used in greedy attribution.
"""
super().__init__()
self._inferencer = None
self._data = None
self._threshold = threshold
self._max_step = max_step
self._top_num = top_num
self._column_names = None
self._treatment_name = None
self._label_name = None
self._label_value = None
self._result = []
@property
def column_names(self):
"""All nodes' name.
Note: should include the treatment node and label node.
"""
assert self._column_names is not None, "column names should be set in advance"
return self._column_names
@column_names.setter
def column_names(self, value: List[str]):
assert self._column_names is None
self._column_names = value
@property
def treatment_name(self):
assert self._treatment_name is not None
return self._treatment_name
@treatment_name.setter
def treatment_name(self, value: str):
assert self._treatment_name is None
self._treatment_name = value
@property
def label_name(self):
assert self._label_name is not None
return self._label_name
@label_name.setter
def label_name(self, value: str):
assert self._label_name is None
self._label_name = value
@property
def label_value(self):
assert self._label_value is not None
return self._label_value
@label_value.setter
def label_value(self, value):
assert self._label_value is None
self._label_value = value
@property
| # Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
class Attribution(Runtime):
"""Attribution Class
Attributes:
"""
def __init__(
self, *, threshold: float, max_step: int = 2, top_num: int = None
) -> None:
"""Constructor
Argument:
threshold: the score threshold
max_step: the maximal step. For the attribution based on causal graph, that is the maximal node number.
top_num: the accepted number of best options in each step, which is used in greedy attribution.
"""
super().__init__()
self._inferencer = None
self._data = None
self._threshold = threshold
self._max_step = max_step
self._top_num = top_num
self._column_names = None
self._treatment_name = None
self._label_name = None
self._label_value = None
self._result = []
@property
def column_names(self):
"""All nodes' name.
Note: should include the treatment node and label node.
"""
assert self._column_names is not None, "column names should be set in advance"
return self._column_names
@column_names.setter
def column_names(self, value: List[str]):
assert self._column_names is None
self._column_names = value
@property
def treatment_name(self):
assert self._treatment_name is not None
return self._treatment_name
@treatment_name.setter
def treatment_name(self, value: str):
assert self._treatment_name is None
self._treatment_name = value
@property
def label_name(self):
assert self._label_name is not None
return self._label_name
@label_name.setter
def label_name(self, value: str):
assert self._label_name is None
self._label_name = value
@property
def label_value(self):
assert self._label_value is not None
return self._label_value
@label_value.setter
def label_value(self, value):
assert self._label_value is None
self._label_value = value
@property | def inferencer(self) -> InferenceModel: | 1 | 2023-12-06 05:54:36+00:00 | 4k |
latorc/Wechat-AI-Assistant | chatbot.py | [
{
"identifier": "WcfWrapper",
"path": "wcf_wrapper.py",
"snippet": "class WcfWrapper:\r\n def __init__(self) -> None:\r\n def __del__(self):\r\n def msg_preview_str(self, msg:WxMsg) -> str:\r\n def wxid_to_nickname(self, wxid) -> str:\r\n def wxid_to_wxcode(self, wxid) -> str:\r\n def ... | import queue
import re
import config
import common
import openai_wrapper
import preset
from typing import Tuple
from wcf_wrapper import WcfWrapper, ContentType
from wcferry import WxMsg
from config import AdminCmd
from common import ContentType, ChatMsg
| 2,833 | return self.wcfw.send_message(msg, receiver, at_list)
try:
# 根据预设加上格式
preset = self.chat_presets.get(receiver, self.config.default_preset)
text = preset.construct_msg(content, self.wcfw.wxid_to_wxcode(msg.sender), self.wcfw.wxid_to_nickname(msg.sender))
# 获取引用消息及附件
refer_msg = self.wcfw.get_refer_content(msg)
files = []
if refer_msg is None: # 无引用内容
pass
elif refer_msg.type == ContentType.text: # 引用文本
text = text + f"\n(引用文本:\n{refer_msg.content})"
elif refer_msg.type == ContentType.link: # 引用链接
text = text + f"\n(引用链接:\n{refer_msg.content})"
elif refer_msg.type in (ContentType.image, ContentType.file): # 图片, 文件
files.append(refer_msg.content)
elif refer_msg.type == ContentType.voice: # 语音
text += f"\n(语音文件: {refer_msg.content})"
# self.openai_wrapper.run_audio_msg(receiver, text, refer_msg.content, callback_msg)
elif refer_msg.type == ContentType.video: # 视频
text += f"\n(视频文件: {refer_msg.content})"
# self.openai_wrapper.run_video_msg(receiver, text, refer_msg.content, callback_msg)
elif refer_msg.type == ContentType.ERROR: # 处理错误
self.wcfw.send_text("获取引用内容发生错误", receiver, at_list)
return
else: # 其他
# tp == WxMsgType.UNSUPPORTED
self.wcfw.send_text("抱歉, 不支持引用这类消息", receiver, at_list)
return
# 调用 OpenAI 运行消息 (阻塞直到全部消息处理结束)
self.openai_wrapper.run_msg(receiver, text, files, callback_msg)
except Exception as e:
common.logger().error("响应消息发生错误: %s", common.error_trace(e))
self.wcfw.send_text(f"对不起, 响应该消息时发生错误: {common.error_info(e)}", receiver, at_list)
def _filter_wxmsg(self, msg:WxMsg) -> str:
""" 判断是否响应这条消息
如果响应, 返回消息原文(去掉前缀)
如果忽略, 返回None
"""
# 过滤消息类型
if msg.type == 1: # 文本
pass
elif msg.type == 34: # 语音
pass
elif msg.type == 49: # 引用/文件/链接? 进一步看content type
ct = self.wcfw.get_content_type(msg)
if ct == 57: # 引用
pass
else:
return None
else:
return None
# 过滤消息内容
content = self.wcfw.get_msg_text(msg).strip()
if msg.from_group(): #群聊消息
# 白名单过滤
if "$all" in self.config.group_whitelist:
pass
else:
if msg.roomid not in self.config.group_whitelist:
return None
if msg.from_self() : #来自自己的消息, 如果有prefix开头, 去掉prefix; 否则忽略
for p in self.config.self_prefix:
if content.startswith(p):
content = content.removeprefix(p).strip()
return content
return None
if msg.is_at(self.wcfw.wxid): # @我的消息, 处理
#去掉@前缀, 获得消息正文
# 正则匹配: @开头 + 任意字符 + \u2005(1/4空格)或任意空白或结尾
content = re.sub(r"@.*?([\u2005\s]|$)", "", content).strip()
return content
else: # 其他情况, 忽略
return None
else: #单聊消息
# 微信号白名单
wxcode = self.wcfw.wxid_to_wxcode(msg.sender)
if "$all" in self.config.single_chat_whitelist:
pass
else:
if wxcode in self.config.single_chat_whitelist:
pass
else:
return None
if msg.from_self() : #来自自己的消息, 如果有prefix开头, 去掉prefix; 否则忽略
for p in self.config.self_prefix:
if content.startswith(p):
content = content.removeprefix(p).strip()
return content
return None
# 来自对方消息:
if not self.config.single_chat_prefix: # 未定义前缀: 响应所有
if msg.type == 34: # 语音
# return None
common.logger().info("转录语音")
audiofile = self.wcfw.wcf.get_audio_msg(msg.id, common.temp_dir())
text = self.openai_wrapper.audio_trans(audiofile)
return text
else:
return content
else:
for p in self.config.single_chat_prefix: # 已定义前缀: 只响应前缀开头的消息
if content.startswith(p):
return content.removeprefix(p).strip()
return None
return None
|
class Chatbot():
""" 管理微信机器人逻辑. 管理与微信客户端 (如Wechat Ferry) 和 AI 客户端 (如 OpenAI )的交互逻辑 """
def __init__(self, config: config.Config, wcfw: WcfWrapper, oaiw: openai_wrapper.OpenAIWrapper) -> None:
""" 初始化
args:
config (Config): Config对象
wcfw (WcfWrapper): Wechat Ferry Wrapper对象
oaiw (OpenAIWrapper): AI Wrapper对象
"""
self.config = config
self.wcfw = wcfw
self.openai_wrapper = oaiw
self.chat_presets:dict[str, preset.Preset] = {} # 每个对话的预设 {roomid或wxid: 预设}
def start_main_loop(self) -> None:
"""
主循环, 接收并处理微信消息.
该函数阻塞进程.
"""
while self.wcfw.wcf.is_receiving_msg():
try:
msg:WxMsg = self.wcfw.get_msg()
note = f"收到消息 {self.wcfw.msg_preview_str(msg)}"
common.logger().info(note)
except queue.Empty:
continue # 无消息,继续
except Exception as e:
common.logger().error("接收微信消息错误: %s", common.error_trace(e))
try:
self.run_wxmsg(msg)
except Exception as e:
common.logger().error("处理消息错误:%s", common.error_trace(e))
def run_wxmsg(self, msg:WxMsg):
""" 读取并处理一条消息
args:
msg (WxMsg): 消息对象. 群号: msg.roomid, 发送者微信ID: msg.sender, 消息内容: msg.content
"""
content = self._filter_wxmsg(msg)
if content is None:
return
# 确定回复对象
if msg.from_group():
receiver = msg.roomid
if msg.from_self():
at_list = ""
else:
at_list = msg.sender
else: #单聊
receiver = msg.sender
at_list = ""
# 发送者是管理员, 并且是命令时, 处理命令并直接返回
if self.wcfw.wxid_to_wxcode(msg.sender) in self.config.admins:
cmd = self._match_admin_cmd(content)
if cmd:
try:
self.process_admin_cmd(content, receiver, at_list)
except Exception as e:
common.logger().error("执行管理员命令错误: %s",common.error_trace(e))
self.wcfw.send_text(f"执行管理员命令'{content}'发生错误", receiver, at_list)
return
### 调用 AI 处理消息
# 回调函数, 处理 AI 返回消息
def callback_msg(msg:ChatMsg) -> int:
return self.wcfw.send_message(msg, receiver, at_list)
try:
# 根据预设加上格式
preset = self.chat_presets.get(receiver, self.config.default_preset)
text = preset.construct_msg(content, self.wcfw.wxid_to_wxcode(msg.sender), self.wcfw.wxid_to_nickname(msg.sender))
# 获取引用消息及附件
refer_msg = self.wcfw.get_refer_content(msg)
files = []
if refer_msg is None: # 无引用内容
pass
elif refer_msg.type == ContentType.text: # 引用文本
text = text + f"\n(引用文本:\n{refer_msg.content})"
elif refer_msg.type == ContentType.link: # 引用链接
text = text + f"\n(引用链接:\n{refer_msg.content})"
elif refer_msg.type in (ContentType.image, ContentType.file): # 图片, 文件
files.append(refer_msg.content)
elif refer_msg.type == ContentType.voice: # 语音
text += f"\n(语音文件: {refer_msg.content})"
# self.openai_wrapper.run_audio_msg(receiver, text, refer_msg.content, callback_msg)
elif refer_msg.type == ContentType.video: # 视频
text += f"\n(视频文件: {refer_msg.content})"
# self.openai_wrapper.run_video_msg(receiver, text, refer_msg.content, callback_msg)
elif refer_msg.type == ContentType.ERROR: # 处理错误
self.wcfw.send_text("获取引用内容发生错误", receiver, at_list)
return
else: # 其他
# tp == WxMsgType.UNSUPPORTED
self.wcfw.send_text("抱歉, 不支持引用这类消息", receiver, at_list)
return
# 调用 OpenAI 运行消息 (阻塞直到全部消息处理结束)
self.openai_wrapper.run_msg(receiver, text, files, callback_msg)
except Exception as e:
common.logger().error("响应消息发生错误: %s", common.error_trace(e))
self.wcfw.send_text(f"对不起, 响应该消息时发生错误: {common.error_info(e)}", receiver, at_list)
def _filter_wxmsg(self, msg:WxMsg) -> str:
""" 判断是否响应这条消息
如果响应, 返回消息原文(去掉前缀)
如果忽略, 返回None
"""
# 过滤消息类型
if msg.type == 1: # 文本
pass
elif msg.type == 34: # 语音
pass
elif msg.type == 49: # 引用/文件/链接? 进一步看content type
ct = self.wcfw.get_content_type(msg)
if ct == 57: # 引用
pass
else:
return None
else:
return None
# 过滤消息内容
content = self.wcfw.get_msg_text(msg).strip()
if msg.from_group(): #群聊消息
# 白名单过滤
if "$all" in self.config.group_whitelist:
pass
else:
if msg.roomid not in self.config.group_whitelist:
return None
if msg.from_self() : #来自自己的消息, 如果有prefix开头, 去掉prefix; 否则忽略
for p in self.config.self_prefix:
if content.startswith(p):
content = content.removeprefix(p).strip()
return content
return None
if msg.is_at(self.wcfw.wxid): # @我的消息, 处理
#去掉@前缀, 获得消息正文
# 正则匹配: @开头 + 任意字符 + \u2005(1/4空格)或任意空白或结尾
content = re.sub(r"@.*?([\u2005\s]|$)", "", content).strip()
return content
else: # 其他情况, 忽略
return None
else: #单聊消息
# 微信号白名单
wxcode = self.wcfw.wxid_to_wxcode(msg.sender)
if "$all" in self.config.single_chat_whitelist:
pass
else:
if wxcode in self.config.single_chat_whitelist:
pass
else:
return None
if msg.from_self() : #来自自己的消息, 如果有prefix开头, 去掉prefix; 否则忽略
for p in self.config.self_prefix:
if content.startswith(p):
content = content.removeprefix(p).strip()
return content
return None
# 来自对方消息:
if not self.config.single_chat_prefix: # 未定义前缀: 响应所有
if msg.type == 34: # 语音
# return None
common.logger().info("转录语音")
audiofile = self.wcfw.wcf.get_audio_msg(msg.id, common.temp_dir())
text = self.openai_wrapper.audio_trans(audiofile)
return text
else:
return content
else:
for p in self.config.single_chat_prefix: # 已定义前缀: 只响应前缀开头的消息
if content.startswith(p):
return content.removeprefix(p).strip()
return None
return None
| def _match_admin_cmd(self, content:str) -> Tuple[str, config.AdminCmd]:
| 1 | 2023-12-07 12:17:15+00:00 | 4k |
tensorsense/faceflow | params/model.py | [
{
"identifier": "core",
"path": "lib/core.py",
"snippet": "class AUHead(torch.nn.Module):\nclass AUModel(pl.LightningModule):\n def __init__(\n self,\n task: str,\n in_channels: int,\n num_classes: int = 1,\n logits_per_class: int = 1,\n act: torch.nn.Module ... | from functools import partial
from timm.scheduler import CosineLRScheduler
from torch.nn import BCEWithLogitsLoss
from torchvision.ops.focal_loss import sigmoid_focal_loss
from lib import core
from lib.core import AUModel
from lib.losses.db import DistributionBalancedLoss
from params.datamodule import datamodule, logits_per_class, num_aus
import timm
import torch | 2,481 |
backbone = timm.create_model(
"convnextv2_nano.fcmae_ft_in22k_in1k", pretrained=True, features_only=True
)
# loss = BCEWithLogitsLoss()
loss = partial(sigmoid_focal_loss, reduction="mean")
# datamodule.setup("fit")
# train_datasets = datamodule.train_dataset.datasets
# pos_counts = sum([ds.pos_counts for ds in train_datasets])
# neg_counts = sum([ds.neg_counts for ds in train_datasets])
# loss = DistributionBalancedLoss(pos_counts=pos_counts, neg_counts=neg_counts)
# def binary_dice_loss(inputs, targets, smooth=1e-4):
# inputs = torch.nn.functional.sigmoid(inputs.squeeze())
# inputs = inputs.view(-1)
# targets = targets.view(-1)
#
# intersection = (inputs * targets).sum()
# dice = (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth)
# return 1 - dice
#
#
# loss = binary_dice_loss
heads_partials = [
partial(
core.AUHead,
task="multilabel",
num_classes=num_aus,
|
backbone = timm.create_model(
"convnextv2_nano.fcmae_ft_in22k_in1k", pretrained=True, features_only=True
)
# loss = BCEWithLogitsLoss()
loss = partial(sigmoid_focal_loss, reduction="mean")
# datamodule.setup("fit")
# train_datasets = datamodule.train_dataset.datasets
# pos_counts = sum([ds.pos_counts for ds in train_datasets])
# neg_counts = sum([ds.neg_counts for ds in train_datasets])
# loss = DistributionBalancedLoss(pos_counts=pos_counts, neg_counts=neg_counts)
# def binary_dice_loss(inputs, targets, smooth=1e-4):
# inputs = torch.nn.functional.sigmoid(inputs.squeeze())
# inputs = inputs.view(-1)
# targets = targets.view(-1)
#
# intersection = (inputs * targets).sum()
# dice = (2.*intersection + smooth)/(inputs.sum() + targets.sum() + smooth)
# return 1 - dice
#
#
# loss = binary_dice_loss
heads_partials = [
partial(
core.AUHead,
task="multilabel",
num_classes=num_aus, | logits_per_class=logits_per_class, | 1 | 2023-12-05 13:15:58+00:00 | 4k |
8none1/idealLED | custom_components/ideal_led/light.py | [
{
"identifier": "IDEALLEDInstance",
"path": "custom_components/ideal_led/idealled.py",
"snippet": "class IDEALLEDInstance:\n def __init__(self, address, reset: bool, delay: int, hass) -> None:\n self.loop = asyncio.get_running_loop()\n self._mac = address\n self._reset = reset\n ... | import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from typing import Any, Optional, Tuple
from .idealled import IDEALLEDInstance
from .const import DOMAIN
from homeassistant.const import CONF_MAC
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.components.light import (
PLATFORM_SCHEMA,
ATTR_BRIGHTNESS,
ATTR_RGB_COLOR,
ATTR_EFFECT,
ColorMode,
LightEntity,
LightEntityFeature,
)
from homeassistant.util.color import match_max_scale
from homeassistant.helpers import device_registry | 3,507 |
LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_MAC): cv.string})
async def async_setup_entry(hass, config_entry, async_add_devices):
|
LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_MAC): cv.string})
async def async_setup_entry(hass, config_entry, async_add_devices): | instance = hass.data[DOMAIN][config_entry.entry_id] | 1 | 2023-12-14 08:01:32+00:00 | 4k |
amirzandieh/HyperAttention | benchmark_single_attention.py | [
{
"identifier": "flash_attn_func",
"path": "src/flash_attn_triton.py",
"snippet": "def _fwd_kernel(\n Q,\n K,\n V,\n Bias,\n Out,\n Lse,\n softmax_scale,\n stride_qb,\n stride_qh,\n stride_qm,\n stride_kb,\n stride_kh,\n stride_kn,\n stride_vb,\n stride_vh,\n... | import argparse
import torch
import triton
from src.flash_attn_triton import flash_attn_func
from hyper_attention import HyperAttention
from flash_attn import flash_attn_func as flash_attn_func_cuda | 3,431 |
try:
except ImportError:
flash_attn_func_cuda = None
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--no_causal", action="store_true")
parser.add_argument("--smooth_block", action="store_true")
parser.add_argument("--mode", type=str, default="fwd+bwd", choices=['fwd', 'bwd', 'fwd+bwd'])
parser.add_argument("--attn_method", type=str, default="flash",
choices=['flash', 'flash-cuda', 'hyper'])
return parser.parse_args()
def get_tensors(batch_size, seq_len, head_size, dim):
q = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
k = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
v = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
return q, k, v
def run_flash_attn(batch_size, head_size, seq_len, dim, causal, mode, impl="triton", warmup=20, rep=100):
q, k, v = get_tensors(batch_size, seq_len, head_size, dim)
if impl == "cuda":
if flash_attn_func_cuda is None:
raise ImportError("Please install flash_attn (pip install flash-attn --no-build-isolation)")
fn = lambda: flash_attn_func_cuda(q, k, v, causal=causal)
else:
fn = lambda: flash_attn_func(q, k, v, None, causal, None)[0]
if mode == 'fwd':
return triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
elif mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
return triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
else: # mode == 'fwd+bwd'
q20_fwd, median_fwd, q80_fwd = triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
q20_bwd, median_bwd, q80_bwd = triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
return q20_fwd + q20_bwd, median_fwd + median_bwd, q80_fwd + q80_bwd
def run_hyper_attn(batch_size, head_size, seq_len, dim, causal, mode, smooth_block, warmup=20, rep=100):
q, k, v = get_tensors(batch_size, head_size, seq_len, dim)
block_size = 256
sample_size = 256
|
try:
except ImportError:
flash_attn_func_cuda = None
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--no_causal", action="store_true")
parser.add_argument("--smooth_block", action="store_true")
parser.add_argument("--mode", type=str, default="fwd+bwd", choices=['fwd', 'bwd', 'fwd+bwd'])
parser.add_argument("--attn_method", type=str, default="flash",
choices=['flash', 'flash-cuda', 'hyper'])
return parser.parse_args()
def get_tensors(batch_size, seq_len, head_size, dim):
q = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
k = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
v = torch.randn((batch_size, seq_len, head_size, dim), dtype=torch.bfloat16, device="cuda", requires_grad=True)
return q, k, v
def run_flash_attn(batch_size, head_size, seq_len, dim, causal, mode, impl="triton", warmup=20, rep=100):
q, k, v = get_tensors(batch_size, seq_len, head_size, dim)
if impl == "cuda":
if flash_attn_func_cuda is None:
raise ImportError("Please install flash_attn (pip install flash-attn --no-build-isolation)")
fn = lambda: flash_attn_func_cuda(q, k, v, causal=causal)
else:
fn = lambda: flash_attn_func(q, k, v, None, causal, None)[0]
if mode == 'fwd':
return triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
elif mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
return triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
else: # mode == 'fwd+bwd'
q20_fwd, median_fwd, q80_fwd = triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
q20_bwd, median_bwd, q80_bwd = triton.testing.do_bench(fn, warmup=warmup, rep=rep, quantiles=[0.2, 0.5, 0.8])
return q20_fwd + q20_bwd, median_fwd + median_bwd, q80_fwd + q80_bwd
def run_hyper_attn(batch_size, head_size, seq_len, dim, causal, mode, smooth_block, warmup=20, rep=100):
q, k, v = get_tensors(batch_size, head_size, seq_len, dim)
block_size = 256
sample_size = 256
| attn = HyperAttention( | 1 | 2023-12-08 21:28:22+00:00 | 4k |
Psivant/femto | femto/md/tests/test_system.py | [
{
"identifier": "CDK2_SYSTEM",
"path": "femto/fe/tests/systems.py",
"snippet": "CDK2_SYSTEM = TestSystem(\n directory=CDK2_DATA_DIR,\n receptor_name=\"cdk2\",\n receptor_coords=CDK2_DATA_DIR / \"cdk2.pdb\",\n receptor_params=None,\n receptor_cavity_mask=\":12,14,16,22,84,87,88,134,146,147... | import openmm
import openmm.unit
import parmed
from femto.fe.tests.systems import CDK2_SYSTEM, TEMOA_SYSTEM
from femto.md.constants import LIGAND_1_RESIDUE_NAME, LIGAND_2_RESIDUE_NAME
from femto.md.system import apply_hmr, load_ligand, load_ligands, load_receptor
from femto.md.tests.mocking import build_mock_structure
from femto.md.utils.openmm import is_close | 3,324 |
def test_hmr():
topology = build_mock_structure(["CC"])
system = openmm.System()
system.addParticle(12.0 * openmm.unit.amu)
system.addParticle(12.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
original_mass = sum(
[system.getParticleMass(i) for i in range(system.getNumParticles())],
0.0 * openmm.unit.amu,
)
expected_h_mass = 1.5 * openmm.unit.amu
apply_hmr(system, topology, hydrogen_mass=expected_h_mass)
new_masses = [system.getParticleMass(i) for i in range(system.getNumParticles())]
new_mass = sum(new_masses, 0.0 * openmm.unit.amu)
assert is_close(new_mass, original_mass)
expected_masses = [
(12.0 - 0.5 * 3.0) * openmm.unit.amu,
(12.0 - 0.5 * 3.0) * openmm.unit.amu,
] + ([expected_h_mass] * 6)
assert all(
is_close(new_mass, expected_mass)
for new_mass, expected_mass in zip(new_masses, expected_masses, strict=True)
)
def test_hmr_water():
"""HMR should not modify water molecules."""
topology = build_mock_structure(["O"])
expected_masses = [16.0, 1.0, 1.0] * openmm.unit.amu
system = openmm.System()
for mass in expected_masses:
system.addParticle(mass)
apply_hmr(system, topology)
new_masses = [system.getParticleMass(i) for i in range(system.getNumParticles())]
assert all(
is_close(new_mass, expected_mass)
for new_mass, expected_mass in zip(new_masses, expected_masses, strict=True)
)
def test_load_ligand():
reside_name = "ABC"
ligand = load_ligand(
CDK2_SYSTEM.ligand_1_coords, CDK2_SYSTEM.ligand_1_params, reside_name
)
assert len(ligand[":ABC"].atoms) == len(ligand.atoms)
def test_load_ligands():
coord_path = CDK2_SYSTEM.ligand_1_coords
param_path = CDK2_SYSTEM.ligand_1_params
ligand_1, ligand_2 = load_ligands(coord_path, param_path, None, None)
assert ligand_2 is None
assert ligand_1.residues[0].name == LIGAND_1_RESIDUE_NAME
ligand_1, ligand_2 = load_ligands(coord_path, param_path, coord_path, param_path)
assert isinstance(ligand_2, parmed.Structure)
assert ligand_1.residues[0].name == LIGAND_1_RESIDUE_NAME
assert ligand_2.residues[0].name == LIGAND_2_RESIDUE_NAME
def test_load_receptor_with_params(mocker):
mock_parameterize = mocker.patch(
"femto.md.utils.amber.parameterize_structure", autospec=True
)
|
def test_hmr():
topology = build_mock_structure(["CC"])
system = openmm.System()
system.addParticle(12.0 * openmm.unit.amu)
system.addParticle(12.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
system.addParticle(1.0 * openmm.unit.amu)
original_mass = sum(
[system.getParticleMass(i) for i in range(system.getNumParticles())],
0.0 * openmm.unit.amu,
)
expected_h_mass = 1.5 * openmm.unit.amu
apply_hmr(system, topology, hydrogen_mass=expected_h_mass)
new_masses = [system.getParticleMass(i) for i in range(system.getNumParticles())]
new_mass = sum(new_masses, 0.0 * openmm.unit.amu)
assert is_close(new_mass, original_mass)
expected_masses = [
(12.0 - 0.5 * 3.0) * openmm.unit.amu,
(12.0 - 0.5 * 3.0) * openmm.unit.amu,
] + ([expected_h_mass] * 6)
assert all(
is_close(new_mass, expected_mass)
for new_mass, expected_mass in zip(new_masses, expected_masses, strict=True)
)
def test_hmr_water():
"""HMR should not modify water molecules."""
topology = build_mock_structure(["O"])
expected_masses = [16.0, 1.0, 1.0] * openmm.unit.amu
system = openmm.System()
for mass in expected_masses:
system.addParticle(mass)
apply_hmr(system, topology)
new_masses = [system.getParticleMass(i) for i in range(system.getNumParticles())]
assert all(
is_close(new_mass, expected_mass)
for new_mass, expected_mass in zip(new_masses, expected_masses, strict=True)
)
def test_load_ligand():
reside_name = "ABC"
ligand = load_ligand(
CDK2_SYSTEM.ligand_1_coords, CDK2_SYSTEM.ligand_1_params, reside_name
)
assert len(ligand[":ABC"].atoms) == len(ligand.atoms)
def test_load_ligands():
coord_path = CDK2_SYSTEM.ligand_1_coords
param_path = CDK2_SYSTEM.ligand_1_params
ligand_1, ligand_2 = load_ligands(coord_path, param_path, None, None)
assert ligand_2 is None
assert ligand_1.residues[0].name == LIGAND_1_RESIDUE_NAME
ligand_1, ligand_2 = load_ligands(coord_path, param_path, coord_path, param_path)
assert isinstance(ligand_2, parmed.Structure)
assert ligand_1.residues[0].name == LIGAND_1_RESIDUE_NAME
assert ligand_2.residues[0].name == LIGAND_2_RESIDUE_NAME
def test_load_receptor_with_params(mocker):
mock_parameterize = mocker.patch(
"femto.md.utils.amber.parameterize_structure", autospec=True
)
| receptor = load_receptor(TEMOA_SYSTEM.receptor_coords, TEMOA_SYSTEM.receptor_params) | 1 | 2023-12-07 15:28:18+00:00 | 4k |
AIFSH/NativeDancer | nativedancer/third_part/detectron2/modeling/backbone/regnet.py | [
{
"identifier": "get_norm",
"path": "nativedancer/third_part/detectron2/layers/batch_norm.py",
"snippet": "def get_norm(norm, out_channels):\n \"\"\"\n Args:\n norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;\n or a callable that takes a channel number and returns\... | import numpy as np
from torch import nn
from ...layers import CNNBlockBase, ShapeSpec, get_norm
from .backbone import Backbone | 1,873 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Implementation of RegNet models from :paper:`dds` and :paper:`scaling`.
This code is adapted from https://github.com/facebookresearch/pycls with minimal modifications.
Some code duplication exists between RegNet and ResNets (e.g., ResStem) in order to simplify
model loading.
"""
__all__ = [
"AnyNet",
"RegNet",
"ResStem",
"SimpleStem",
"VanillaBlock",
"ResBasicBlock",
"ResBottleneckBlock",
]
def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False):
"""Helper for building a conv2d layer."""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, g, b = stride, (k - 1) // 2, groups, bias
return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b)
def gap2d():
"""Helper for building a global average pooling layer."""
return nn.AdaptiveAvgPool2d((1, 1))
def pool2d(k, *, stride=1):
"""Helper for building a pool2d layer."""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2)
def init_weights(m):
"""Performs ResNet-style weight initialization."""
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Implementation of RegNet models from :paper:`dds` and :paper:`scaling`.
This code is adapted from https://github.com/facebookresearch/pycls with minimal modifications.
Some code duplication exists between RegNet and ResNets (e.g., ResStem) in order to simplify
model loading.
"""
__all__ = [
"AnyNet",
"RegNet",
"ResStem",
"SimpleStem",
"VanillaBlock",
"ResBasicBlock",
"ResBottleneckBlock",
]
def conv2d(w_in, w_out, k, *, stride=1, groups=1, bias=False):
"""Helper for building a conv2d layer."""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
s, p, g, b = stride, (k - 1) // 2, groups, bias
return nn.Conv2d(w_in, w_out, k, stride=s, padding=p, groups=g, bias=b)
def gap2d():
"""Helper for building a global average pooling layer."""
return nn.AdaptiveAvgPool2d((1, 1))
def pool2d(k, *, stride=1):
"""Helper for building a pool2d layer."""
assert k % 2 == 1, "Only odd size kernels supported to avoid padding issues."
return nn.MaxPool2d(k, stride=stride, padding=(k - 1) // 2)
def init_weights(m):
"""Performs ResNet-style weight initialization."""
if isinstance(m, nn.Conv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=np.sqrt(2.0 / fan_out))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(mean=0.0, std=0.01)
m.bias.data.zero_()
| class ResStem(CNNBlockBase): | 2 | 2023-12-10 20:14:00+00:00 | 4k |
ethanweber/nerfiller | nerfiller/guidance/multiview_metric.py | [
{
"identifier": "get_images_with_keypoints",
"path": "nerfiller/utils/draw_utils.py",
"snippet": "def get_images_with_keypoints(\n images: Float[Tensor, \"B 3 H W\"],\n keypoints: Float[Tensor, \"B N 2\"],\n colors: Optional[Float[Tensor, \"B N 3\"]] = None,\n keypoint_size: int = 10,\n t... | import mediapy
import torch
from kornia.geometry.epipolar import (
compute_correspond_epilines,
fundamental_from_projections,
)
from kornia.geometry.linalg import point_line_distance
from torchmetrics.functional import (
pairwise_cosine_similarity,
)
from nerfiller.utils.draw_utils import (
get_images_with_keypoints,
get_images_with_lines,
)
from nerfiller.utils.depth_utils import (
reproject,
)
from nerfiller.utils.camera_utils import (
get_projection_matrix,
)
from nerfstudio.utils.colormaps import ColormapOptions, apply_colormap
from nerfiller.utils.typing import * | 2,744 | """
Code for epipolar guidance.
"""
class MultiviewMetric(torch.nn.Module):
"""
Computes multi-view consistency loss.
"""
def __init__(self):
super().__init__()
def forward(
self,
features1: Float[Tensor, "B C H W"],
features2: Float[Tensor, "B C H W"],
image1: Float[Tensor, "B 3 Horig Worig"],
image2: Float[Tensor, "B 3 Horig Worig"],
depth1: Optional[Float[Tensor, "B 1 H W"]] = None,
depth2: Optional[Float[Tensor, "B 1 H W"]] = None,
mask1: Optional[Float[Tensor, "B 1 H W"]] = None,
mask2: Optional[Float[Tensor, "B 1 H W"]] = None,
K1: Optional[Float[Tensor, "B 3 3"]] = None,
K2: Optional[Float[Tensor, "B 3 3"]] = None,
c2w1: Optional[Float[Tensor, "B 3 4"]] = None,
c2w2: Optional[Float[Tensor, "B 3 4"]] = None,
output_folder: Optional[Path] = None,
suffix: str = "",
show: bool = False,
display_height: int = 512,
):
pass
class MatchingMetric(MultiviewMetric):
"""
Computes a loss to encourage the depth to give good matches.
"""
def __init__(
self,
lossfeatmult: float = 1.0,
lossdistmult: float = 1.0,
sigma_scalar: float = 1.0,
height_scalar: float = 1.0,
keypoint_size: int = 10,
line_width: int = 4,
eps: float = 1e-6,
thresh: float = 0.018,
):
super().__init__()
self.sigma_scalar = sigma_scalar
self.height_scalar = height_scalar
self.lossfeatmult = lossfeatmult
self.lossdistmult = lossdistmult
self.keypoint_size = keypoint_size
self.line_width = line_width
self.eps = eps
self.thresh = thresh
def compute_matches(
self,
features1: Float[Tensor, "B C H W"],
features2: Float[Tensor, "B C H W"],
K1: Float[Tensor, "B 3 3"],
K2: Float[Tensor, "B 3 3"],
c2w1: Float[Tensor, "B 3 4"],
c2w2: Float[Tensor, "B 3 4"],
output_folder: Optional[Path] = None,
suffix: str = "",
show: bool = False,
display_height: int = 512,
):
| """
Code for epipolar guidance.
"""
class MultiviewMetric(torch.nn.Module):
"""
Computes multi-view consistency loss.
"""
def __init__(self):
super().__init__()
def forward(
self,
features1: Float[Tensor, "B C H W"],
features2: Float[Tensor, "B C H W"],
image1: Float[Tensor, "B 3 Horig Worig"],
image2: Float[Tensor, "B 3 Horig Worig"],
depth1: Optional[Float[Tensor, "B 1 H W"]] = None,
depth2: Optional[Float[Tensor, "B 1 H W"]] = None,
mask1: Optional[Float[Tensor, "B 1 H W"]] = None,
mask2: Optional[Float[Tensor, "B 1 H W"]] = None,
K1: Optional[Float[Tensor, "B 3 3"]] = None,
K2: Optional[Float[Tensor, "B 3 3"]] = None,
c2w1: Optional[Float[Tensor, "B 3 4"]] = None,
c2w2: Optional[Float[Tensor, "B 3 4"]] = None,
output_folder: Optional[Path] = None,
suffix: str = "",
show: bool = False,
display_height: int = 512,
):
pass
class MatchingMetric(MultiviewMetric):
"""
Computes a loss to encourage the depth to give good matches.
"""
def __init__(
self,
lossfeatmult: float = 1.0,
lossdistmult: float = 1.0,
sigma_scalar: float = 1.0,
height_scalar: float = 1.0,
keypoint_size: int = 10,
line_width: int = 4,
eps: float = 1e-6,
thresh: float = 0.018,
):
super().__init__()
self.sigma_scalar = sigma_scalar
self.height_scalar = height_scalar
self.lossfeatmult = lossfeatmult
self.lossdistmult = lossdistmult
self.keypoint_size = keypoint_size
self.line_width = line_width
self.eps = eps
self.thresh = thresh
def compute_matches(
self,
features1: Float[Tensor, "B C H W"],
features2: Float[Tensor, "B C H W"],
K1: Float[Tensor, "B 3 3"],
K2: Float[Tensor, "B 3 3"],
c2w1: Float[Tensor, "B 3 4"],
c2w2: Float[Tensor, "B 3 4"],
output_folder: Optional[Path] = None,
suffix: str = "",
show: bool = False,
display_height: int = 512,
): | P1 = get_projection_matrix(K1, c2w1) | 3 | 2023-12-07 19:12:08+00:00 | 4k |
nnanhuang/Customize-it-3D | ldm/modules/diffusionmodules/openaimodel.py | [
{
"identifier": "checkpoint",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n ... | from abc import abstractmethod
from functools import partial
from typing import Iterable
from ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from ldm.modules.attention import SpatialTransformer
from ldm.util import exists
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F | 2,531 |
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
|
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels | self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) | 3 | 2023-12-14 11:03:35+00:00 | 4k |
jbarrow/mlx-playground | train.py | [
{
"identifier": "Llama",
"path": "llama/model.py",
"snippet": "class Llama(nn.Module):\n def __init__(self, config: ModelArgs) -> None:\n super().__init__()\n\n self.embedding = nn.Embedding(config.vocab_size, config.dims)\n self.attention = [TransformerBlock(config) for _ in ran... | from llama.model import Llama, ModelArgs
from llama.optim import AdamW
from mlx.utils import tree_flatten
from tqdm import tqdm
import mlx.optimizers as optim
import mlx.core as mx
import mlx.nn as nn | 1,921 | """
Super simple train.py, getting started without any tokenizers,
and with a very simple training loop.
"""
lines = open("./data/example.txt", "r").read()
vocab = sorted(list(set(lines)))
itos = {i: ch for i, ch in enumerate(vocab)}
stoi = {ch: i for i, ch in enumerate(vocab)}
CONFIG = {
"context_length": 16,
"batch_size": 32,
"steps": 1000,
"learning_rate": 0.001,
}
def encode(s):
return [stoi[ch] for ch in s]
def decode(l):
return "".join([itos[i] for i in l])
def get_batches(
data: mx.array, split: str, batch_size: int, context_window: int, config=CONFIG
) -> tuple[mx.array, mx.array]:
train = data[: int(0.8 * len(data))]
val = data[int(0.8 * len(data)) : int(0.9 * len(data))]
test = data[int(0.9 * len(data)) :]
batch_data = train
if split == "val":
batch_data = val
if split == "test":
batch_data = test
ixs = mx.random.randint(
0, batch_data.shape[0] - context_window - 1, shape=(batch_size,)
).tolist()
# create B x C tensors of x and y
x = mx.concatenate(
[mx.expand_dims(batch_data[ix : ix + context_window], 0) for ix in ixs], axis=0
)
y = mx.concatenate(
[mx.expand_dims(batch_data[ix + 1 : ix + context_window + 1], 0) for ix in ixs],
axis=0,
)
return x, y
def evaluate_loss(model, config=CONFIG) -> dict[str, mx.array]:
out = {}
mx.eval(model.parameters())
for split in ["train", "val"]:
losses = []
for _ in range(10):
xb, yb = get_batches(
dataset, split, config["batch_size"], config["context_length"], config
)
loss = model.loss(xb, yb)
losses.append(loss.item())
out[split] = mx.mean(mx.array(losses)).item()
return out
def train(model: nn.Module, optimizer, config=CONFIG):
losses = []
loss_and_grad_fn = nn.value_and_grad(model, model.loss)
pbar = tqdm(range(config["steps"]))
for step in pbar:
xs, ys = get_batches(
dataset, "train", config["batch_size"], config["context_length"]
)
loss, grads = loss_and_grad_fn(xs, ys)
model.update(optimizer.apply_gradients(grads, model))
mx.simplify(loss, model.parameters())
# mx.eval(loss, model.parameters())
losses.append(loss.item())
pbar.set_description(f"loss: ({loss.item():.2f})")
print(evaluate_loss(model))
if __name__ == "__main__":
dataset = mx.array(encode(lines))
args = ModelArgs()
model = Llama(args)
nparams = sum(x.size for k, x in tree_flatten(model.parameters()))
print(f"training a model with {nparams} trainable params")
| """
Super simple train.py, getting started without any tokenizers,
and with a very simple training loop.
"""
lines = open("./data/example.txt", "r").read()
vocab = sorted(list(set(lines)))
itos = {i: ch for i, ch in enumerate(vocab)}
stoi = {ch: i for i, ch in enumerate(vocab)}
CONFIG = {
"context_length": 16,
"batch_size": 32,
"steps": 1000,
"learning_rate": 0.001,
}
def encode(s):
return [stoi[ch] for ch in s]
def decode(l):
return "".join([itos[i] for i in l])
def get_batches(
data: mx.array, split: str, batch_size: int, context_window: int, config=CONFIG
) -> tuple[mx.array, mx.array]:
train = data[: int(0.8 * len(data))]
val = data[int(0.8 * len(data)) : int(0.9 * len(data))]
test = data[int(0.9 * len(data)) :]
batch_data = train
if split == "val":
batch_data = val
if split == "test":
batch_data = test
ixs = mx.random.randint(
0, batch_data.shape[0] - context_window - 1, shape=(batch_size,)
).tolist()
# create B x C tensors of x and y
x = mx.concatenate(
[mx.expand_dims(batch_data[ix : ix + context_window], 0) for ix in ixs], axis=0
)
y = mx.concatenate(
[mx.expand_dims(batch_data[ix + 1 : ix + context_window + 1], 0) for ix in ixs],
axis=0,
)
return x, y
def evaluate_loss(model, config=CONFIG) -> dict[str, mx.array]:
out = {}
mx.eval(model.parameters())
for split in ["train", "val"]:
losses = []
for _ in range(10):
xb, yb = get_batches(
dataset, split, config["batch_size"], config["context_length"], config
)
loss = model.loss(xb, yb)
losses.append(loss.item())
out[split] = mx.mean(mx.array(losses)).item()
return out
def train(model: nn.Module, optimizer, config=CONFIG):
losses = []
loss_and_grad_fn = nn.value_and_grad(model, model.loss)
pbar = tqdm(range(config["steps"]))
for step in pbar:
xs, ys = get_batches(
dataset, "train", config["batch_size"], config["context_length"]
)
loss, grads = loss_and_grad_fn(xs, ys)
model.update(optimizer.apply_gradients(grads, model))
mx.simplify(loss, model.parameters())
# mx.eval(loss, model.parameters())
losses.append(loss.item())
pbar.set_description(f"loss: ({loss.item():.2f})")
print(evaluate_loss(model))
if __name__ == "__main__":
dataset = mx.array(encode(lines))
args = ModelArgs()
model = Llama(args)
nparams = sum(x.size for k, x in tree_flatten(model.parameters()))
print(f"training a model with {nparams} trainable params")
| optimizer = AdamW( | 2 | 2023-12-06 13:31:42+00:00 | 4k |
TaoHuang13/diffusion_reward | diffusion_reward/models/reward_models/diffusion_reward.py | [
{
"identifier": "build_model",
"path": "diffusion_reward/models/video_models/vqdiffusion/modeling/build.py",
"snippet": "def build_model(config, args=None):\n return instantiate_from_config(config['model'])"
},
{
"identifier": "index_to_log_onehot",
"path": "diffusion_reward/models/video_... | import os
import hydra
import torch
import torch.nn as nn
import torch.nn.functional as F
import yaml
from pathlib import Path
from ..video_models.vqdiffusion.modeling.build import build_model
from ..video_models.vqdiffusion.modeling.transformers.diffusion_transformer import (
index_to_log_onehot, log_categorical, log_onehot_to_index,
sum_except_batch)
from ..video_models.vqdiffusion.utils.io import load_yaml_config
from ..video_models.vqdiffusion.utils.misc import get_model_parameters_info | 2,296 | self.use_std = cfg.use_std
if self.use_std:
stat_path = str(Path(__file__).parents[3]) + cfg.stat_path
with open(stat_path, 'r') as file:
self.stat = yaml.safe_load(file)[cfg.task_name][cfg.skip_step]
# build exploration reward model
self.use_expl_reward = cfg.use_expl_reward
if self.use_expl_reward:
cfg.expl_reward.obs_shape = cfg.obs_shape
cfg.expl_reward.action_shape = cfg.action_shape
self.expl_reward = hydra.utils.instantiate(cfg.expl_reward)
self.expl_scale = cfg.expl_scale
def get_model(self, ema, model_path, config_path):
if 'OUTPUT' in model_path: # pretrained model
model_name = model_path.split(os.path.sep)[-3]
else:
model_name = os.path.basename(config_path).replace('.yaml', '')
config = load_yaml_config(config_path)
model = build_model(config)
model_parameters = get_model_parameters_info(model)
print(model_parameters)
if os.path.exists(model_path):
ckpt = torch.load(model_path, map_location="cpu")
if 'last_epoch' in ckpt:
epoch = ckpt['last_epoch']
elif 'epoch' in ckpt:
epoch = ckpt['epoch']
else:
epoch = 0
missing, unexpected = model.load_state_dict(ckpt["model"], strict=False)
print('Model missing keys:\n', missing)
print('Model unexpected keys:\n', unexpected)
if ema==True and 'ema' in ckpt:
print("Evaluate EMA model")
ema_model = model.get_ema_model()
missing, unexpected = ema_model.load_state_dict(ckpt['ema'], strict=False)
else:
epoch = None
return {'model': model, 'epoch': epoch, 'model_name': model_name, 'parameter': model_parameters}
def imgs_to_batch(self, x, reward_type='entropy'):
'''
input:
imgs: B * T * H * W * C
(mostly): 1 * T * ...
'''
assert x.max() <= 1
# preprocessing
seq_len = x.shape[1]
num_frames = self.model.cfg.params['condition_emb_config']['params']['num_cond_frames'] + 1
n_skip = self.model.frame_skip
subseq_len = (num_frames - 1) * n_skip
x = x.permute(0, 1, 4, 2 ,3)
_, indices = self.model.content_codec.encode_to_z(x)
assert indices.shape[0] == 1
indices = indices.reshape(indices.shape[0], seq_len, -1)
if reward_type == 'entropy':
# only return conditional frames
post_idxes = list(range(seq_len - subseq_len + n_skip))
batch_indices = [indices[:, idx:idx+subseq_len:n_skip] for idx in post_idxes]
batch_indices = torch.stack(batch_indices, dim=0)
batch_indices = batch_indices.squeeze(1).reshape(batch_indices.shape[0], -1)
if subseq_len - n_skip > 0:
pre_batch_indices = [indices[:, idx].tile((1, num_frames - 1)) for idx in range(subseq_len-n_skip)]
pre_batch_indices = torch.concat(pre_batch_indices, dim=0)
batch_indices = torch.concat([pre_batch_indices, batch_indices], dim=0)
cond = {'condition_token': batch_indices}
elif reward_type == 'likelihood':
# return conditional frames + current frame
post_idxes = list(range(seq_len - subseq_len))
batch_indices = [indices[:, idx:idx+subseq_len+n_skip:n_skip] for idx in post_idxes]
batch_indices = torch.stack(batch_indices, dim=0)
batch_indices = batch_indices.squeeze(1).reshape(batch_indices.shape[0], -1)
if subseq_len - n_skip > 0:
pre_batch_indices = [indices[:, idx].tile((1, num_frames)) for idx in range(subseq_len)]
pre_batch_indices = torch.concat(pre_batch_indices, dim=0)
batch_indices = torch.concat([pre_batch_indices, batch_indices], dim=0)
cond = {'condition_token': batch_indices}
else:
raise NotImplementedError
x = x.flatten(0, 1)
cont = {'content_token': indices[0]}
return cont, cond, indices[0]
@torch.no_grad()
def calc_reward(self, imgs):
self.model.eval()
content, condition, _ = self.imgs_to_batch(imgs, reward_type=self.reward_type)
content_token = content['content_token']
condition_token = condition['condition_token']
rewards = self.calc_vlb(content_token, condition_token)
if self.use_std:
rewards_std = (rewards - self.stat[0]) / self.stat[1]
scaled_rewards = (1 - self.expl_scale) * rewards_std
return scaled_rewards
@torch.no_grad()
def calc_vlb(self, cont_emb, cond_emb):
x = cont_emb
b, device = x.size(0), x.device
transformer = self.model.transformer
cond_emb = transformer.condition_emb(cond_emb).float()
# t=0
start_step = transformer.num_timesteps
x_start = x
t = torch.full((b,), start_step-1, device=device, dtype=torch.long)
|
class DiffusionReward(nn.Module):
def __init__(self, cfg):
super(DiffusionReward, self).__init__()
# load video models
self.info = self.get_model(ema=True, model_path=cfg.ckpt_path, config_path=cfg.cfg_path)
self.model = self.info['model']
self.epoch = self.info['epoch']
self.model_name = self.info['model_name']
# self.model = self.model.cuda()
self.model.eval()
for param in self.model.parameters():
param.requires_grad = False
# set attribute
for attr_name, attr_value in cfg.items():
setattr(self, attr_name, attr_value)
# standardization
self.use_std = cfg.use_std
if self.use_std:
stat_path = str(Path(__file__).parents[3]) + cfg.stat_path
with open(stat_path, 'r') as file:
self.stat = yaml.safe_load(file)[cfg.task_name][cfg.skip_step]
# build exploration reward model
self.use_expl_reward = cfg.use_expl_reward
if self.use_expl_reward:
cfg.expl_reward.obs_shape = cfg.obs_shape
cfg.expl_reward.action_shape = cfg.action_shape
self.expl_reward = hydra.utils.instantiate(cfg.expl_reward)
self.expl_scale = cfg.expl_scale
def get_model(self, ema, model_path, config_path):
if 'OUTPUT' in model_path: # pretrained model
model_name = model_path.split(os.path.sep)[-3]
else:
model_name = os.path.basename(config_path).replace('.yaml', '')
config = load_yaml_config(config_path)
model = build_model(config)
model_parameters = get_model_parameters_info(model)
print(model_parameters)
if os.path.exists(model_path):
ckpt = torch.load(model_path, map_location="cpu")
if 'last_epoch' in ckpt:
epoch = ckpt['last_epoch']
elif 'epoch' in ckpt:
epoch = ckpt['epoch']
else:
epoch = 0
missing, unexpected = model.load_state_dict(ckpt["model"], strict=False)
print('Model missing keys:\n', missing)
print('Model unexpected keys:\n', unexpected)
if ema==True and 'ema' in ckpt:
print("Evaluate EMA model")
ema_model = model.get_ema_model()
missing, unexpected = ema_model.load_state_dict(ckpt['ema'], strict=False)
else:
epoch = None
return {'model': model, 'epoch': epoch, 'model_name': model_name, 'parameter': model_parameters}
def imgs_to_batch(self, x, reward_type='entropy'):
'''
input:
imgs: B * T * H * W * C
(mostly): 1 * T * ...
'''
assert x.max() <= 1
# preprocessing
seq_len = x.shape[1]
num_frames = self.model.cfg.params['condition_emb_config']['params']['num_cond_frames'] + 1
n_skip = self.model.frame_skip
subseq_len = (num_frames - 1) * n_skip
x = x.permute(0, 1, 4, 2 ,3)
_, indices = self.model.content_codec.encode_to_z(x)
assert indices.shape[0] == 1
indices = indices.reshape(indices.shape[0], seq_len, -1)
if reward_type == 'entropy':
# only return conditional frames
post_idxes = list(range(seq_len - subseq_len + n_skip))
batch_indices = [indices[:, idx:idx+subseq_len:n_skip] for idx in post_idxes]
batch_indices = torch.stack(batch_indices, dim=0)
batch_indices = batch_indices.squeeze(1).reshape(batch_indices.shape[0], -1)
if subseq_len - n_skip > 0:
pre_batch_indices = [indices[:, idx].tile((1, num_frames - 1)) for idx in range(subseq_len-n_skip)]
pre_batch_indices = torch.concat(pre_batch_indices, dim=0)
batch_indices = torch.concat([pre_batch_indices, batch_indices], dim=0)
cond = {'condition_token': batch_indices}
elif reward_type == 'likelihood':
# return conditional frames + current frame
post_idxes = list(range(seq_len - subseq_len))
batch_indices = [indices[:, idx:idx+subseq_len+n_skip:n_skip] for idx in post_idxes]
batch_indices = torch.stack(batch_indices, dim=0)
batch_indices = batch_indices.squeeze(1).reshape(batch_indices.shape[0], -1)
if subseq_len - n_skip > 0:
pre_batch_indices = [indices[:, idx].tile((1, num_frames)) for idx in range(subseq_len)]
pre_batch_indices = torch.concat(pre_batch_indices, dim=0)
batch_indices = torch.concat([pre_batch_indices, batch_indices], dim=0)
cond = {'condition_token': batch_indices}
else:
raise NotImplementedError
x = x.flatten(0, 1)
cont = {'content_token': indices[0]}
return cont, cond, indices[0]
@torch.no_grad()
def calc_reward(self, imgs):
self.model.eval()
content, condition, _ = self.imgs_to_batch(imgs, reward_type=self.reward_type)
content_token = content['content_token']
condition_token = condition['condition_token']
rewards = self.calc_vlb(content_token, condition_token)
if self.use_std:
rewards_std = (rewards - self.stat[0]) / self.stat[1]
scaled_rewards = (1 - self.expl_scale) * rewards_std
return scaled_rewards
@torch.no_grad()
def calc_vlb(self, cont_emb, cond_emb):
x = cont_emb
b, device = x.size(0), x.device
transformer = self.model.transformer
cond_emb = transformer.condition_emb(cond_emb).float()
# t=0
start_step = transformer.num_timesteps
x_start = x
t = torch.full((b,), start_step-1, device=device, dtype=torch.long) | log_x_start = index_to_log_onehot(x_start, transformer.num_classes) | 1 | 2023-12-05 02:42:28+00:00 | 4k |
mkang315/ASF-YOLO | utils/loss.py | [
{
"identifier": "bbox_iou",
"path": "utils/metrics.py",
"snippet": "def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, EIoU=False, eps=1e-7):\n # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4)\n\n # Get the coordinates of bounding boxes\n if xywh: # transf... | import torch
import torch.nn as nn
from utils.metrics import bbox_iou
from utils.torch_utils import de_parallel | 2,790 | super().__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred = torch.sigmoid(pred) # prob from logits
dx = pred - true # reduce only missing label effects
# dx = (pred - true).abs() # reduce missing label and false label effects
alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
loss *= alpha_factor
return loss.mean()
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss)
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred_prob = torch.sigmoid(pred) # prob from logits
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = torch.abs(true - pred_prob) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class ComputeLoss:
sort_obj_iou = False
# Compute losses
def __init__(self, model, autobalance=False):
device = next(model.parameters()).device # get model device
h = model.hyp # hyperparameters
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
# Focal loss
g = h['fl_gamma'] # focal loss gamma
if g > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
m = de_parallel(model).model[-1] # Detect() module
self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7
self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
self.na = m.na # number of anchors
self.nc = m.nc # number of classes
self.nl = m.nl # number of layers
self.anchors = m.anchors
self.device = device
def __call__(self, p, targets): # predictions, targets
lcls = torch.zeros(1, device=self.device) # class loss
lbox = torch.zeros(1, device=self.device) # box loss
lobj = torch.zeros(1, device=self.device) # object loss
tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets
# Losses
for i, pi in enumerate(p): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj
n = b.shape[0] # number of targets
if n:
# pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0
pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions
# Regression
pxy = pxy.sigmoid() * 2 - 0.5
pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1) # predicted box
| # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Loss functions
"""
def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
# return positive, negative label smoothing BCE targets
return 1.0 - 0.5 * eps, 0.5 * eps
class BCEBlurWithLogitsLoss(nn.Module):
# BCEwithLogitLoss() with reduced missing label effects.
def __init__(self, alpha=0.05):
super().__init__()
self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
self.alpha = alpha
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred = torch.sigmoid(pred) # prob from logits
dx = pred - true # reduce only missing label effects
# dx = (pred - true).abs() # reduce missing label and false label effects
alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
loss *= alpha_factor
return loss.mean()
class FocalLoss(nn.Module):
# Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
# p_t = torch.exp(-loss)
# loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
# TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
pred_prob = torch.sigmoid(pred) # prob from logits
p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = (1.0 - p_t) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class QFocalLoss(nn.Module):
# Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
super().__init__()
self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
self.gamma = gamma
self.alpha = alpha
self.reduction = loss_fcn.reduction
self.loss_fcn.reduction = 'none' # required to apply FL to each element
def forward(self, pred, true):
loss = self.loss_fcn(pred, true)
pred_prob = torch.sigmoid(pred) # prob from logits
alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
modulating_factor = torch.abs(true - pred_prob) ** self.gamma
loss *= alpha_factor * modulating_factor
if self.reduction == 'mean':
return loss.mean()
elif self.reduction == 'sum':
return loss.sum()
else: # 'none'
return loss
class ComputeLoss:
sort_obj_iou = False
# Compute losses
def __init__(self, model, autobalance=False):
device = next(model.parameters()).device # get model device
h = model.hyp # hyperparameters
# Define criteria
BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
# Focal loss
g = h['fl_gamma'] # focal loss gamma
if g > 0:
BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
m = de_parallel(model).model[-1] # Detect() module
self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7
self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index
self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance
self.na = m.na # number of anchors
self.nc = m.nc # number of classes
self.nl = m.nl # number of layers
self.anchors = m.anchors
self.device = device
def __call__(self, p, targets): # predictions, targets
lcls = torch.zeros(1, device=self.device) # class loss
lbox = torch.zeros(1, device=self.device) # box loss
lobj = torch.zeros(1, device=self.device) # object loss
tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets
# Losses
for i, pi in enumerate(p): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj
n = b.shape[0] # number of targets
if n:
# pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0
pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions
# Regression
pxy = pxy.sigmoid() * 2 - 0.5
pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1) # predicted box | iou = bbox_iou(pbox, tbox[i], ESIoU=True).squeeze() # iou(prediction, target) | 0 | 2023-12-10 14:18:29+00:00 | 4k |
user1342/Tweezer | Tweezer/tweezer.py | [
{
"identifier": "GhidraBridge",
"path": "Tweezer/GhidraBridge/ghidra_bridge.py",
"snippet": "class GhidraBridge():\n def __init__(self):\n pass\n\n def _execute_blocking_command(self, command_as_list):\n if command_as_list != None:\n print(\"Executing command: {}\".format(... | import argparse
import re
import tempfile
from pathlib import Path
from pprint import pprint
from Tweezer.GhidraBridge.ghidra_bridge import GhidraBridge
from Tweezer.Model.model import Model
from Tweezer.Training.trainer import Trainer | 2,466 |
class Tweezer():
def __init__(self, model_path="TweezerMDL"):
self.model = None
self.model_path = model_path
def train(self, list_of_binary_folders):
self.extend_model_training(list_of_binary_folders)
def extend_model_training(self, list_of_binary_folders):
|
class Tweezer():
def __init__(self, model_path="TweezerMDL"):
self.model = None
self.model_path = model_path
def train(self, list_of_binary_folders):
self.extend_model_training(list_of_binary_folders)
def extend_model_training(self, list_of_binary_folders): | trainer = Trainer() | 2 | 2023-12-10 21:01:03+00:00 | 4k |
felixcheng97/AGAP | lib/dpvgo.py | [
{
"identifier": "Raw2Alpha",
"path": "lib/dvgo.py",
"snippet": "class Raw2Alpha(torch.autograd.Function):\n @staticmethod\n def forward(ctx, density, shift, interval):\n '''\n alpha = 1 - exp(-softplus(density + shift) * interval)\n = 1 - exp(-log(1 + exp(density + shift... | import os
import time
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import segment_coo
from . import grid
from .dvgo import Raw2Alpha, Alphas2Weights
from .dmpigo import create_full_step_id
from .networks import *
from torch.utils.cpp_extension import load | 3,498 | rgb_np = np.uint8(rgb.numpy() * 255)
return rgb_np
def _set_equ_resolution(self, equ_size):
self.equ_size = equ_size
print('dpvgo equ_size ', self.equ_size)
def _set_grid_resolution(self, num_voxels):
# Determine grid resolution
self.num_voxels = num_voxels
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.world_len = self.world_size[0].item()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('dpvgo voxel_size ', self.voxel_size)
print('dpvgo world_size ', self.world_size)
print('dpvgo voxel_size_base ', self.voxel_size_base)
print('dpvgo voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'voxel_size_ratio': self.voxel_size_ratio,
'mask_cache_world_size': list(self.mask_cache.mask.shape),
'fast_color_thres': self.fast_color_thres,
'contracted_norm': self.contracted_norm,
'density_type': self.density_type,
'k0_type': self.k0_type,
'density_config': self.density_config,
'k0_config': self.k0_config,
**self.rgbnet_kwargs,
'equ_size': self.equ_size,
'xyz_config': self.xyz_config,
'viewdirs_config': self.viewdirs_config,
'deformation_config': self.deformation_config,
}
@torch.no_grad()
def scale_equ_grid(self, equ_size, upsample):
print('dpvgo scale_equ_grid start')
ori_equ_size = self.equ_size
self._set_equ_resolution(equ_size)
print('dpvgo scale_equ_grid scale equ_size from', ori_equ_size, 'to', self.equ_size)
self.k0.scale_equ_grid(self.equ_size, upsample)
print('dpvgo k0 scale_image_grid finish')
@torch.no_grad()
def scale_volume_grid(self, num_voxels):
print('dpvgo: scale_volume_grid start')
ori_world_size = self.world_size
self._set_grid_resolution(num_voxels)
print('dpvgo: scale_volume_grid scale world_size from', ori_world_size.tolist(), 'to', self.world_size.tolist())
self.density.scale_volume_grid(self.world_size)
if np.prod(self.world_size.tolist()) <= 256**3:
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.world_size[0]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.world_size[1]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.world_size[2]),
), -1)
self_alpha = F.max_pool3d(self.activate_density(self.density.get_dense_grid()), kernel_size=3, padding=1, stride=1)[0,0]
self.mask_cache = grid.MaskGrid(
path=None, mask=self.mask_cache(self_grid_xyz) & (self_alpha>self.fast_color_thres),
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
print('dpvgo: scale_volume_grid finish')
@torch.no_grad()
def update_occupancy_cache(self):
ori_p = self.mask_cache.mask.float().mean().item()
cache_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.mask_cache.mask.shape[0]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.mask_cache.mask.shape[1]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.mask_cache.mask.shape[2]),
), -1)
cache_grid_density = self.density(cache_grid_xyz)[None,None]
cache_grid_alpha = self.activate_density(cache_grid_density)
cache_grid_alpha = F.max_pool3d(cache_grid_alpha, kernel_size=3, padding=1, stride=1)[0,0]
self.mask_cache.mask &= (cache_grid_alpha > self.fast_color_thres)
new_p = self.mask_cache.mask.float().mean().item()
print(f'dpvgo update mask_cache {ori_p:.4f} => {new_p:.4f}')
def update_occupancy_cache_lt_nviews(self, rays_o_tr, rays_d_tr, imsz, render_kwargs, maskout_lt_nviews):
print('dpvgo update mask_cache lt_nviews start')
eps_time = time.time()
count = torch.zeros_like(self.density.get_dense_grid()).long()
device = count.device
for rays_o_, rays_d_ in zip(rays_o_tr.split(imsz), rays_d_tr.split(imsz)):
ones = grid.DenseGrid(1, self.world_size, self.xyz_min, self.xyz_max)
for rays_o, rays_d in zip(rays_o_.split(8192), rays_d_.split(8192)):
ray_pts, inner_mask, t = self.sample_ray(
ori_rays_o=rays_o.to(device), ori_rays_d=rays_d.to(device),
**render_kwargs)
ones(ray_pts).sum().backward()
count.data += (ones.grid.grad > 1)
ori_p = self.mask_cache.mask.float().mean().item()
self.mask_cache.mask &= (count >= maskout_lt_nviews)[0,0]
new_p = self.mask_cache.mask.float().mean().item()
print(f'dpvgo update mask_cache {ori_p:.4f} => {new_p:.4f}')
eps_time = time.time() - eps_time
print(f'dpvgo update mask_cache lt_nviews finish (eps time:', eps_time, 'sec)')
def density_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.density.total_variation_add_grad(w, w, w, dense_mode)
def k0_total_variation_add_grad(self, weight, dense_mode):
wx = weight * self.equ_size[1] / 128
wy = weight * self.equ_size[0] / 128
self.k0.total_variation_2d_add_grad(wx, wy, dense_mode)
def activate_density(self, density, interval=None):
interval = interval if interval is not None else self.voxel_size_ratio
shape = density.shape
|
parent_dir = os.path.dirname(os.path.abspath(__file__))
ub360_utils_cuda = load(
name='ub360_utils_cuda',
sources=[
os.path.join(parent_dir, path)
for path in ['cuda/ub360_utils.cpp', 'cuda/ub360_utils_kernel.cu']],
verbose=True)
'''Model'''
class DirectPanoramaVoxGO(nn.Module):
def __init__(self, xyz_min, xyz_max,
num_voxels=0, num_voxels_base=0,
alpha_init=None,
mask_cache_world_size=None,
fast_color_thres=0,
contracted_norm='l2',
density_type='DenseGrid', k0_type='DenseGrid',
density_config={}, k0_config={},
rgbnet_dim=0,
rgbnet_depth=3, rgbnet_width=128,
equ_size=(768,1536),
xyz_config={},
viewdirs_config={},
deformation_config={},
**kwargs):
super(DirectPanoramaVoxGO, self).__init__()
self.register_buffer('xyz_min', torch.Tensor([-1,-1,-1]))
self.register_buffer('xyz_max', torch.Tensor([1,1,1]))
if isinstance(fast_color_thres, dict):
self._fast_color_thres = fast_color_thres
self.fast_color_thres = fast_color_thres[0]
else:
self._fast_color_thres = None
self.fast_color_thres = fast_color_thres
self.contracted_norm = contracted_norm
# determine based grid resolution
self.num_voxels_base = num_voxels_base
self.voxel_size_base = ((self.xyz_max - self.xyz_min).prod() / self.num_voxels_base).pow(1/3)
# determine init grid resolution
self._set_grid_resolution(num_voxels)
self._set_equ_resolution(equ_size)
# determine the density bias shift
self.alpha_init = alpha_init
self.register_buffer('act_shift', torch.FloatTensor([np.log(1/(1-alpha_init) - 1)]))
print('dpvgo: set density bias shift to', self.act_shift)
# init density voxel grid
self.density_type = density_type
self.density_config = density_config
self.density = grid.create_grid(
density_type, channels=1, world_size=self.world_size,
xyz_min=self.xyz_min, xyz_max=self.xyz_max,
config=self.density_config)
# init color representation
self.rgbnet_kwargs = {
'rgbnet_dim': rgbnet_dim,
'rgbnet_depth': rgbnet_depth, 'rgbnet_width': rgbnet_width,
}
self.k0_type = k0_type
self.k0_config = k0_config
if rgbnet_dim == 0:
self.k0_explicit_grid = grid.DenseEquExplicitGrid(channels=3, equ_size=self.equ_size)
self.k0_explicit_mlp = None
else:
self.k0_explicit_grid = grid.DenseEquExplicitGrid(channels=rgbnet_dim, equ_size=self.equ_size)
self.k0_explicit_mlp = nn.Sequential(
nn.Linear(rgbnet_dim, rgbnet_width), nn.ReLU(inplace=True),
*[
nn.Sequential(nn.Linear(rgbnet_width, rgbnet_width), nn.ReLU(inplace=True))
for _ in range(rgbnet_depth-2)
],
nn.Linear(rgbnet_width, 3),
)
nn.init.constant_(self.k0_explicit_mlp[-1].bias, 0)
self.k0_explicit = grid.DenseEquExplicit(explicit_grid=self.k0_explicit_grid, explicit_mlp=self.k0_explicit_mlp, sigmoid=self.k0_type=='DenseEquExplicit')
self.k0 = self.k0_explicit
self.xyz_config = xyz_config
self.viewdirs_config = viewdirs_config
self.deformation_config = deformation_config
self.xyz_enc_type = xyz_config['enc_type']
if self.xyz_enc_type == 'pe':
self.embedding_xyz = PositionalEncoding(in_channels=3, **xyz_config[self.xyz_enc_type])
elif self.xyz_enc_type == 'hash':
self.embedding_xyz = HashEncoding(**xyz_config[self.xyz_enc_type])
else:
raise NotImplementedError
self.viewdirs_enc_type = viewdirs_config['enc_type']
if self.viewdirs_enc_type == 'pe':
self.embedding_viewdirs = ViewdirEncoding(in_channels=3, **viewdirs_config[self.viewdirs_enc_type])
elif self.viewdirs_enc_type == 'hash':
self.embedding_viewdirs = HashEncoding(**viewdirs_config[self.viewdirs_enc_type])
else:
raise NotImplementedError
self.deform_type = deformation_config['deform_type']
in_channels = self.embedding_xyz.out_channels + self.embedding_viewdirs.out_channels
if self.deform_type == 'mlp':
self.deformation_field = DeformationMLP(in_channels=in_channels, **deformation_config[self.deform_type])
else:
self.deformation_field = DeformationTCNN(in_channels=in_channels, **deformation_config[self.deform_type])
print('dpvgo: densitye grid', self.density)
print('dpvgo: k0', self.k0)
print('dpvgo: deformation field', self.deformation_field)
print('dpvgo: embedding_xyz', self.embedding_xyz)
print('dpvgo: embedding_viewdirs', self.embedding_viewdirs)
# Using the coarse geometry if provided (used to determine known free space and unknown space)
# Re-implement as occupancy grid (2021/1/31)
if mask_cache_world_size is None:
mask_cache_world_size = self.world_size
mask = torch.ones(list(mask_cache_world_size), dtype=torch.bool)
self.mask_cache = grid.MaskGrid(
path=None, mask=mask,
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
def get_k0_grid_rgb(self):
rgb = self.k0.get_current_equ()[0].permute(1,2,0).detach().cpu()
rgb_np = np.uint8(rgb.numpy() * 255)
return rgb_np
def _set_equ_resolution(self, equ_size):
self.equ_size = equ_size
print('dpvgo equ_size ', self.equ_size)
def _set_grid_resolution(self, num_voxels):
# Determine grid resolution
self.num_voxels = num_voxels
self.voxel_size = ((self.xyz_max - self.xyz_min).prod() / num_voxels).pow(1/3)
self.world_size = ((self.xyz_max - self.xyz_min) / self.voxel_size).long()
self.world_len = self.world_size[0].item()
self.voxel_size_ratio = self.voxel_size / self.voxel_size_base
print('dpvgo voxel_size ', self.voxel_size)
print('dpvgo world_size ', self.world_size)
print('dpvgo voxel_size_base ', self.voxel_size_base)
print('dpvgo voxel_size_ratio', self.voxel_size_ratio)
def get_kwargs(self):
return {
'xyz_min': self.xyz_min.cpu().numpy(),
'xyz_max': self.xyz_max.cpu().numpy(),
'num_voxels': self.num_voxels,
'num_voxels_base': self.num_voxels_base,
'alpha_init': self.alpha_init,
'voxel_size_ratio': self.voxel_size_ratio,
'mask_cache_world_size': list(self.mask_cache.mask.shape),
'fast_color_thres': self.fast_color_thres,
'contracted_norm': self.contracted_norm,
'density_type': self.density_type,
'k0_type': self.k0_type,
'density_config': self.density_config,
'k0_config': self.k0_config,
**self.rgbnet_kwargs,
'equ_size': self.equ_size,
'xyz_config': self.xyz_config,
'viewdirs_config': self.viewdirs_config,
'deformation_config': self.deformation_config,
}
@torch.no_grad()
def scale_equ_grid(self, equ_size, upsample):
print('dpvgo scale_equ_grid start')
ori_equ_size = self.equ_size
self._set_equ_resolution(equ_size)
print('dpvgo scale_equ_grid scale equ_size from', ori_equ_size, 'to', self.equ_size)
self.k0.scale_equ_grid(self.equ_size, upsample)
print('dpvgo k0 scale_image_grid finish')
@torch.no_grad()
def scale_volume_grid(self, num_voxels):
print('dpvgo: scale_volume_grid start')
ori_world_size = self.world_size
self._set_grid_resolution(num_voxels)
print('dpvgo: scale_volume_grid scale world_size from', ori_world_size.tolist(), 'to', self.world_size.tolist())
self.density.scale_volume_grid(self.world_size)
if np.prod(self.world_size.tolist()) <= 256**3:
self_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.world_size[0]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.world_size[1]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.world_size[2]),
), -1)
self_alpha = F.max_pool3d(self.activate_density(self.density.get_dense_grid()), kernel_size=3, padding=1, stride=1)[0,0]
self.mask_cache = grid.MaskGrid(
path=None, mask=self.mask_cache(self_grid_xyz) & (self_alpha>self.fast_color_thres),
xyz_min=self.xyz_min, xyz_max=self.xyz_max)
print('dpvgo: scale_volume_grid finish')
@torch.no_grad()
def update_occupancy_cache(self):
ori_p = self.mask_cache.mask.float().mean().item()
cache_grid_xyz = torch.stack(torch.meshgrid(
torch.linspace(self.xyz_min[0], self.xyz_max[0], self.mask_cache.mask.shape[0]),
torch.linspace(self.xyz_min[1], self.xyz_max[1], self.mask_cache.mask.shape[1]),
torch.linspace(self.xyz_min[2], self.xyz_max[2], self.mask_cache.mask.shape[2]),
), -1)
cache_grid_density = self.density(cache_grid_xyz)[None,None]
cache_grid_alpha = self.activate_density(cache_grid_density)
cache_grid_alpha = F.max_pool3d(cache_grid_alpha, kernel_size=3, padding=1, stride=1)[0,0]
self.mask_cache.mask &= (cache_grid_alpha > self.fast_color_thres)
new_p = self.mask_cache.mask.float().mean().item()
print(f'dpvgo update mask_cache {ori_p:.4f} => {new_p:.4f}')
def update_occupancy_cache_lt_nviews(self, rays_o_tr, rays_d_tr, imsz, render_kwargs, maskout_lt_nviews):
print('dpvgo update mask_cache lt_nviews start')
eps_time = time.time()
count = torch.zeros_like(self.density.get_dense_grid()).long()
device = count.device
for rays_o_, rays_d_ in zip(rays_o_tr.split(imsz), rays_d_tr.split(imsz)):
ones = grid.DenseGrid(1, self.world_size, self.xyz_min, self.xyz_max)
for rays_o, rays_d in zip(rays_o_.split(8192), rays_d_.split(8192)):
ray_pts, inner_mask, t = self.sample_ray(
ori_rays_o=rays_o.to(device), ori_rays_d=rays_d.to(device),
**render_kwargs)
ones(ray_pts).sum().backward()
count.data += (ones.grid.grad > 1)
ori_p = self.mask_cache.mask.float().mean().item()
self.mask_cache.mask &= (count >= maskout_lt_nviews)[0,0]
new_p = self.mask_cache.mask.float().mean().item()
print(f'dpvgo update mask_cache {ori_p:.4f} => {new_p:.4f}')
eps_time = time.time() - eps_time
print(f'dpvgo update mask_cache lt_nviews finish (eps time:', eps_time, 'sec)')
def density_total_variation_add_grad(self, weight, dense_mode):
w = weight * self.world_size.max() / 128
self.density.total_variation_add_grad(w, w, w, dense_mode)
def k0_total_variation_add_grad(self, weight, dense_mode):
wx = weight * self.equ_size[1] / 128
wy = weight * self.equ_size[0] / 128
self.k0.total_variation_2d_add_grad(wx, wy, dense_mode)
def activate_density(self, density, interval=None):
interval = interval if interval is not None else self.voxel_size_ratio
shape = density.shape | return Raw2Alpha.apply(density.flatten(), self.act_shift, interval).reshape(shape) | 0 | 2023-12-11 05:49:46+00:00 | 4k |
Vill-Lab/2024-AAAI-HPT | datasets/eurosat.py | [
{
"identifier": "OxfordPets",
"path": "datasets/oxford_pets.py",
"snippet": "class OxfordPets(DatasetBase):\n\n dataset_dir = \"oxford_pets\"\n\n def __init__(self, cfg):\n root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))\n self.dataset_dir = os.path.join(root, self.datas... | import os
import pickle
from dassl.data.datasets import DATASET_REGISTRY, Datum, DatasetBase
from dassl.utils import mkdir_if_missing
from .oxford_pets import OxfordPets
from .dtd import DescribableTextures as DTD | 2,940 |
NEW_CLASSNAMES = {
"AnnualCrop": "Annual Crop Land",
"Forest": "Forest",
"HerbaceousVegetation": "Herbaceous Vegetation Land",
"Highway": "Highway or Road",
"Industrial": "Industrial Buildings",
"Pasture": "Pasture Land",
"PermanentCrop": "Permanent Crop Land",
"Residential": "Residential Buildings",
"River": "River",
"SeaLake": "Sea or Lake",
}
@DATASET_REGISTRY.register()
class EuroSAT(DatasetBase):
dataset_dir = "eurosat"
def __init__(self, cfg):
root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = os.path.join(root, self.dataset_dir)
self.image_dir = os.path.join(self.dataset_dir, "2750")
self.split_path = os.path.join(self.dataset_dir, "split_zhou_EuroSAT.json")
self.split_fewshot_dir = os.path.join(self.dataset_dir, "split_fewshot")
mkdir_if_missing(self.split_fewshot_dir)
if os.path.exists(self.split_path):
train, val, test = OxfordPets.read_split(self.split_path, self.image_dir)
else:
|
NEW_CLASSNAMES = {
"AnnualCrop": "Annual Crop Land",
"Forest": "Forest",
"HerbaceousVegetation": "Herbaceous Vegetation Land",
"Highway": "Highway or Road",
"Industrial": "Industrial Buildings",
"Pasture": "Pasture Land",
"PermanentCrop": "Permanent Crop Land",
"Residential": "Residential Buildings",
"River": "River",
"SeaLake": "Sea or Lake",
}
@DATASET_REGISTRY.register()
class EuroSAT(DatasetBase):
dataset_dir = "eurosat"
def __init__(self, cfg):
root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = os.path.join(root, self.dataset_dir)
self.image_dir = os.path.join(self.dataset_dir, "2750")
self.split_path = os.path.join(self.dataset_dir, "split_zhou_EuroSAT.json")
self.split_fewshot_dir = os.path.join(self.dataset_dir, "split_fewshot")
mkdir_if_missing(self.split_fewshot_dir)
if os.path.exists(self.split_path):
train, val, test = OxfordPets.read_split(self.split_path, self.image_dir)
else: | train, val, test = DTD.read_and_split_data(self.image_dir, new_cnames=NEW_CLASSNAMES) | 0 | 2023-12-11 03:01:58+00:00 | 4k |
WalBouss/GEM | gem/gem_wrapper.py | [
{
"identifier": "SelfSelfAttention",
"path": "gem/gem_utils.py",
"snippet": "class SelfSelfAttention(nn.Module):\n def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., ss_attn_iter=1,\n ss_attn_temp=None):\n super().__init__()\n ... | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from open_clip.transformer import VisionTransformer
from .gem_utils import SelfSelfAttention, GEMResidualBlock, modified_vit_forward | 2,201 |
class GEMWrapper(nn.Module):
def __init__(self, model, tokenizer, depth=7, ss_attn_iter=1, ss_attn_temp=None):
super(GEMWrapper, self).__init__()
self.model = model
self.tokenizer = tokenizer
self.depth = depth
self.ss_attn_iter = ss_attn_iter
self.ss_attn_temp = ss_attn_temp
self.patch_size = self.model.visual.patch_size[0]
self.apply_gem()
def apply_gem(self):
for i in range(1, self.depth):
# Extract info from the original ViT
num_heads = self.model.visual.transformer.resblocks[-i].attn.num_heads
dim = int(self.model.visual.transformer.resblocks[-i].attn.head_dim * num_heads)
qkv_bias = True
# Init the self-self attention layer
ss_attn = SelfSelfAttention(dim=dim, num_heads=num_heads, qkv_bias=qkv_bias,
ss_attn_iter=self.ss_attn_iter, ss_attn_temp=self.ss_attn_temp)
# Copy necessary weights
ss_attn.qkv.weight.data = self.model.visual.transformer.resblocks[-i].attn.in_proj_weight.clone()
ss_attn.qkv.bias.data = self.model.visual.transformer.resblocks[-i].attn.in_proj_bias.clone()
ss_attn.proj.weight.data = self.model.visual.transformer.resblocks[-i].attn.out_proj.weight.clone()
ss_attn.proj.bias.data = self.model.visual.transformer.resblocks[-i].attn.out_proj.bias.clone()
# Swap the original Attention with our SelfSelfAttention
self.model.visual.transformer.resblocks[-i].attn = ss_attn
# Wrap Residual block to handle SelfSelfAttention outputs
|
class GEMWrapper(nn.Module):
def __init__(self, model, tokenizer, depth=7, ss_attn_iter=1, ss_attn_temp=None):
super(GEMWrapper, self).__init__()
self.model = model
self.tokenizer = tokenizer
self.depth = depth
self.ss_attn_iter = ss_attn_iter
self.ss_attn_temp = ss_attn_temp
self.patch_size = self.model.visual.patch_size[0]
self.apply_gem()
def apply_gem(self):
for i in range(1, self.depth):
# Extract info from the original ViT
num_heads = self.model.visual.transformer.resblocks[-i].attn.num_heads
dim = int(self.model.visual.transformer.resblocks[-i].attn.head_dim * num_heads)
qkv_bias = True
# Init the self-self attention layer
ss_attn = SelfSelfAttention(dim=dim, num_heads=num_heads, qkv_bias=qkv_bias,
ss_attn_iter=self.ss_attn_iter, ss_attn_temp=self.ss_attn_temp)
# Copy necessary weights
ss_attn.qkv.weight.data = self.model.visual.transformer.resblocks[-i].attn.in_proj_weight.clone()
ss_attn.qkv.bias.data = self.model.visual.transformer.resblocks[-i].attn.in_proj_bias.clone()
ss_attn.proj.weight.data = self.model.visual.transformer.resblocks[-i].attn.out_proj.weight.clone()
ss_attn.proj.bias.data = self.model.visual.transformer.resblocks[-i].attn.out_proj.bias.clone()
# Swap the original Attention with our SelfSelfAttention
self.model.visual.transformer.resblocks[-i].attn = ss_attn
# Wrap Residual block to handle SelfSelfAttention outputs | self.model.visual.transformer.resblocks[-i] = GEMResidualBlock(self.model.visual.transformer.resblocks[-i]) | 1 | 2023-12-05 08:23:35+00:00 | 4k |
JeffersonQin/DungeonAssistant | registration.py | [
{
"identifier": "o3dobj",
"path": "utils/o3dobj.py",
"snippet": "def get_o3d_unit_block_at_origin():\ndef get_o3d_trajectory_object(points, color=(1, 0, 0)):\n def transform_o3d_format(points):"
},
{
"identifier": "io",
"path": "utils/io.py",
"snippet": "def load_point_clouds(\n po... | import json
import argparse
import os
import os.path as osp
import time
import open3d as o3d
import numpy as np
import copy
import matplotlib.pyplot as plt
from utils import o3dobj
from utils import io
from utils import tfm | 2,165 | target,
max_correspondence_distance,
estimation_method=estimation,
criteria=criteria,
)
return result
if __name__ == "__main__":
voxel_size_fgr = args.voxel_size_fgr
voxel_size_icp = args.voxel_size_icp
(
cloud_1,
cloud_2,
cloud_1_down,
cloud_2_down,
cloud_1_fpfh,
cloud_2_fpfh,
) = prepare_dataset(voxel_size=voxel_size_fgr)
color_1 = [0.9450980392, 0.5764705882, 0.7098039216]
color_2 = [0.11, 0.72, 0.89]
cloud_1.paint_uniform_color(color_1)
cloud_2.paint_uniform_color(color_2)
cloud_1_down.paint_uniform_color(color_1)
cloud_2_down.paint_uniform_color(color_2)
# axis
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])
# unit block
unit_block = o3dobj.get_o3d_unit_block_at_origin()
# Visualize point cloud
print("Initial preview ... Close window to continue")
o3d.visualization.draw_geometries([cloud_1_down, cloud_2_down, axis, unit_block])
# FGR
transformation_fast = None
if args.fast_cache != "":
if osp.exists(args.fast_cache):
print("Loading fast global registration cache from: ", args.fast_cache)
transformation_fast = np.load(args.fast_cache, allow_pickle=True)
if transformation_fast is None:
print(
"Fast global registration cache not found. Running fast global registration..."
)
start = time.time()
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug
) as cm:
result_fast = execute_fast_global_registration(
cloud_1_down, cloud_2_down, cloud_1_fpfh, cloud_2_fpfh, voxel_size_fgr
)
print(f"Fast global registration took {(time.time() - start):.3f} sec.\n")
print(result_fast)
transformation_fast = result_fast.transformation
np.save("registration_fgr.npy", transformation_fast)
cloud_1.transform(transformation_fast)
# Visualize point cloud
print("FGR preview ... Close window to continue")
o3d.visualization.draw_geometries([cloud_1, cloud_2, axis, unit_block])
# Vanilla ICP
if not args.skip_icp:
(
_,
_,
cloud_1_down,
cloud_2_down,
_,
_,
) = prepare_dataset(voxel_size=voxel_size_icp)
cloud_1_down.transform(transformation_fast)
transformation_icp = None
if args.icp_cache != "":
if osp.exists(args.icp_cache):
print("Loading icp cache from: ", args.icp_cache)
transformation_icp = np.load(args.icp_cache, allow_pickle=True)
if transformation_icp is None:
print("ICP cache not found. Running ICP...")
s = time.time()
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug
) as cm:
result_icp = execute_vanilla_icp(cloud_1_down, cloud_2_down)
icp_time = time.time() - s
print("Time taken by ICP: ", icp_time)
print("Inlier Fitness: ", result_icp.fitness)
print("Inlier RMSE: ", result_icp.inlier_rmse)
transformation_icp = result_icp.transformation
np.save("registration_icp.npy", transformation_icp)
cloud_1.transform(transformation_icp)
else:
transformation_icp = np.identity(4)
if trajectory_file_path_1 != "":
# trajectory
points_1, timestamps_1 = io.load_coordinates_and_timestamps(
trajectory_file_path_1
)
# transformation
|
parser = argparse.ArgumentParser()
parser.add_argument(
"--pointcloud1",
type=str,
default="pointcloud1.ply",
help="first point cloud file path (1 --[transform]-> 2)",
)
parser.add_argument(
"--pointcloud2",
type=str,
default="pointcloud2.ply",
help="second point cloud file path (1 --[transform]-> 2)",
)
parser.add_argument(
"--trajectory1",
type=str,
default="trajectory1.json",
help="first trajectory file path",
)
parser.add_argument(
"--trajectory2",
type=str,
default="trajectory2.json",
help="second trajectory file path",
)
parser.add_argument(
"--fast_cache",
type=str,
default="",
help="transformation cache of fast global registration if available. default is none",
)
parser.add_argument(
"--icp_cache",
type=str,
default="",
help="transformation cache of icp if available. default is none",
)
parser.add_argument(
"--voxel_size_fgr",
type=float,
default=0.05,
help="voxel size for global fast registration downsampling. default is 0.05",
)
parser.add_argument(
"--voxel_size_icp",
type=float,
default=0.05,
help="voxel size for icp downsampling. default is 0.05",
)
parser.add_argument("--skip_icp", action="store_true", help="skip icp and only run fgr")
parser.add_argument(
"--transformed_trajectory_out",
type=str,
default="trajectory_1.jsonl",
help="output trajectory of the transformed trajectory 1 (to trajectory 2)",
)
args = parser.parse_args()
pointcloud_file_path_1 = args.pointcloud1
pointcloud_file_path_2 = args.pointcloud2
trajectory_file_path_1 = args.trajectory1
trajectory_file_path_2 = args.trajectory2
def preprocess_point_cloud(pcd, voxel_size):
"""Downsamples the point cloud and computes the normals and FPFH features"""
print(f":: Downsample with a voxel size {voxel_size:.3f}.")
pcd_down = pcd.voxel_down_sample(voxel_size)
radius_normal = voxel_size * 2
print(f":: Estimate normal with search radius {radius_normal:.3f}.")
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30)
)
radius_feature = voxel_size * 5
print(f":: Compute FPFH feature with search radius {radius_feature:.3f}.")
pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature(
pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100),
)
return pcd_down, pcd_fpfh
def prepare_dataset(voxel_size):
"""Loads two point clouds and downsamples them."""
print(":: Load two point clouds")
source = o3d.io.read_point_cloud(pointcloud_file_path_1)
target = o3d.io.read_point_cloud(pointcloud_file_path_2)
source_down, source_fpfh = preprocess_point_cloud(source, voxel_size)
target_down, target_fpfh = preprocess_point_cloud(target, voxel_size)
return source, target, source_down, target_down, source_fpfh, target_fpfh
def execute_fast_global_registration(
source_down, target_down, source_fpfh, target_fpfh, voxel_size
):
"""Performs fast global registration on the downsampled point clouds"""
distance_threshold = voxel_size * 0.5
print(
f":: Apply fast global registration with distance threshold {distance_threshold:.3f}"
)
result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching(
source_down,
target_down,
source_fpfh,
target_fpfh,
o3d.pipelines.registration.FastGlobalRegistrationOption(
maximum_correspondence_distance=distance_threshold
),
)
return result
def execute_vanilla_icp(source, target):
"""Performs vanilla ICP on the point clouds"""
estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane()
max_correspondence_distance = 0.5
# Convergence-Criteria for Vanilla ICP
criteria = o3d.pipelines.registration.ICPConvergenceCriteria(
relative_fitness=0.000001, relative_rmse=0.000001, max_iteration=50
)
result = o3d.pipelines.registration.registration_icp(
source,
target,
max_correspondence_distance,
estimation_method=estimation,
criteria=criteria,
)
return result
if __name__ == "__main__":
voxel_size_fgr = args.voxel_size_fgr
voxel_size_icp = args.voxel_size_icp
(
cloud_1,
cloud_2,
cloud_1_down,
cloud_2_down,
cloud_1_fpfh,
cloud_2_fpfh,
) = prepare_dataset(voxel_size=voxel_size_fgr)
color_1 = [0.9450980392, 0.5764705882, 0.7098039216]
color_2 = [0.11, 0.72, 0.89]
cloud_1.paint_uniform_color(color_1)
cloud_2.paint_uniform_color(color_2)
cloud_1_down.paint_uniform_color(color_1)
cloud_2_down.paint_uniform_color(color_2)
# axis
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])
# unit block
unit_block = o3dobj.get_o3d_unit_block_at_origin()
# Visualize point cloud
print("Initial preview ... Close window to continue")
o3d.visualization.draw_geometries([cloud_1_down, cloud_2_down, axis, unit_block])
# FGR
transformation_fast = None
if args.fast_cache != "":
if osp.exists(args.fast_cache):
print("Loading fast global registration cache from: ", args.fast_cache)
transformation_fast = np.load(args.fast_cache, allow_pickle=True)
if transformation_fast is None:
print(
"Fast global registration cache not found. Running fast global registration..."
)
start = time.time()
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug
) as cm:
result_fast = execute_fast_global_registration(
cloud_1_down, cloud_2_down, cloud_1_fpfh, cloud_2_fpfh, voxel_size_fgr
)
print(f"Fast global registration took {(time.time() - start):.3f} sec.\n")
print(result_fast)
transformation_fast = result_fast.transformation
np.save("registration_fgr.npy", transformation_fast)
cloud_1.transform(transformation_fast)
# Visualize point cloud
print("FGR preview ... Close window to continue")
o3d.visualization.draw_geometries([cloud_1, cloud_2, axis, unit_block])
# Vanilla ICP
if not args.skip_icp:
(
_,
_,
cloud_1_down,
cloud_2_down,
_,
_,
) = prepare_dataset(voxel_size=voxel_size_icp)
cloud_1_down.transform(transformation_fast)
transformation_icp = None
if args.icp_cache != "":
if osp.exists(args.icp_cache):
print("Loading icp cache from: ", args.icp_cache)
transformation_icp = np.load(args.icp_cache, allow_pickle=True)
if transformation_icp is None:
print("ICP cache not found. Running ICP...")
s = time.time()
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug
) as cm:
result_icp = execute_vanilla_icp(cloud_1_down, cloud_2_down)
icp_time = time.time() - s
print("Time taken by ICP: ", icp_time)
print("Inlier Fitness: ", result_icp.fitness)
print("Inlier RMSE: ", result_icp.inlier_rmse)
transformation_icp = result_icp.transformation
np.save("registration_icp.npy", transformation_icp)
cloud_1.transform(transformation_icp)
else:
transformation_icp = np.identity(4)
if trajectory_file_path_1 != "":
# trajectory
points_1, timestamps_1 = io.load_coordinates_and_timestamps(
trajectory_file_path_1
)
# transformation | points_1 = tfm.transform_trajectory(points_1, transformation_fast) | 2 | 2023-12-08 19:52:08+00:00 | 4k |
KAIST-VICLab/From_Ground_To_Objects | networks/depth_decoder.py | [
{
"identifier": "ConvBlock",
"path": "networks/layers.py",
"snippet": "class ConvBlock(nn.Module):\r\n \"\"\"Layer to perform a convolution followed by ELU\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels):\r\n super(ConvBlock, self).__init__()\r\n\r\n self.conv = C... | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from .layers import ConvBlock, Conv3x3, upsample, disp_to_depth, coords_to_normals
from timm.models.layers import trunc_normal_
from .cadepth import SPM, DEM | 1,801 | # Copyright Niantic 2021. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the ManyDepth licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
class DepthDecoder(nn.Module):
def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True,
opt=None, backproject_depth=None, min_depth=0.1, max_depth=100):
super(DepthDecoder, self).__init__()
self.num_output_channels = num_output_channels
self.use_skips = use_skips
self.upsample_mode = 'nearest'
self.scales = scales
self.opt = opt
self.num_ch_enc = num_ch_enc
self.num_ch_dec = np.array([16, 32, 64, 128, 256])
self.backproject_depth = backproject_depth
self.min_depth = min_depth
self.max_depth = max_depth
# decoder
self.convs = OrderedDict()
for i in range(4, -1, -1):
# upconv_0
num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
if self.opt["use_surface_normal"] and i != 4:
num_ch_in += 3
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)
# upconv_1
num_ch_in = self.num_ch_dec[i]
if self.use_skips and i > 0:
num_ch_in += self.num_ch_enc[i - 1]
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out)
if self.opt['cadepth']:
self.convs[("dem", i)] = DEM(num_ch_in)
for s in self.scales:
self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels)
if self.opt['cadepth']:
self.spm = SPM(self.num_ch_enc[-1])
self.decoder = nn.ModuleList(list(self.convs.values()))
self.sigmoid = nn.Sigmoid()
def disp_to_surface_normal(self, disp, inputs, scale):
# useful information from disparity map to scale invariant surface normal vector map
B, _, H, W = disp.shape
_, depth = disp_to_depth(disp, self.min_depth, self.max_depth)
coords = self.backproject_depth[scale](depth, inputs[('inv_K', scale)])
normals = coords_to_normals(coords[:, :3].view(-1, 3, H, W))
return (normals + 1) / 2
def forward(self, input_features, inputs=None):
self.outputs = {}
# decoder
x = input_features[-1]
if self.opt["cadepth"]:
x = self.spm(x)
for i in range(4, -1, -1):
x = self.convs[("upconv", i, 0)](x)
| # Copyright Niantic 2021. Patent Pending. All rights reserved.
#
# This software is licensed under the terms of the ManyDepth licence
# which allows for non-commercial use only, the full terms of which are made
# available in the LICENSE file.
class DepthDecoder(nn.Module):
def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True,
opt=None, backproject_depth=None, min_depth=0.1, max_depth=100):
super(DepthDecoder, self).__init__()
self.num_output_channels = num_output_channels
self.use_skips = use_skips
self.upsample_mode = 'nearest'
self.scales = scales
self.opt = opt
self.num_ch_enc = num_ch_enc
self.num_ch_dec = np.array([16, 32, 64, 128, 256])
self.backproject_depth = backproject_depth
self.min_depth = min_depth
self.max_depth = max_depth
# decoder
self.convs = OrderedDict()
for i in range(4, -1, -1):
# upconv_0
num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1]
if self.opt["use_surface_normal"] and i != 4:
num_ch_in += 3
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out)
# upconv_1
num_ch_in = self.num_ch_dec[i]
if self.use_skips and i > 0:
num_ch_in += self.num_ch_enc[i - 1]
num_ch_out = self.num_ch_dec[i]
self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out)
if self.opt['cadepth']:
self.convs[("dem", i)] = DEM(num_ch_in)
for s in self.scales:
self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels)
if self.opt['cadepth']:
self.spm = SPM(self.num_ch_enc[-1])
self.decoder = nn.ModuleList(list(self.convs.values()))
self.sigmoid = nn.Sigmoid()
def disp_to_surface_normal(self, disp, inputs, scale):
# useful information from disparity map to scale invariant surface normal vector map
B, _, H, W = disp.shape
_, depth = disp_to_depth(disp, self.min_depth, self.max_depth)
coords = self.backproject_depth[scale](depth, inputs[('inv_K', scale)])
normals = coords_to_normals(coords[:, :3].view(-1, 3, H, W))
return (normals + 1) / 2
def forward(self, input_features, inputs=None):
self.outputs = {}
# decoder
x = input_features[-1]
if self.opt["cadepth"]:
x = self.spm(x)
for i in range(4, -1, -1):
x = self.convs[("upconv", i, 0)](x) | x = [upsample(x)] | 2 | 2023-12-12 08:29:30+00:00 | 4k |
marc-rigter/polygrad-world-models | polygrad/models/diffusion.py | [
{
"identifier": "default_sample_fn",
"path": "polygrad/sampling/functions.py",
"snippet": "@torch.no_grad()\ndef default_sample_fn(model, x, act, cond, t, q_sample, condition_noise_scale, policy, normalizer):\n timesteps = make_timesteps(x.shape[0], t, x.device)\n\n # rescale actions\n act_scal... | from collections import namedtuple
from torch import nn
from polygrad.sampling.functions import default_sample_fn, policy_guided_sample_fn
from .helpers import (
cosine_beta_schedule,
extract,
apply_conditioning,
Losses,
)
import numpy as np
import torch
import pdb
import torch.nn.functional as F
import polygrad.utils as utils | 3,426 | action_weight=1.0, loss_discount=1.0, loss_weights=None,
noise_sched_tau=1.0, mask_obs=False, max_prediction_weight=1.0,
temporal_loss_weight=1.0, action_condition_noise_scale=1.0,
):
super().__init__()
self.horizon = horizon
self.observation_dim = observation_dim
self.action_dim = action_dim
self.transition_dim = observation_dim + 2 # obs + reward + terminals
self.model = model
self.action_condition_noise_scale = action_condition_noise_scale
betas = cosine_beta_schedule(n_timesteps, tau=noise_sched_tau)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, axis=0)
alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])
self.n_timesteps = int(n_timesteps)
self.clip_denoised = clip_denoised
self.predict_epsilon = predict_epsilon
self.register_buffer('betas', betas)
self.register_buffer('alphas_cumprod', alphas_cumprod)
self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
self.register_buffer('posterior_variance', posterior_variance)
## log calculation clipped because the posterior variance
## is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped',
torch.log(torch.clamp(posterior_variance, min=1e-20)))
coef1 = betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)
coef2 = (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)
self.register_buffer('posterior_mean_coef1', coef1)
self.register_buffer('posterior_mean_coef2', coef2)
## get loss coefficients and initialize objective
loss_weights = torch.linspace(temporal_loss_weight, 1 / temporal_loss_weight, horizon)
loss_weights = loss_weights[None, :, None]
self.loss_fn = Losses[loss_type](loss_weights)
def get_loss_weights(self, action_weight, discount, weights_dict):
'''
sets loss coefficients for trajectory
action_weight : float
coefficient on first action loss
discount : float
multiplies t^th timestep of trajectory loss by discount**t
weights_dict : dict
{ i: c } multiplies dimension i of observation loss by c
'''
self.action_weight = action_weight
dim_weights = torch.ones(self.transition_dim, dtype=torch.float32)
## set loss coefficients for dimensions of observation
if weights_dict is None: weights_dict = {}
for ind, w in weights_dict.items():
dim_weights[ind] *= w
## decay loss with trajectory timestep: discount**t
discounts = discount ** torch.arange(self.horizon, dtype=torch.float)
discounts = discounts / discounts.mean()
loss_weights = torch.einsum('h,t->ht', discounts, dim_weights)
## manually set a0 weight
loss_weights[0, (-1 - self.action_dim):-1] = action_weight
return loss_weights
#------------------------------------------ sampling ------------------------------------------#
def predict_start_from_noise(self, x_t, t, noise):
'''
if self.predict_epsilon, model output is (scaled) noise;
otherwise, model predicts x0 directly
'''
if self.predict_epsilon:
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
else:
return noise
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, act, t):
with torch.autocast(device_type="cuda", dtype=torch.float16):
prediction = self.model(x, act, t)
x_recon = self.predict_start_from_noise(x, t=t, noise=prediction)
if self.clip_denoised:
x_recon.clamp_(-1., 1.)
else:
assert RuntimeError()
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
def p_sample_loop(self, shape, cond, act=None, normalizer=None, policy=None, return_sequence=False, verbose=True, return_chain=False, **sample_kwargs):
if policy is None:
|
Sample = namedtuple('Sample', 'trajectories chains recons_after_guide recons_before_guide')
def sort_by_values(x, values):
inds = torch.argsort(values, descending=True)
x = x[inds]
values = values[inds]
return x, values
def make_timesteps(batch_size, i, device):
t = torch.full((batch_size,), i, device=device, dtype=torch.long)
return t
class GaussianDiffusion(nn.Module):
def __init__(self, model, horizon, observation_dim, action_dim, n_timesteps=1000,
loss_type='l1', clip_denoised=False, predict_epsilon=True,
action_weight=1.0, loss_discount=1.0, loss_weights=None,
noise_sched_tau=1.0, mask_obs=False, max_prediction_weight=1.0,
temporal_loss_weight=1.0, action_condition_noise_scale=1.0,
):
super().__init__()
self.horizon = horizon
self.observation_dim = observation_dim
self.action_dim = action_dim
self.transition_dim = observation_dim + 2 # obs + reward + terminals
self.model = model
self.action_condition_noise_scale = action_condition_noise_scale
betas = cosine_beta_schedule(n_timesteps, tau=noise_sched_tau)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, axis=0)
alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]])
self.n_timesteps = int(n_timesteps)
self.clip_denoised = clip_denoised
self.predict_epsilon = predict_epsilon
self.register_buffer('betas', betas)
self.register_buffer('alphas_cumprod', alphas_cumprod)
self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
self.register_buffer('posterior_variance', posterior_variance)
## log calculation clipped because the posterior variance
## is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped',
torch.log(torch.clamp(posterior_variance, min=1e-20)))
coef1 = betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)
coef2 = (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)
self.register_buffer('posterior_mean_coef1', coef1)
self.register_buffer('posterior_mean_coef2', coef2)
## get loss coefficients and initialize objective
loss_weights = torch.linspace(temporal_loss_weight, 1 / temporal_loss_weight, horizon)
loss_weights = loss_weights[None, :, None]
self.loss_fn = Losses[loss_type](loss_weights)
def get_loss_weights(self, action_weight, discount, weights_dict):
    '''
    Build the per-(timestep, dimension) loss weight matrix for a trajectory.

    action_weight : float
        coefficient applied to the first-timestep action loss
    discount : float
        the t-th trajectory step is scaled by discount**t (mean-normalized)
    weights_dict : dict
        { i: c } scales observation dimension i of the loss by c
    '''
    self.action_weight = action_weight

    # Per-dimension weights, optionally rescaled by the caller-supplied dict.
    per_dim = torch.ones(self.transition_dim, dtype=torch.float32)
    for idx, scale in (weights_dict or {}).items():
        per_dim[idx] *= scale

    # Per-timestep discounting, normalized to mean 1 so the overall loss
    # magnitude is unchanged.
    per_step = discount ** torch.arange(self.horizon, dtype=torch.float)
    per_step = per_step / per_step.mean()

    # Outer product -> (horizon, transition_dim) weight matrix.
    weights = torch.einsum('h,t->ht', per_step, per_dim)

    # Manually pin the weight of the first action (a0).
    weights[0, (-1 - self.action_dim):-1] = action_weight
    return weights
#------------------------------------------ sampling ------------------------------------------#
def predict_start_from_noise(self, x_t, t, noise):
    '''
    Recover x0 from a noisy sample x_t and the network output.

    When self.predict_epsilon is set the model output is (scaled) noise and
    x0 is reconstructed from it; otherwise the network already predicts x0
    directly and its output is returned unchanged.
    '''
    if not self.predict_epsilon:
        return noise
    recip = extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape)
    recipm1 = extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
    return recip * x_t - recipm1 * noise
def q_posterior(self, x_start, x_t, t):
    '''
    Gaussian posterior q(x_{t-1} | x_t, x_0): returns (mean, variance,
    clipped log-variance), with the precomputed coefficients gathered at
    timestep t and broadcast to the shape of x_t.
    '''
    coef1 = extract(self.posterior_mean_coef1, t, x_t.shape)
    coef2 = extract(self.posterior_mean_coef2, t, x_t.shape)
    mean = coef1 * x_start + coef2 * x_t
    variance = extract(self.posterior_variance, t, x_t.shape)
    log_variance = extract(self.posterior_log_variance_clipped, t, x_t.shape)
    return mean, variance, log_variance
def p_mean_variance(self, x, act, t):
    '''
    One reverse-diffusion step: predict x0 from the model output, then
    return the posterior mean/variance of q(x_{t-1} | x_t, x0).

    Raises:
        RuntimeError: if `self.clip_denoised` is False (this sampler only
            supports clipped reconstructions).
    '''
    # Run the denoiser under fp16 autocast; assumes a CUDA device.
    with torch.autocast(device_type="cuda", dtype=torch.float16):
        prediction = self.model(x, act, t)

    x_recon = self.predict_start_from_noise(x, t=t, noise=prediction)

    if self.clip_denoised:
        x_recon.clamp_(-1., 1.)
    else:
        # Bug fix: the original `assert RuntimeError()` never fires because
        # an exception *instance* is truthy, silently ignoring the invalid
        # configuration. Raise explicitly instead.
        raise RuntimeError("p_mean_variance requires clip_denoised=True")

    model_mean, posterior_variance, posterior_log_variance = self.q_posterior(
        x_start=x_recon, x_t=x, t=t)
    return model_mean, posterior_variance, posterior_log_variance
def p_sample_loop(self, shape, cond, act=None, normalizer=None, policy=None, return_sequence=False, verbose=True, return_chain=False, **sample_kwargs):
if policy is None: | sample_fn = default_sample_fn | 0 | 2023-12-12 21:05:26+00:00 | 4k |
zhongpei/Qwen-SDXL-Turbo | web_demo.py | [
{
"identifier": "start_server",
"path": "file_server.py",
"snippet": "def start_server(server_port):\n # 在单独的线程中启动服务器\n server_thread = threading.Thread(target=_start_server, args=(server_port,))\n server_thread.daemon = True # 设置为守护线程,这样当主程序退出时,服务器线程也会退出\n server_thread.start()"
},
{
... | import os
import gradio as gr
import mdtex2html
import piexif
import os
import torch
import json
import time
import datetime
import config as conf
import gc
from argparse import ArgumentParser
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from diffusers import AutoPipelineForText2Image
from file_server import start_server, get_local_ip | 3,109 | task_history = gr.State([])
with gr.Row():
with gr.Column(scale=1, min_width=600):
image = gr.Image(type="pil")
query = gr.Textbox(lines=2, label='Input')
with gr.Row():
empty_btn = gr.Button("🧹 Clear History (清除历史)")
submit_btn = gr.Button("🚀 Submit (生成)")
regen_btn = gr.Button("🤔️ Regenerate (重试)")
image_btn = gr.Button("🎨 Image (生成)")
talk_btn = gr.Button("💬 Talk (聊天)")
with gr.Column(scale=1, min_width=600):
with gr.Tab(label="Qwen"):
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.9,
label="Temperature",
info="越小越遵循输入,越大越充满想象"
)
prompt_system_radio = gr.Radio(
["中英文翻译", "文言文", "画家", "剧情", "AI助手"],
label='角色',
info="根据输入选择合适的角色"
)
with gr.Row():
prompt_system = gr.Textbox(
lines=1,
label='System Template',
value="你擅长翻译中文到英语。"
)
prompt_template = gr.Textbox(
lines=1,
label='Prompt Template',
value="必须使用英语根据主题描述一副画面:"
)
chatbot = gr.Chatbot(label='Qwen-Chat', elem_classes="control-height")
with gr.Tab(label="Config"):
with gr.Row():
top_p = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=1.0, label="Top-p")
top_k = gr.Slider(minimum=0, maximum=100, step=1, value=50, label="Top-k")
max_new_tokens = gr.Slider(minimum=1, maximum=1024, step=1, value=77, label="Max New Tokens")
repetition_penalty = gr.Slider(
minimum=1.0,
maximum=2.0,
step=0.01,
value=1.1,
label="repetition penalty",
info="重复惩罚"
)
aspect_ratios_selection = gr.Radio(
label='Aspect Ratios',
choices=conf.available_aspect_ratios,
value=conf.default_aspect_ratio,
info='width × height',
elem_classes='aspect_ratios'
)
num_inference_steps = gr.Slider(minimum=1, maximum=60, step=1, value=16, label="Image Steps")
with gr.Tab(label="History"):
file_server = f"http://{get_local_ip()}:{args.file_server_port}/"
html_file_path = f"{datetime.datetime.now().strftime('%Y-%m-%d')}.html"
html_fns = [fn for fn in os.listdir(OUTPUT_HTML_DIR) if fn.endswith(".html")]
gr.Markdown(f'<a href="{file_server}{html_file_path}" target="_blank">{html_file_path}</a>')
for fn in html_fns:
if fn == html_file_path:
continue
gr.Markdown(f'<a href="{file_server}{fn}" target="_blank">{fn}</a>')
PROMPT_SYSTEM_DICT = {
"中英文翻译": "你擅长翻译中文到英语。",
"文言文": "你擅长文言文翻译为英语。",
"画家": "你是绘画大师,擅长描绘画面细节。",
"剧情": "你是剧作家,擅长创作连续的漫画脚本。",
"AI助手": "You are a helpful assistant",
}
prompt_system_radio.change(lambda val: (PROMPT_SYSTEM_DICT[val]),
inputs=[prompt_system_radio], outputs=[prompt_system])
temperature.change(lambda val: config.update(temperature=val), inputs=[temperature], outputs=[])
top_k.change(lambda val: config.update(top_k=val), inputs=[top_k], outputs=[])
top_p.change(lambda val: config.update(top_p=val), inputs=[top_p], outputs=[])
max_new_tokens.change(
lambda val: config.update(max_new_tokens=val),
inputs=[max_new_tokens],
outputs=[],
)
repetition_penalty.change(
lambda val: config.update(repetition_penalty=val),
inputs=[repetition_penalty],
outputs=[],
)
talk_btn.click(predict, [query, chatbot, task_history, prompt_system], [chatbot],
show_progress=True)
submit_btn.click(predict, [query, chatbot, task_history, prompt_system, prompt_template], [chatbot],
show_progress=True)
submit_btn.click(reset_user_input, [], [query])
empty_btn.click(reset_state, [chatbot, task_history], outputs=[chatbot], show_progress=True)
image_btn.click(draw_image, [chatbot, task_history, aspect_ratios_selection, num_inference_steps],
outputs=[image],
show_progress=True)
regen_btn.click(regenerate, [chatbot, task_history, prompt_system], [chatbot], show_progress=True)
demo.queue().launch(
share=args.share,
inbrowser=args.inbrowser,
server_port=args.server_port,
server_name=args.server_name,
)
def main():
args = _get_args()
| # Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""A simple web interactive chat demo based on gradio."""
DEFAULT_CKPT_PATH = 'hahahafofo/Qwen-1_8B-Stable-Diffusion-Prompt'
DEFAULT_SDXL_PATH = "Lykon/dreamshaper-xl-turbo" # "stabilityai/sdxl-turbo"
OUTPUT_IMAGES_DIR = "output_images"
OUTPUT_HTML_DIR = "output_html"
def _get_args():
    """Build the CLI parser and return the parsed demo arguments."""
    parser = ArgumentParser()
    # Model checkpoints.
    parser.add_argument("-c", "--checkpoint-path", type=str, default=DEFAULT_CKPT_PATH,
                        help="Checkpoint name or path, default to %(default)r")
    parser.add_argument("-x", "--sdxl-path", type=str, default=DEFAULT_SDXL_PATH,
                        help="SDXL Checkpoint name or path, default to %(default)r")
    # Runtime toggles.
    parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")
    parser.add_argument("--qwen-only", action="store_true", help="Run demo with qwen only")
    parser.add_argument("--share", action="store_true", default=False,
                        help="Create a publicly shareable link for the interface.")
    parser.add_argument("--inbrowser", action="store_true", default=False,
                        help="Automatically launch the interface in a new tab on the default browser.")
    # Network settings.
    parser.add_argument("--server-port", type=int, default=8000, help="Demo server port.")
    parser.add_argument("--server-name", type=str, default="0.0.0.0", help="Demo server name.")
    parser.add_argument("--file-server-port", type=int, default=8001, help="file server port.")
    return parser.parse_args()
def _load_model_tokenizer(args):
"""Load the Qwen chat model, its tokenizer and its generation config.

Downloads (or reuses a cached copy of) `args.checkpoint_path` from the
HuggingFace hub. Returns `(model, tokenizer, config)`.
"""
tokenizer = AutoTokenizer.from_pretrained(
args.checkpoint_path, trust_remote_code=True, resume_download=True,
)
# Pin to CPU when requested, otherwise let accelerate pick device placement.
if args.cpu_only:
device_map = "cpu"
else:
device_map = "auto"
model = AutoModelForCausalLM.from_pretrained(
args.checkpoint_path,
device_map=device_map,
trust_remote_code=True,
resume_download=True,
).eval()
config = GenerationConfig.from_pretrained(
args.checkpoint_path,
trust_remote_code=True,
resume_download=True,
)
# Cap generation at 77 tokens; presumably chosen to fit CLIP's 77-token
# prompt limit since the output feeds Stable Diffusion -- TODO confirm.
config.max_new_tokens = 77
return model, tokenizer, config
def _load_sdxl_turbo(args):
"""Load the SDXL-Turbo text-to-image pipeline in fp16 and move it to CUDA.

NOTE(review): unconditionally targets "cuda" -- `--cpu-only` does not reach
this loader; confirm intended behavior on CPU-only hosts.
"""
pipe = AutoPipelineForText2Image.from_pretrained(
args.sdxl_path,
torch_dtype=torch.float16,
variant="fp16"
)
pipe.to("cuda")
return pipe
def postprocess(self, y):
    """Render chat history through mdtex2html (markdown+TeX -> HTML).

    Mutates and returns `y`, a list of (message, response) pairs; `None`
    entries pass through unchanged. Installed as gr.Chatbot.postprocess.
    """
    if y is None:
        return []

    def _render(text):
        # A missing side of the exchange stays None.
        return None if text is None else mdtex2html.convert(text)

    for idx, (message, response) in enumerate(y):
        y[idx] = (_render(message), _render(response))
    return y
gr.Chatbot.postprocess = postprocess
def _save_image2html(image, query, prompt):
"""Persist a generated image and append it to today's HTML history page.

The prompt is embedded in the image's EXIF metadata as JSON; the image is
written under OUTPUT_IMAGES_DIR and a <li> entry is appended to a per-day
HTML file in OUTPUT_HTML_DIR. Returns a short status string.
"""
# Encode the prompt as JSON and store it in the EXIF ImageDescription tag.
# NOTE(review): EXIF-in-PNG via Pillow's `exif=` kwarg requires a recent
# Pillow; confirm the deployed version supports it.
exif_dict = {"0th": {}, "Exif": {}, "1st": {}, "thumbnail": None, "GPS": {}}
exif_dict["0th"][piexif.ImageIFD.ImageDescription] = json.dumps({"prompt": prompt})
exif_bytes = piexif.dump(exif_dict)
# Unix-timestamp file name keeps entries unique per second.
file_name = f"{int(time.time())}.png"
image_path = os.path.join(OUTPUT_IMAGES_DIR, file_name)
image.save(image_path, "PNG", exif=exif_bytes)
# Skeleton for a brand-new history page.
html_start = """<!DOCTYPE html><html lang="zh"><head><meta charset="UTF-8">
<title>Image and Prompt History</title></head><body><h1>Image and Prompt History</h1><ul>"""
html_end = "</ul></body></html>"
# One history page per calendar day.
html_file_path = os.path.join(OUTPUT_HTML_DIR, f"{datetime.datetime.now().strftime('%Y-%m-%d')}.html")
# New list item for this image.
# NOTE(review): the <img> src is a path relative to the process CWD
# (output_images/...), but the HTML lives in OUTPUT_HTML_DIR and is served
# by the file server -- the image link is likely broken when served; confirm.
new_list_item = f"""
<li>
<p>Prompt: {prompt}</p>
<p>Input: {query}</p>
<img src="{image_path}" alt="{image_path}" style="max-width: 100%; height: auto;">
</li>
"""
# Read today's page if it already exists.
try:
with open(html_file_path, 'r', encoding='utf-8') as file:
existing_html = file.read()
except FileNotFoundError:
# First entry of the day: start from the empty skeleton.
existing_html = html_start + html_end
# Insert the new item just before the closing list tag.
updated_html = existing_html.replace(html_end, new_list_item + html_end)
# Write the updated page back.
with open(html_file_path, 'w+', encoding='utf-8') as file:
file.write(updated_html)
return f"HTML content appended to {html_file_path}"
def _parse_text(text):
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split("`")
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f"<br></code></pre>"
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", r"\`")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace(" ", " ")
line = line.replace("*", "*")
line = line.replace("_", "_")
line = line.replace("-", "-")
line = line.replace(".", ".")
line = line.replace("!", "!")
line = line.replace("(", "(")
line = line.replace(")", ")")
line = line.replace("$", "$")
lines[i] = "<br>" + line
text = "".join(lines)
return text
def _launch_demo(args, image_pipe, model, tokenizer, config):
# Wires up the Gradio UI: Qwen chat-based prompt expansion on the left,
# SDXL-Turbo image generation fed by the latest chat answer on the right.
def predict(
_query,
_chatbot,
_task_history,
_prompt_system: str = "You are a helpful assistant",
_prompt_template: str = ""
):
# Stream a chat completion; yields the growing chatbot state so Gradio
# can update the widget incrementally.
print(f"User: {_parse_text(_query)}")
_chatbot.append((_parse_text(_query), ""))
full_response = ""
# The template is prepended to the user text before it reaches the model.
_query = f"{_prompt_template}\n{_query}"
for response in model.chat_stream(
tokenizer,
_query,
history=_task_history,
generation_config=config,
system=_prompt_system
):
# NOTE(review): the displayed user turn uses the rebound _query, so it
# includes the prepended template text -- confirm that is intended.
_chatbot[-1] = (_parse_text(_query), _parse_text(response))
yield _chatbot
full_response = _parse_text(response)
print(f"History: {_task_history}")
_task_history.append((_query, full_response))
print(f"Qwen-Chat: {_parse_text(full_response)}")
def draw_image(_chatbot, _task_history, aspect_ratios_selection, num_inference_steps, ):
# Render the most recent chat answer as the SDXL-Turbo prompt.
if len(_task_history) == 0:
return
prompt = _task_history[-1][-1]
if len(prompt) == 0:
return
print(f"===\n{_chatbot} \n\n{_task_history} ====\n")
# Aspect ratio strings are formatted "width*height".
height = int(aspect_ratios_selection.split("*")[1])
width = int(aspect_ratios_selection.split("*")[0])
print(f"{prompt} {height} * {width} {num_inference_steps}")
# guidance_scale=0.0 disables classifier-free guidance.
image_pil = image_pipe(
prompt=prompt,
num_inference_steps=num_inference_steps,
guidance_scale=0.0,
height=height,
width=width,
).images[0]
_save_image2html(image_pil, query=_chatbot[-1][0], prompt=prompt)
return image_pil
def regenerate(_chatbot, _task_history, _prompt_system):
# Drop the last exchange and re-run it with an empty template.
if not _task_history:
yield _chatbot
return
item = _task_history.pop(-1)
_chatbot.pop(-1)
yield from predict(item[0], _chatbot, _task_history, _prompt_template="", _prompt_system=_prompt_system)
def reset_user_input():
return gr.update(value="")
def reset_state(_chatbot, _task_history):
# Clear chat state and release cached GPU memory.
_task_history.clear()
_chatbot.clear()
gc.collect()
torch.cuda.empty_cache()
return _chatbot
# ---------------- UI layout ----------------
with gr.Blocks() as demo:
task_history = gr.State([])
with gr.Row():
with gr.Column(scale=1, min_width=600):
image = gr.Image(type="pil")
query = gr.Textbox(lines=2, label='Input')
with gr.Row():
empty_btn = gr.Button("🧹 Clear History (清除历史)")
submit_btn = gr.Button("🚀 Submit (生成)")
regen_btn = gr.Button("🤔️ Regenerate (重试)")
image_btn = gr.Button("🎨 Image (生成)")
talk_btn = gr.Button("💬 Talk (聊天)")
with gr.Column(scale=1, min_width=600):
with gr.Tab(label="Qwen"):
temperature = gr.Slider(
minimum=0.0,
maximum=1.0,
step=0.01,
value=0.9,
label="Temperature",
info="越小越遵循输入,越大越充满想象"
)
prompt_system_radio = gr.Radio(
["中英文翻译", "文言文", "画家", "剧情", "AI助手"],
label='角色',
info="根据输入选择合适的角色"
)
with gr.Row():
prompt_system = gr.Textbox(
lines=1,
label='System Template',
value="你擅长翻译中文到英语。"
)
prompt_template = gr.Textbox(
lines=1,
label='Prompt Template',
value="必须使用英语根据主题描述一副画面:"
)
chatbot = gr.Chatbot(label='Qwen-Chat', elem_classes="control-height")
with gr.Tab(label="Config"):
# Sampling / generation hyper-parameters.
with gr.Row():
top_p = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=1.0, label="Top-p")
top_k = gr.Slider(minimum=0, maximum=100, step=1, value=50, label="Top-k")
max_new_tokens = gr.Slider(minimum=1, maximum=1024, step=1, value=77, label="Max New Tokens")
repetition_penalty = gr.Slider(
minimum=1.0,
maximum=2.0,
step=0.01,
value=1.1,
label="repetition penalty",
info="重复惩罚"
)
aspect_ratios_selection = gr.Radio(
label='Aspect Ratios',
choices=conf.available_aspect_ratios,
value=conf.default_aspect_ratio,
info='width × height',
elem_classes='aspect_ratios'
)
num_inference_steps = gr.Slider(minimum=1, maximum=60, step=1, value=16, label="Image Steps")
with gr.Tab(label="History"):
# Daily HTML history pages, served by the companion file server.
file_server = f"http://{get_local_ip()}:{args.file_server_port}/"
html_file_path = f"{datetime.datetime.now().strftime('%Y-%m-%d')}.html"
html_fns = [fn for fn in os.listdir(OUTPUT_HTML_DIR) if fn.endswith(".html")]
gr.Markdown(f'<a href="{file_server}{html_file_path}" target="_blank">{html_file_path}</a>')
for fn in html_fns:
if fn == html_file_path:
continue
gr.Markdown(f'<a href="{file_server}{fn}" target="_blank">{fn}</a>')
# Preset system prompts selectable via the role radio buttons above.
PROMPT_SYSTEM_DICT = {
"中英文翻译": "你擅长翻译中文到英语。",
"文言文": "你擅长文言文翻译为英语。",
"画家": "你是绘画大师,擅长描绘画面细节。",
"剧情": "你是剧作家,擅长创作连续的漫画脚本。",
"AI助手": "You are a helpful assistant",
}
# ---------------- event wiring ----------------
prompt_system_radio.change(lambda val: (PROMPT_SYSTEM_DICT[val]),
inputs=[prompt_system_radio], outputs=[prompt_system])
# Config sliders write straight into the shared GenerationConfig.
temperature.change(lambda val: config.update(temperature=val), inputs=[temperature], outputs=[])
top_k.change(lambda val: config.update(top_k=val), inputs=[top_k], outputs=[])
top_p.change(lambda val: config.update(top_p=val), inputs=[top_p], outputs=[])
max_new_tokens.change(
lambda val: config.update(max_new_tokens=val),
inputs=[max_new_tokens],
outputs=[],
)
repetition_penalty.change(
lambda val: config.update(repetition_penalty=val),
inputs=[repetition_penalty],
outputs=[],
)
# Talk: plain chat (default empty template); Submit: templated expansion.
talk_btn.click(predict, [query, chatbot, task_history, prompt_system], [chatbot],
show_progress=True)
submit_btn.click(predict, [query, chatbot, task_history, prompt_system, prompt_template], [chatbot],
show_progress=True)
submit_btn.click(reset_user_input, [], [query])
empty_btn.click(reset_state, [chatbot, task_history], outputs=[chatbot], show_progress=True)
image_btn.click(draw_image, [chatbot, task_history, aspect_ratios_selection, num_inference_steps],
outputs=[image],
show_progress=True)
regen_btn.click(regenerate, [chatbot, task_history, prompt_system], [chatbot], show_progress=True)
demo.queue().launch(
share=args.share,
inbrowser=args.inbrowser,
server_port=args.server_port,
server_name=args.server_name,
)
server_name=args.server_name,
)
def main():
args = _get_args() | start_server(server_port=args.file_server_port) | 0 | 2023-12-06 06:04:29+00:00 | 4k |
jinxixiang/magic_animate_unofficial | animatediff/magic_animate/controlnet.py | [
{
"identifier": "TimestepEmbedding",
"path": "animatediff/magic_animate/embeddings.py",
"snippet": "class TimestepEmbedding(nn.Module):\n def __init__(\n self,\n in_channels: int,\n time_embed_dim: int,\n act_fn: str = \"silu\",\n out_dim: int = None,\n post_... | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from .embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import (
CrossAttnDownBlock2D,
DownBlock2D,
UNetMidBlock2DCrossAttn,
get_down_block,
)
from diffusers.models.unet_2d_condition import UNet2DConditionModel
import torch | 2,169 |
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class ControlNetOutput(BaseOutput):
"""Output of ControlNetModel.forward."""
# One residual tensor per UNet down-block feature map.
down_block_res_samples: Tuple[torch.Tensor]
# Residual for the UNet mid block.
mid_block_res_sample: torch.Tensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Encodes an image-space condition (e.g. pose/edge map) into the latent
feature resolution expected by the ControlNet.

Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
# Pairs of (same-resolution conv, stride-2 conv): each pair widens the
# channels and halves the spatial resolution.
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
# `zero_module` (helper defined elsewhere in this file) presumably
# zero-initializes the projection so conditioning starts as a no-op --
# standard ControlNet practice; confirm the helper.
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
def forward(self, conditioning):
embedding = self.conv_in(conditioning)
# SiLU activations between convs (the quoted paper says ReLU; this
# implementation uses SiLU).
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
class ControlNetModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
in_channels: int = 4,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
projection_class_embeddings_input_dim: Optional[int] = None,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
):
super().__init__()
# Check inputs
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class ControlNetOutput(BaseOutput):
"""Output of ControlNetModel.forward."""
# One residual tensor per UNet down-block feature map.
down_block_res_samples: Tuple[torch.Tensor]
# Residual for the UNet mid block.
mid_block_res_sample: torch.Tensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Encodes an image-space condition (e.g. pose/edge map) into the latent
feature resolution expected by the ControlNet.

Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
# Pairs of (same-resolution conv, stride-2 conv): each pair widens the
# channels and halves the spatial resolution.
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
# `zero_module` (helper defined elsewhere in this file) presumably
# zero-initializes the projection so conditioning starts as a no-op --
# standard ControlNet practice; confirm the helper.
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
def forward(self, conditioning):
embedding = self.conv_in(conditioning)
# SiLU activations between convs (the quoted paper says ReLU; this
# implementation uses SiLU).
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
class ControlNetModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
in_channels: int = 4,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
projection_class_embeddings_input_dim: Optional[int] = None,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
):
super().__init__()
# Check inputs
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
| self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) | 1 | 2023-12-12 00:16:39+00:00 | 4k |
Chat-3D/Chat-3D-v2 | utils/config_utils.py | [
{
"identifier": "Config",
"path": "utils/config.py",
"snippet": "class Config(object):\n \"\"\"config\"\"\"\n\n @classmethod\n def pretty_text(cls, cfg: dict, indent=2) -> str:\n \"\"\"format dict to a string\n\n Args:\n cfg (EasyDict): the params.\n\n Returns: T... | import logging
import os
import sys
from os.path import dirname, join
from utils.config import Config
from utils.distributed import init_distributed_mode, is_main_process
from utils.logger import setup_logger | 3,272 |
logger = logging.getLogger(__name__)
def setup_config():
"""Combine yaml config and command line config.

Also converts types, e.g., `'None'` (str) --> `None` (None).
NOTE(review): an earlier docstring mentioned OmegaConf, but parsing is
delegated to `utils.config.Config` here -- confirm which backend applies.
"""
config = Config.get_config()
# Debug runs never log to wandb.
if config.debug:
config.wandb.enable = False
return config
def setup_evaluate_config(config):
    """Apply evaluation-mode defaults: disable wandb and, when no output_dir
    is given, use an `eval` directory beside the pretrained checkpoint."""
    assert config.evaluate
    config.wandb.enable = False
    if config.output_dir is None:
        # Default output location next to the checkpoint being evaluated.
        ckpt_dir = dirname(config.pretrained_path)
        config.output_dir = join(ckpt_dir, "eval")
    return config
def setup_output_dir(output_dir, excludes=("code",)):
    """Create `output_dir` if it does not exist; otherwise warn about leftovers.

    Entries named in `excludes`, slurm artifacts and `.out` logs are ignored
    when deciding whether the existing directory holds leftover content.
    (A hard assert on leftovers was deliberately relaxed to a warning.)
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir, exist_ok=False)
        return
    # Directory already exists: report anything that looks like stale output.
    leftovers = [
        e for e in set(os.listdir(output_dir)) - set(excludes)
        if "slurm" not in e and ".out" not in e
    ]
    # `Logger.warn` is a deprecated alias of `warning`; use lazy %-formatting.
    logger.warning("remaining dirs or files: %s", leftovers)
def setup_main():
"""
Setup config, logger, output_dir, etc.
Shared for pretrain and all downstream tasks.
"""
config = setup_config()
if hasattr(config, "evaluate") and config.evaluate:
config = setup_evaluate_config(config)
|
logger = logging.getLogger(__name__)
def setup_config():
"""Combine yaml config and command line config.

Also converts types, e.g., `'None'` (str) --> `None` (None).
NOTE(review): an earlier docstring mentioned OmegaConf, but parsing is
delegated to `utils.config.Config` here -- confirm which backend applies.
"""
config = Config.get_config()
# Debug runs never log to wandb.
if config.debug:
config.wandb.enable = False
return config
def setup_evaluate_config(config):
"""setup evaluation default settings, e.g., disable wandb"""
assert config.evaluate
config.wandb.enable = False
# Default output location: an `eval` folder beside the pretrained checkpoint.
if config.output_dir is None:
config.output_dir = join(dirname(config.pretrained_path), "eval")
return config
def setup_output_dir(output_dir, excludes=["code"]):
"""ensure not overwriting an existing/non-empty output dir"""
# NOTE(review): mutable default argument (never mutated here, but fragile);
# consider `excludes=("code",)`.
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=False)
else:
# Directory exists: report anything that looks like stale output,
# ignoring excluded names, slurm artifacts and .out logs.
existing_dirs_files = os.listdir(output_dir)  # list
remaining = set(existing_dirs_files) - set(excludes)
remaining = [e for e in remaining if "slurm" not in e]
remaining = [e for e in remaining if ".out" not in e]
# assert len(remaining) == 0, f"remaining dirs or files: {remaining}"
# NOTE(review): `Logger.warn` is a deprecated alias of `warning`.
logger.warn(f"remaining dirs or files: {remaining}")
def setup_main():
"""
Setup config, logger, output_dir, etc.
Shared for pretrain and all downstream tasks.
"""
config = setup_config()
if hasattr(config, "evaluate") and config.evaluate:
config = setup_evaluate_config(config) | init_distributed_mode(config) | 1 | 2023-12-11 14:39:58+00:00 | 4k |
SqueezeBits/owlite | owlite/options/options_dict.py | [
{
"identifier": "log",
"path": "owlite/logger.py",
"snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n ... | from dataclasses import fields, is_dataclass
from types import NoneType
from typing import Any, Union, get_args, get_origin
from yacs.config import CfgNode
from ..logger import log
from .generic_type_checking import generic_isinstance
from .load import load_json_or_yaml
from .options_mixin import OptionsMixin | 3,361 | """Options required for configuring torch.fx.GraphModule"""
class OptionsDict(dict, OptionsMixin):
"""A simple extension of python `dict` to hold Options as values"""
ValueType: type
def __init__(self, d: Union[CfgNode, dict, str, NoneType] = None):
"""Build the dict, parsing `d` (CfgNode / dict / JSON-or-YAML string) into
values of `ValueType` via `load`. `None` yields an empty dict."""
# Required for checking if ValueType is valid; raises early when the
# subclass declared an unusable ValueType.
_ = type(self).value_types()
# NOTE(review): `super(dict, self)` skips dict.__init__ (calls
# object.__init__) -- presumably deliberate; confirm.
super(dict, self).__init__()
if d is None:
return
# `load` returns a fully-parsed instance; copy its entries in.
for k, v in type(self).load(d).items():
self[k] = v
def update(self, d: dict):
"""Merge `d` into self item-by-item via __setitem__.

NOTE(review): shadows dict.update with a narrower signature (single dict
only; no iterable-of-pairs or kwargs) -- confirm intended.
"""
for key, value in d.items():
self[key] = value
@classmethod
def value_types(cls) -> tuple[type[OptionsMixin]]:
"""Allowed value types of this class in tuple.

Validates the subclass-declared `ValueType` (a dataclass OptionsMixin, a
list/tuple/Optional/Union thereof, or a nested OptionsDict) and returns
the unwrapped argument classes. Raises AttributeError when `ValueType` is
missing and TypeError when it is malformed.
"""
if hasattr(cls, "_value_types"):
# prevent duplicate type-checking (result is cached below)
return cls._value_types
if not hasattr(cls, "ValueType"):
log.error(
"A subclass of OptionsDict requires a static type (or type union) `ValueType` "
"indicating the possible value types of the subclass"
)
raise AttributeError(f"ValueType for {cls} is not defined")
# Unwrap the typing construct: origin is Union/list/tuple, or None for a
# bare class.
origin = get_origin(cls.ValueType)
full_type_error_message = (
f"The type (union) ValueType of {cls} must be one of the followings:"
"\ni) a subclass of OptionsMixin decorated with dataclass; or"
"\nii) the list, tuple, Optional or Union of type(s) satisfying i); or"
"\niii) a subclass of OptionsDict,\n"
f"but {cls.__name__}.ValueType={cls.ValueType} is given."
)
type_error_message = f"Invalid ValueType {cls.ValueType} defined for {cls}"
if origin in (Union, list, tuple):
# Container/union: every argument must be a dataclass OptionsMixin
# (NoneType permitted, covering Optional).
args = get_args(cls.ValueType)
if not all((issubclass(c, OptionsMixin) and is_dataclass(c)) or c is NoneType for c in args):
log.error(full_type_error_message)
raise TypeError(type_error_message)
elif origin is None:
# Bare class: either a dataclass OptionsMixin or a nested OptionsDict.
if not (
(issubclass(cls.ValueType, OptionsMixin) and is_dataclass(cls.ValueType))
or issubclass(cls.ValueType, OptionsDict)
):
log.error(full_type_error_message)
raise TypeError(type_error_message)
args = (cls.ValueType,)
else:
raise TypeError(f"The type hint origin {origin} is not supported - {cls}.ValueType = {cls.ValueType}")
# Cache on the class so validation only runs once.
cls._value_types = args
return args
@classmethod
def load(cls, d: Union[dict, list, str, tuple, NoneType]) -> Any:
options_dict = cls()
value_types = cls.value_types()
origin = get_origin(cls.ValueType)
def load(name: str, data):
if generic_isinstance(data, cls.ValueType):
options_dict[name] = data
return
if origin in (Union, None):
if data is None and NoneType in value_types:
options_dict[name] = None
return
if not isinstance(data, dict):
raise TypeError(f"Expected dict but got {data}")
subnode_key_set = set(data.keys())
for option_type in value_types:
if issubclass(option_type, OptionsDict):
options_dict[name] = option_type.load(data)
break
if subnode_key_set == {field.name for field in fields(option_type)}:
options_dict[name] = option_type.load(data)
break
else:
raise ValueError(
f"Failed to parse config for node {name}: "
f"no matching options class for {data}. ({cls.__name__}.ValueType = {cls.ValueType})"
)
return
if origin in (list, tuple) and isinstance(data, (tuple, list)):
if len(value_types) != 1:
raise TypeError(
"When ValueType of a subclass of OptionsDict is either list or tuple, "
f"its element type must be specified, but {cls.__name__}.ValueType = {cls.ValueType}"
)
option_type = value_types[0]
options_dict[name] = origin(option_type.load(item) for item in data)
return
raise ValueError(f"{cls} cannot load the invalid value {data} at key={name}")
if isinstance(d, str):
| """Options required for configuring torch.fx.GraphModule"""
class OptionsDict(dict, OptionsMixin):
"""A simple extension of python `dict` to hold Options as values"""
ValueType: type
def __init__(self, d: Union[CfgNode, dict, str, NoneType] = None):
# Required for checking if ValueType is valid
_ = type(self).value_types()
super(dict, self).__init__()
if d is None:
return
for k, v in type(self).load(d).items():
self[k] = v
def update(self, d: dict):
for key, value in d.items():
self[key] = value
@classmethod
def value_types(cls) -> tuple[type[OptionsMixin]]:
"""Allowed value types of this class in tuple"""
if hasattr(cls, "_value_types"):
# prevent duplicate type-checking
return cls._value_types
if not hasattr(cls, "ValueType"):
log.error(
"A subclass of OptionsDict requires a static type (or type union) `ValueType` "
"indicating the possible value types of the subclass"
)
raise AttributeError(f"ValueType for {cls} is not defined")
origin = get_origin(cls.ValueType)
full_type_error_message = (
f"The type (union) ValueType of {cls} must be one of the followings:"
"\ni) a subclass of OptionsMixin decorated with dataclass; or"
"\nii) the list, tuple, Optional or Union of type(s) satisfying i); or"
"\niii) a subclass of OptionsDict,\n"
f"but {cls.__name__}.ValueType={cls.ValueType} is given."
)
type_error_message = f"Invalid ValueType {cls.ValueType} defined for {cls}"
if origin in (Union, list, tuple):
args = get_args(cls.ValueType)
if not all((issubclass(c, OptionsMixin) and is_dataclass(c)) or c is NoneType for c in args):
log.error(full_type_error_message)
raise TypeError(type_error_message)
elif origin is None:
if not (
(issubclass(cls.ValueType, OptionsMixin) and is_dataclass(cls.ValueType))
or issubclass(cls.ValueType, OptionsDict)
):
log.error(full_type_error_message)
raise TypeError(type_error_message)
args = (cls.ValueType,)
else:
raise TypeError(f"The type hint origin {origin} is not supported - {cls}.ValueType = {cls.ValueType}")
cls._value_types = args
return args
@classmethod
def load(cls, d: Union[dict, list, str, tuple, NoneType]) -> Any:
options_dict = cls()
value_types = cls.value_types()
origin = get_origin(cls.ValueType)
def load(name: str, data):
if generic_isinstance(data, cls.ValueType):
options_dict[name] = data
return
if origin in (Union, None):
if data is None and NoneType in value_types:
options_dict[name] = None
return
if not isinstance(data, dict):
raise TypeError(f"Expected dict but got {data}")
subnode_key_set = set(data.keys())
for option_type in value_types:
if issubclass(option_type, OptionsDict):
options_dict[name] = option_type.load(data)
break
if subnode_key_set == {field.name for field in fields(option_type)}:
options_dict[name] = option_type.load(data)
break
else:
raise ValueError(
f"Failed to parse config for node {name}: "
f"no matching options class for {data}. ({cls.__name__}.ValueType = {cls.ValueType})"
)
return
if origin in (list, tuple) and isinstance(data, (tuple, list)):
if len(value_types) != 1:
raise TypeError(
"When ValueType of a subclass of OptionsDict is either list or tuple, "
f"its element type must be specified, but {cls.__name__}.ValueType = {cls.ValueType}"
)
option_type = value_types[0]
options_dict[name] = origin(option_type.load(item) for item in data)
return
raise ValueError(f"{cls} cannot load the invalid value {data} at key={name}")
if isinstance(d, str): | d = load_json_or_yaml(d) | 2 | 2023-12-08 06:41:50+00:00 | 4k |
ximinng/PyTorch-SVGRender | pytorch_svgrender/libs/metric/piq/perceptual.py | [
{
"identifier": "_validate_input",
"path": "pytorch_svgrender/libs/metric/piq/utils/common.py",
"snippet": "def _validate_input(\n tensors: List[torch.Tensor],\n dim_range: Tuple[int, int] = (0, -1),\n data_range: Tuple[float, float] = (0., -1.),\n # size_dim_range: Tuple[flo... | from typing import List, Union, Collection
from torch.nn.modules.loss import _Loss
from torchvision.models import vgg16, vgg19, VGG16_Weights, VGG19_Weights
from .utils import _validate_input, _reduce
from .functional import similarity_map, L2Pool2d
import torch
import torch.nn as nn | 3,409 | "pool5": '36',
}
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
# Constant used in feature normalization to avoid zero division
EPS = 1e-10
class ContentLoss(_Loss):
r"""Creates Content loss that can be used for image style transfer or as a measure for image to image tasks.
Uses pretrained VGG models from torchvision.
Expects input to be in range [0, 1] or normalized with ImageNet statistics into range [-1, 1]
Args:
feature_extractor: Model to extract features or model name: ``'vgg16'`` | ``'vgg19'``.
layers: List of strings with layer names. Default: ``'relu3_3'``
weights: List of float weight to balance different layers
replace_pooling: Flag to replace MaxPooling layer with AveragePooling. See references for details.
distance: Method to compute distance between features: ``'mse'`` | ``'mae'``.
reduction: Specifies the reduction type:
``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'``
mean: List of float values used for data standardization. Default: ImageNet mean.
If there is no need to normalize data, use [0., 0., 0.].
std: List of float values used for data standardization. Default: ImageNet std.
If there is no need to normalize data, use [1., 1., 1.].
normalize_features: If true, unit-normalize each feature in channel dimension before scaling
and computing distance. See references for details.
Examples:
>>> loss = ContentLoss()
>>> x = torch.rand(3, 3, 256, 256, requires_grad=True)
>>> y = torch.rand(3, 3, 256, 256)
>>> output = loss(x, y)
>>> output.backward()
References:
Gatys, Leon and Ecker, Alexander and Bethge, Matthias (2016).
A Neural Algorithm of Artistic Style
Association for Research in Vision and Ophthalmology (ARVO)
https://arxiv.org/abs/1508.06576
Zhang, Richard and Isola, Phillip and Efros, et al. (2018)
The Unreasonable Effectiveness of Deep Features as a Perceptual Metric
IEEE/CVF Conference on Computer Vision and Pattern Recognition
https://arxiv.org/abs/1801.03924
"""
def __init__(self, feature_extractor: Union[str, torch.nn.Module] = "vgg16", layers: Collection[str] = ("relu3_3",),
weights: List[Union[float, torch.Tensor]] = [1.], replace_pooling: bool = False,
distance: str = "mse", reduction: str = "mean", mean: List[float] = IMAGENET_MEAN,
std: List[float] = IMAGENET_STD, normalize_features: bool = False,
allow_layers_weights_mismatch: bool = False) -> None:
assert allow_layers_weights_mismatch or len(layers) == len(weights), \
f'Lengths of provided layers and weighs mismatch ({len(weights)} weights and {len(layers)} layers), ' \
f'which will cause incorrect results. Please provide weight for each layer.'
super().__init__()
if callable(feature_extractor):
self.model = feature_extractor
self.layers = layers
else:
if feature_extractor == "vgg16":
# self.model = vgg16(pretrained=True, progress=False).features
self.model = vgg16(weights=VGG16_Weights.DEFAULT, progress=False).features
self.layers = [VGG16_LAYERS[l] for l in layers]
elif feature_extractor == "vgg19":
# self.model = vgg19(pretrained=True, progress=False).features
self.model = vgg19(weights=VGG19_Weights.DEFAULT, progress=False).features
self.layers = [VGG19_LAYERS[l] for l in layers]
else:
raise ValueError("Unknown feature extractor")
if replace_pooling:
self.model = self.replace_pooling(self.model)
# Disable gradients
for param in self.model.parameters():
param.requires_grad_(False)
self.distance = {
"mse": nn.MSELoss,
"mae": nn.L1Loss,
}[distance](reduction='none')
self.weights = [torch.tensor(w) if not isinstance(w, torch.Tensor) else w for w in weights]
mean = torch.tensor(mean)
std = torch.tensor(std)
self.mean = mean.view(1, -1, 1, 1)
self.std = std.view(1, -1, 1, 1)
self.normalize_features = normalize_features
self.reduction = reduction
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
r"""Computation of Content loss between feature representations of prediction :math:`x` and
target :math:`y` tensors.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Content loss between feature representations
"""
_validate_input([x, y], dim_range=(4, 4), data_range=(0, -1))
self.model.to(x)
x_features = self.get_features(x)
y_features = self.get_features(y)
distances = self.compute_distance(x_features, y_features)
# Scale distances, then average in spatial dimensions, then stack and sum in channels dimension
loss = torch.cat([(d * w.to(d)).mean(dim=[2, 3]) for d, w in zip(distances, self.weights)], dim=1).sum(dim=1)
| """
Implementation of Content loss, Style loss, LPIPS and DISTS metrics
References:
.. [1] Gatys, Leon and Ecker, Alexander and Bethge, Matthias
(2016). A Neural Algorithm of Artistic Style}
Association for Research in Vision and Ophthalmology (ARVO)
https://arxiv.org/abs/1508.06576
.. [2] Zhang, Richard and Isola, Phillip and Efros, et al.
(2018) The Unreasonable Effectiveness of Deep Features as a Perceptual Metric
2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition
https://arxiv.org/abs/1801.03924
"""
# Map VGG names to corresponding number in torchvision layer
VGG16_LAYERS = {
"conv1_1": '0', "relu1_1": '1',
"conv1_2": '2', "relu1_2": '3',
"pool1": '4',
"conv2_1": '5', "relu2_1": '6',
"conv2_2": '7', "relu2_2": '8',
"pool2": '9',
"conv3_1": '10', "relu3_1": '11',
"conv3_2": '12', "relu3_2": '13',
"conv3_3": '14', "relu3_3": '15',
"pool3": '16',
"conv4_1": '17', "relu4_1": '18',
"conv4_2": '19', "relu4_2": '20',
"conv4_3": '21', "relu4_3": '22',
"pool4": '23',
"conv5_1": '24', "relu5_1": '25',
"conv5_2": '26', "relu5_2": '27',
"conv5_3": '28', "relu5_3": '29',
"pool5": '30',
}
VGG19_LAYERS = {
"conv1_1": '0', "relu1_1": '1',
"conv1_2": '2', "relu1_2": '3',
"pool1": '4',
"conv2_1": '5', "relu2_1": '6',
"conv2_2": '7', "relu2_2": '8',
"pool2": '9',
"conv3_1": '10', "relu3_1": '11',
"conv3_2": '12', "relu3_2": '13',
"conv3_3": '14', "relu3_3": '15',
"conv3_4": '16', "relu3_4": '17',
"pool3": '18',
"conv4_1": '19', "relu4_1": '20',
"conv4_2": '21', "relu4_2": '22',
"conv4_3": '23', "relu4_3": '24',
"conv4_4": '25', "relu4_4": '26',
"pool4": '27',
"conv5_1": '28', "relu5_1": '29',
"conv5_2": '30', "relu5_2": '31',
"conv5_3": '32', "relu5_3": '33',
"conv5_4": '34', "relu5_4": '35',
"pool5": '36',
}
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
# Constant used in feature normalization to avoid zero division
EPS = 1e-10
class ContentLoss(_Loss):
r"""Creates Content loss that can be used for image style transfer or as a measure for image to image tasks.
Uses pretrained VGG models from torchvision.
Expects input to be in range [0, 1] or normalized with ImageNet statistics into range [-1, 1]
Args:
feature_extractor: Model to extract features or model name: ``'vgg16'`` | ``'vgg19'``.
layers: List of strings with layer names. Default: ``'relu3_3'``
weights: List of float weight to balance different layers
replace_pooling: Flag to replace MaxPooling layer with AveragePooling. See references for details.
distance: Method to compute distance between features: ``'mse'`` | ``'mae'``.
reduction: Specifies the reduction type:
``'none'`` | ``'mean'`` | ``'sum'``. Default:``'mean'``
mean: List of float values used for data standardization. Default: ImageNet mean.
If there is no need to normalize data, use [0., 0., 0.].
std: List of float values used for data standardization. Default: ImageNet std.
If there is no need to normalize data, use [1., 1., 1.].
normalize_features: If true, unit-normalize each feature in channel dimension before scaling
and computing distance. See references for details.
Examples:
>>> loss = ContentLoss()
>>> x = torch.rand(3, 3, 256, 256, requires_grad=True)
>>> y = torch.rand(3, 3, 256, 256)
>>> output = loss(x, y)
>>> output.backward()
References:
Gatys, Leon and Ecker, Alexander and Bethge, Matthias (2016).
A Neural Algorithm of Artistic Style
Association for Research in Vision and Ophthalmology (ARVO)
https://arxiv.org/abs/1508.06576
Zhang, Richard and Isola, Phillip and Efros, et al. (2018)
The Unreasonable Effectiveness of Deep Features as a Perceptual Metric
IEEE/CVF Conference on Computer Vision and Pattern Recognition
https://arxiv.org/abs/1801.03924
"""
def __init__(self, feature_extractor: Union[str, torch.nn.Module] = "vgg16", layers: Collection[str] = ("relu3_3",),
weights: List[Union[float, torch.Tensor]] = [1.], replace_pooling: bool = False,
distance: str = "mse", reduction: str = "mean", mean: List[float] = IMAGENET_MEAN,
std: List[float] = IMAGENET_STD, normalize_features: bool = False,
allow_layers_weights_mismatch: bool = False) -> None:
assert allow_layers_weights_mismatch or len(layers) == len(weights), \
f'Lengths of provided layers and weighs mismatch ({len(weights)} weights and {len(layers)} layers), ' \
f'which will cause incorrect results. Please provide weight for each layer.'
super().__init__()
if callable(feature_extractor):
self.model = feature_extractor
self.layers = layers
else:
if feature_extractor == "vgg16":
# self.model = vgg16(pretrained=True, progress=False).features
self.model = vgg16(weights=VGG16_Weights.DEFAULT, progress=False).features
self.layers = [VGG16_LAYERS[l] for l in layers]
elif feature_extractor == "vgg19":
# self.model = vgg19(pretrained=True, progress=False).features
self.model = vgg19(weights=VGG19_Weights.DEFAULT, progress=False).features
self.layers = [VGG19_LAYERS[l] for l in layers]
else:
raise ValueError("Unknown feature extractor")
if replace_pooling:
self.model = self.replace_pooling(self.model)
# Disable gradients
for param in self.model.parameters():
param.requires_grad_(False)
self.distance = {
"mse": nn.MSELoss,
"mae": nn.L1Loss,
}[distance](reduction='none')
self.weights = [torch.tensor(w) if not isinstance(w, torch.Tensor) else w for w in weights]
mean = torch.tensor(mean)
std = torch.tensor(std)
self.mean = mean.view(1, -1, 1, 1)
self.std = std.view(1, -1, 1, 1)
self.normalize_features = normalize_features
self.reduction = reduction
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
r"""Computation of Content loss between feature representations of prediction :math:`x` and
target :math:`y` tensors.
Args:
x: An input tensor. Shape :math:`(N, C, H, W)`.
y: A target tensor. Shape :math:`(N, C, H, W)`.
Returns:
Content loss between feature representations
"""
_validate_input([x, y], dim_range=(4, 4), data_range=(0, -1))
self.model.to(x)
x_features = self.get_features(x)
y_features = self.get_features(y)
distances = self.compute_distance(x_features, y_features)
# Scale distances, then average in spatial dimensions, then stack and sum in channels dimension
loss = torch.cat([(d * w.to(d)).mean(dim=[2, 3]) for d, w in zip(distances, self.weights)], dim=1).sum(dim=1)
| return _reduce(loss, self.reduction) | 1 | 2023-12-13 08:18:01+00:00 | 4k |
lyhisme/DeST | libs/models/DeST_linearformer.py | [
{
"identifier": "SingleStageTCN",
"path": "libs/models/tcn.py",
"snippet": "class SingleStageTCN(nn.Module):\n def __init__(\n self,\n in_channel: int,\n n_features: int,\n n_classes: int,\n n_layers: int,\n **kwargs: Any\n ) -> None:\n super().__in... | from typing import Any, Optional, Tuple
from .tcn import SingleStageTCN
from .SP import MultiScale_GraphConv
import torch
import torch.nn as nn
import copy
import math | 2,681 |
out = self.conv_out(out)
out = self.dropout(out)
return (x + out) * mask
class SFI(nn.Module):
def __init__(self, in_channel, n_features):
super().__init__()
self.conv_s = nn.Conv1d(in_channel, n_features, 1)
self.softmax = nn.Softmax(dim=-1)
self.ff = nn.Sequential(nn.Linear(n_features, n_features),
nn.GELU(),
nn.Dropout(0.3),
nn.Linear(n_features, n_features))
def forward(self, feature_s, feature_t, mask):
feature_s = feature_s.permute(0, 2, 1)
n, c, t = feature_s.shape
feature_s = self.conv_s(feature_s)
map = self.softmax(torch.einsum("nct,ndt->ncd", feature_s, feature_t)/t)
feature_cross = torch.einsum("ncd,ndt->nct", map, feature_t)
feature_cross = feature_cross + feature_t
feature_cross = feature_cross.permute(0, 2, 1)
feature_cross = self.ff(feature_cross).permute(0, 2, 1) + feature_t
return feature_cross * mask
class STI(nn.Module):
def __init__(self, node, in_channel, n_features, out_channel, num_layers, SFI_layer, channel_masking_rate=0.3, alpha=1):
super().__init__()
self.SFI_layer = SFI_layer
num_SFI_layers = len(SFI_layer)
self.channel_masking_rate = channel_masking_rate
self.dropout = nn.Dropout2d(p=channel_masking_rate)
self.conv_in = nn.Conv2d(in_channel, num_SFI_layers+1, kernel_size=1)
self.conv_t = nn.Conv1d(node, n_features, 1)
self.SFI_layers = nn.ModuleList(
[SFI(node, n_features) for i in range(num_SFI_layers)])
self.layers = nn.ModuleList(
[AttModule(2 ** i, n_features, n_features, 'encoder', alpha) for i in
range(num_layers)])
self.conv_out = nn.Conv1d(n_features, out_channel, 1)
def forward(self, x, mask):
if self.channel_masking_rate > 0:
x = self.dropout(x)
count = 0
x = self.conv_in(x)
feature_s, feature_t = torch.split(x, (len(self.SFI_layers), 1), dim=1)
feature_t = feature_t.squeeze(1).permute(0, 2, 1)
feature_st = self.conv_t(feature_t)
for index, layer in enumerate(self.layers):
if index in self.SFI_layer:
feature_st = self.SFI_layers[count](feature_s[:,count,:], feature_st, mask)
count+=1
feature_st = layer(feature_st, None, mask)
feature_st = self.conv_out(feature_st)
return feature_st * mask
class Decoder(nn.Module):
def __init__(self, in_channel, n_features, out_channel, num_layers, alpha=1):
super().__init__()
self.conv_in = nn.Conv1d(in_channel, n_features, 1)
self.layers = nn.ModuleList(
[AttModule(2 ** i, n_features, n_features, 'decoder', alpha) for i in
range(num_layers)])
self.conv_out = nn.Conv1d(n_features, out_channel, 1)
def forward(self, x, fencoder, mask):
feature = self.conv_in(x)
for layer in self.layers:
feature = layer(feature, fencoder, mask)
out = self.conv_out(feature)
return out, feature
class Model(nn.Module):
"""
this model predicts both frame-level classes and boundaries.
Args:
in_channel:
n_feature: 64
n_classes: the number of action classes
n_layers: 10
"""
def __init__(
self,
in_channel: int,
n_features: int,
n_classes: int,
n_stages: int,
n_layers: int,
n_refine_layers: int,
n_stages_asb: Optional[int] = None,
n_stages_brb: Optional[int] = None,
SFI_layer: Optional[int] = None,
dataset: str = None,
**kwargs: Any
) -> None:
if not isinstance(n_stages_asb, int):
n_stages_asb = n_stages
if not isinstance(n_stages_brb, int):
n_stages_brb = n_stages
super().__init__()
self.in_channel = in_channel
node = 19 if dataset == "LARA" else 25
|
def exponential_descrease(idx_decoder, p=3):
return math.exp(-p*idx_decoder)
class Linear_Attention(nn.Module):
def __init__(self,
in_channel,
n_features,
out_channel,
n_heads=4,
drop_out=0.05
):
super().__init__()
self.n_heads = n_heads
self.query_projection = nn.Linear(in_channel, n_features)
self.key_projection = nn.Linear(in_channel, n_features)
self.value_projection = nn.Linear(in_channel, n_features)
self.out_projection = nn.Linear(n_features, out_channel)
self.dropout = nn.Dropout(drop_out)
def elu(self, x):
return torch.sigmoid(x)
# return torch.nn.functional.elu(x) + 1
def forward(self, queries, keys, values, mask):
B, L, _ = queries.shape
_, S, _ = keys.shape
queries = self.query_projection(queries).view(B, L, self.n_heads, -1)
keys = self.key_projection(keys).view(B, S, self.n_heads, -1)
values = self.value_projection(values).view(B, S, self.n_heads, -1)
queries = queries.transpose(1, 2)
keys = keys.transpose(1, 2)
values = values.transpose(1, 2)
queries = self.elu(queries)
keys = self.elu(keys)
KV = torch.einsum('...sd,...se->...de', keys, values)
Z = 1.0 / torch.einsum('...sd,...d->...s',queries, keys.sum(dim=-2)+1e-6)
x = torch.einsum('...de,...sd,...s->...se', KV, queries, Z).transpose(1, 2)
x = x.reshape(B, L, -1)
x = self.out_projection(x)
x = self.dropout(x)
return x * mask[:, 0, :, None]
class AttModule(nn.Module):
def __init__(self, dilation, in_channel, out_channel, stage, alpha):
super(AttModule, self).__init__()
self.stage = stage
self.alpha = alpha
self.feed_forward = nn.Sequential(
nn.Conv1d(in_channel, out_channel, 3, padding=dilation, dilation=dilation),
nn.ReLU()
)
self.instance_norm = nn.InstanceNorm1d(out_channel, track_running_stats=False)
self.att_layer = Linear_Attention(out_channel, out_channel, out_channel)
self.conv_out = nn.Conv1d(out_channel, out_channel, 1)
self.dropout = nn.Dropout()
def forward(self, x, f, mask):
out = self.feed_forward(x)
if self.stage == 'encoder':
q = self.instance_norm(out).permute(0, 2, 1)
out = self.alpha * self.att_layer(q, q, q, mask).permute(0, 2, 1) + out
else:
assert f is not None
q = self.instance_norm(out).permute(0, 2, 1)
f = f.permute(0, 2, 1)
out = self.alpha * self.att_layer(q, q, f, mask).permute(0, 2, 1) + out
out = self.conv_out(out)
out = self.dropout(out)
return (x + out) * mask
class SFI(nn.Module):
def __init__(self, in_channel, n_features):
super().__init__()
self.conv_s = nn.Conv1d(in_channel, n_features, 1)
self.softmax = nn.Softmax(dim=-1)
self.ff = nn.Sequential(nn.Linear(n_features, n_features),
nn.GELU(),
nn.Dropout(0.3),
nn.Linear(n_features, n_features))
def forward(self, feature_s, feature_t, mask):
feature_s = feature_s.permute(0, 2, 1)
n, c, t = feature_s.shape
feature_s = self.conv_s(feature_s)
map = self.softmax(torch.einsum("nct,ndt->ncd", feature_s, feature_t)/t)
feature_cross = torch.einsum("ncd,ndt->nct", map, feature_t)
feature_cross = feature_cross + feature_t
feature_cross = feature_cross.permute(0, 2, 1)
feature_cross = self.ff(feature_cross).permute(0, 2, 1) + feature_t
return feature_cross * mask
class STI(nn.Module):
def __init__(self, node, in_channel, n_features, out_channel, num_layers, SFI_layer, channel_masking_rate=0.3, alpha=1):
super().__init__()
self.SFI_layer = SFI_layer
num_SFI_layers = len(SFI_layer)
self.channel_masking_rate = channel_masking_rate
self.dropout = nn.Dropout2d(p=channel_masking_rate)
self.conv_in = nn.Conv2d(in_channel, num_SFI_layers+1, kernel_size=1)
self.conv_t = nn.Conv1d(node, n_features, 1)
self.SFI_layers = nn.ModuleList(
[SFI(node, n_features) for i in range(num_SFI_layers)])
self.layers = nn.ModuleList(
[AttModule(2 ** i, n_features, n_features, 'encoder', alpha) for i in
range(num_layers)])
self.conv_out = nn.Conv1d(n_features, out_channel, 1)
def forward(self, x, mask):
if self.channel_masking_rate > 0:
x = self.dropout(x)
count = 0
x = self.conv_in(x)
feature_s, feature_t = torch.split(x, (len(self.SFI_layers), 1), dim=1)
feature_t = feature_t.squeeze(1).permute(0, 2, 1)
feature_st = self.conv_t(feature_t)
for index, layer in enumerate(self.layers):
if index in self.SFI_layer:
feature_st = self.SFI_layers[count](feature_s[:,count,:], feature_st, mask)
count+=1
feature_st = layer(feature_st, None, mask)
feature_st = self.conv_out(feature_st)
return feature_st * mask
class Decoder(nn.Module):
def __init__(self, in_channel, n_features, out_channel, num_layers, alpha=1):
super().__init__()
self.conv_in = nn.Conv1d(in_channel, n_features, 1)
self.layers = nn.ModuleList(
[AttModule(2 ** i, n_features, n_features, 'decoder', alpha) for i in
range(num_layers)])
self.conv_out = nn.Conv1d(n_features, out_channel, 1)
def forward(self, x, fencoder, mask):
feature = self.conv_in(x)
for layer in self.layers:
feature = layer(feature, fencoder, mask)
out = self.conv_out(feature)
return out, feature
class Model(nn.Module):
"""
this model predicts both frame-level classes and boundaries.
Args:
in_channel:
n_feature: 64
n_classes: the number of action classes
n_layers: 10
"""
def __init__(
self,
in_channel: int,
n_features: int,
n_classes: int,
n_stages: int,
n_layers: int,
n_refine_layers: int,
n_stages_asb: Optional[int] = None,
n_stages_brb: Optional[int] = None,
SFI_layer: Optional[int] = None,
dataset: str = None,
**kwargs: Any
) -> None:
if not isinstance(n_stages_asb, int):
n_stages_asb = n_stages
if not isinstance(n_stages_brb, int):
n_stages_brb = n_stages
super().__init__()
self.in_channel = in_channel
node = 19 if dataset == "LARA" else 25
| self.SP = MultiScale_GraphConv(13, in_channel, n_features, dataset) | 1 | 2023-12-12 02:27:15+00:00 | 4k |
bolna-ai/bolna | local_setup/demo_server.py | [
{
"identifier": "AssistantManager",
"path": "bolna/agent_manager/assistant_manager.py",
"snippet": "class AssistantManager(BaseManager):\n def __init__(self, agent_config, ws, context_data=None, user_id=None, assistant_id=None,\n connected_through_dashboard=None, cache = None):\n ... | import os
import asyncio
import json
import uuid
import traceback
import redis.asyncio as redis
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, HTTPException
from typing import List
from dotenv import load_dotenv
from bolna.agent_manager import AssistantManager
from bolna.helpers.logger_config import configure_logger
from bolna.models import AssistantModel | 1,634 |
logger = configure_logger(__name__)
load_dotenv()
redis_pool = redis.ConnectionPool.from_url(os.getenv('REDIS_URL'), decode_responses=True)
redis_client = redis.Redis.from_pool(redis_pool)
active_websockets: List[WebSocket] = []
app = FastAPI()
@app.post("/create_agent")
async def create_agent(agent_data: AssistantModel):
agent_uuid = '{}'.format(str(uuid.uuid4()))
redis_task = asyncio.create_task(redis_client.set(agent_uuid, agent_data.json()))
await asyncio.gather(redis_task)
return {"agent_id": "{}".format(agent_uuid), "state": "created"}
@app.websocket("/chat/v1/{user_id}/{agent_id}")
async def websocket_endpoint(agent_id: str, user_id: str, websocket: WebSocket):
logger.info('ws connected with user_id: {} and agent_id: {}'.format(user_id, agent_id))
await websocket.accept()
active_websockets.append(websocket)
agent_config, context_data = None, None
try:
retrieved_agent_config, retrieved_context_data = await redis_client.mget([agent_id, user_id])
agent_config, context_data = json.loads(retrieved_agent_config), json.loads(retrieved_context_data)
except Exception as e:
raise HTTPException(status_code=404, detail="Agent not found")
is_local = True
|
logger = configure_logger(__name__)
load_dotenv()
redis_pool = redis.ConnectionPool.from_url(os.getenv('REDIS_URL'), decode_responses=True)
redis_client = redis.Redis.from_pool(redis_pool)
active_websockets: List[WebSocket] = []
app = FastAPI()
@app.post("/create_agent")
async def create_agent(agent_data: AssistantModel):
agent_uuid = '{}'.format(str(uuid.uuid4()))
redis_task = asyncio.create_task(redis_client.set(agent_uuid, agent_data.json()))
await asyncio.gather(redis_task)
return {"agent_id": "{}".format(agent_uuid), "state": "created"}
@app.websocket("/chat/v1/{user_id}/{agent_id}")
async def websocket_endpoint(agent_id: str, user_id: str, websocket: WebSocket):
logger.info('ws connected with user_id: {} and agent_id: {}'.format(user_id, agent_id))
await websocket.accept()
active_websockets.append(websocket)
agent_config, context_data = None, None
try:
retrieved_agent_config, retrieved_context_data = await redis_client.mget([agent_id, user_id])
agent_config, context_data = json.loads(retrieved_agent_config), json.loads(retrieved_context_data)
except Exception as e:
raise HTTPException(status_code=404, detail="Agent not found")
is_local = True | agent_manager = AssistantManager(agent_config, websocket, context_data, user_id, agent_id) | 0 | 2023-12-13 09:07:35+00:00 | 4k |
relari-ai/continuous-eval | tests/evaluator_test.py | [
{
"identifier": "Dataset",
"path": "continuous_eval/dataset.py",
"snippet": "class Dataset(pd.DataFrame):\n def __init__(self, data=None, index=None, columns=None, copy=False):\n super().__init__(data=data, index=index, columns=columns, copy=copy)\n self.validate()\n\n def iterate(se... | import tempfile
import pandas as pd
import pytest
from continuous_eval.dataset import Dataset
from continuous_eval.evaluators import GenerationEvaluator, RetrievalEvaluator
from continuous_eval.metrics import DeterministicAnswerCorrectness
from tests.helpers.dummy_metric import DummyMetric | 1,797 |
retrieval_dataset = Dataset.from_jsonl("tests/data/retrieval_sm.jsonl")
generation_dataset = Dataset.from_jsonl("tests/data/correctness_sm.jsonl")
def test_retieval_evaluator():
expected_keys = {"precision", "NDCG", "recall"}
|
retrieval_dataset = Dataset.from_jsonl("tests/data/retrieval_sm.jsonl")
generation_dataset = Dataset.from_jsonl("tests/data/correctness_sm.jsonl")
def test_retieval_evaluator():
expected_keys = {"precision", "NDCG", "recall"}
| evaluator = RetrievalEvaluator( | 2 | 2023-12-08 21:30:39+00:00 | 4k |
ryanhe312/STSSNet-AAAI2024 | train.py | [
{
"identifier": "STSSNet",
"path": "model.py",
"snippet": "class STSSNet(nn.Module):\n def __init__(self, in_ch, out_ch, feat_ch, his_ch, skip=True):\n super(STSSNet, self).__init__()\n self.skip = skip\n\n self.convHis1 = nn.Sequential(\n nn.Conv2d(his_ch, 24, kernel_... | import os
import time
import torch
import lpips
import torchvision as tv
import torch.nn.functional as F
import torch.utils.data as data
from torch import optim
from torch.cuda import amp
from visdom import Visdom
from model import STSSNet
from tqdm.auto import tqdm
from dataloaders import *
from utils import metrics | 2,328 |
mdevice=torch.device("cuda:0")
learningrate=1e-4
epoch=100
printevery=50
batch_size=2
class VisdomWriter:
def __init__(self, visdom_port):
self.viz = Visdom(port=visdom_port)
self.names = []
def add_scalar(self, name, val, step):
try:
val = val.item()
except:
val = float(val)
if name not in self.names:
self.names.append(name)
self.viz.line([val], [step], win=name, opts=dict(title=name))
else:
self.viz.line([val], [step], win=name, update='append')
def add_image(self, name, image, step):
self.viz.image(image, win=name, opts=dict(title=name))
def close(self):
return
def colornorm(img):
img = img.clamp(0,1)
return img
def train(dataLoaderIns, modelSavePath, save_dir, reload=None, port=2336):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
vgg_model = lpips.LPIPS(net='vgg').cuda()
|
mdevice=torch.device("cuda:0")
learningrate=1e-4
epoch=100
printevery=50
batch_size=2
class VisdomWriter:
def __init__(self, visdom_port):
self.viz = Visdom(port=visdom_port)
self.names = []
def add_scalar(self, name, val, step):
try:
val = val.item()
except:
val = float(val)
if name not in self.names:
self.names.append(name)
self.viz.line([val], [step], win=name, opts=dict(title=name))
else:
self.viz.line([val], [step], win=name, update='append')
def add_image(self, name, image, step):
self.viz.image(image, win=name, opts=dict(title=name))
def close(self):
return
def colornorm(img):
img = img.clamp(0,1)
return img
def train(dataLoaderIns, modelSavePath, save_dir, reload=None, port=2336):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
vgg_model = lpips.LPIPS(net='vgg').cuda()
| model = STSSNet(6,3,9,4) | 0 | 2023-12-10 02:02:37+00:00 | 4k |
Seunggu0305/VLCounter | tools/test_carpk.py | [
{
"identifier": "save_density_map",
"path": "tools/util.py",
"snippet": "def save_density_map(query_img, pred_D,attn, GT_D,output_dir, fname='results.png', class_chosen=None, pred_cnt=None):\n\n if query_img is not None:\n _,h,w = query_img.shape\n query_img = query_img.cpu().numpy()\n ... | import os
import torch
import torch.nn as nn
import numpy as np
import argparse
import time
import random
import yaml
import scipy.ndimage as ndimage
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import hub
from dotmap import DotMap
from torch.utils.tensorboard import SummaryWriter
from .models.Counter_vit_tc_unet_info import Counter
from .util import save_density_map, save_density_map_carpk, get_model_dir, get_model_dir_carpk
from PIL import Image
from torchvision import transforms
from .tokenizer import tokenize | 2,399 |
# from .models.Counter_vit_af_tc_info_unet_v4 import Counter
def parse_args() -> None:
parser = argparse.ArgumentParser(description='Zero Shot Object Counting')
parser.add_argument('--config', type=str, required=True, help='config file')
parser.add_argument('--gpus', type=lambda s: [int(item) for item in s.split(',')], required=True, help='gpu ids')
parser.add_argument('--enc', type=str, required=True, help='LIT encoder setting')
parser.add_argument('--prompt', type=str, required=True, help='num of prompt')
parser.add_argument('--ckpt_used', type=str, required=True, help='best checkpoint')
parser.add_argument('--exp', type=int, required=True, help='exp')
parsed = parser.parse_args()
assert parsed.config is not None
with open(parsed.config, 'r') as f:
config = yaml.safe_load(f)
args = DotMap(config)
args.config = parsed.config
args.gpus = parsed.gpus
args.enc = parsed.enc
args.prompt = parsed.prompt
args.EVALUATION.ckpt_used = parsed.ckpt_used
args.exp = parsed.exp
if args.enc == 'res101':
args.MODEL.pretrain = '/workspace/YESCUTMIX/pretrain/RN101.pt'
return args
def main(args):
local_rank = args.local_rank
if args.TRAIN.manual_seed is not None:
cudnn.benchmark = False
cudnn.deterministic = True
torch.cuda.manual_seed(args.TRAIN.manual_seed)
np.random.seed(args.TRAIN.manual_seed)
torch.manual_seed(args.TRAIN.manual_seed)
torch.cuda.manual_seed_all(args.TRAIN.manual_seed)
random.seed(args.TRAIN.manual_seed)
model = Counter(args).cuda()
root_model = get_model_dir(args)
if args.EVALUATION.ckpt_used is not None:
filepath = os.path.join(root_model, f'{args.EVALUATION.ckpt_used}.pth')
assert os.path.isfile(filepath), filepath
print("=> loading model weight '{}'".format(filepath),flush=True)
checkpoint = torch.load(filepath)
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded model weight '{}'".format(filepath),flush=True)
else:
print("=> Not loading anything",flush=True)
# test_loader = get_val_loader(args,mode='test')
ds_test = hub.load("hub://activeloop/carpk-test")
#dataloader_train = ds_train.pytorch(num_workers=args.num_workers, batch_size=1, shuffle=False)
test_loader = ds_test.pytorch(num_workers=args.DATA.workers, batch_size=1, shuffle=False)
|
# from .models.Counter_vit_af_tc_info_unet_v4 import Counter
def parse_args() -> None:
parser = argparse.ArgumentParser(description='Zero Shot Object Counting')
parser.add_argument('--config', type=str, required=True, help='config file')
parser.add_argument('--gpus', type=lambda s: [int(item) for item in s.split(',')], required=True, help='gpu ids')
parser.add_argument('--enc', type=str, required=True, help='LIT encoder setting')
parser.add_argument('--prompt', type=str, required=True, help='num of prompt')
parser.add_argument('--ckpt_used', type=str, required=True, help='best checkpoint')
parser.add_argument('--exp', type=int, required=True, help='exp')
parsed = parser.parse_args()
assert parsed.config is not None
with open(parsed.config, 'r') as f:
config = yaml.safe_load(f)
args = DotMap(config)
args.config = parsed.config
args.gpus = parsed.gpus
args.enc = parsed.enc
args.prompt = parsed.prompt
args.EVALUATION.ckpt_used = parsed.ckpt_used
args.exp = parsed.exp
if args.enc == 'res101':
args.MODEL.pretrain = '/workspace/YESCUTMIX/pretrain/RN101.pt'
return args
def main(args):
local_rank = args.local_rank
if args.TRAIN.manual_seed is not None:
cudnn.benchmark = False
cudnn.deterministic = True
torch.cuda.manual_seed(args.TRAIN.manual_seed)
np.random.seed(args.TRAIN.manual_seed)
torch.manual_seed(args.TRAIN.manual_seed)
torch.cuda.manual_seed_all(args.TRAIN.manual_seed)
random.seed(args.TRAIN.manual_seed)
model = Counter(args).cuda()
root_model = get_model_dir(args)
if args.EVALUATION.ckpt_used is not None:
filepath = os.path.join(root_model, f'{args.EVALUATION.ckpt_used}.pth')
assert os.path.isfile(filepath), filepath
print("=> loading model weight '{}'".format(filepath),flush=True)
checkpoint = torch.load(filepath)
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded model weight '{}'".format(filepath),flush=True)
else:
print("=> Not loading anything",flush=True)
# test_loader = get_val_loader(args,mode='test')
ds_test = hub.load("hub://activeloop/carpk-test")
#dataloader_train = ds_train.pytorch(num_workers=args.num_workers, batch_size=1, shuffle=False)
test_loader = ds_test.pytorch(num_workers=args.DATA.workers, batch_size=1, shuffle=False)
| root_model = get_model_dir_carpk(args) | 3 | 2023-12-13 08:00:28+00:00 | 4k |
qitan/devops-backend-lite | dbapp/model/model_workflow.py | [
{
"identifier": "Environment",
"path": "dbapp/model/model_cmdb.py",
"snippet": "class Environment(TimeAbstract):\n \"\"\"环境\"\"\"\n name = models.CharField(max_length=100, unique=True, verbose_name='环境')\n alias = models.CharField(max_length=128, default='', verbose_name='环境别名')\n ticket_on ... | from datetime import datetime
from django.db import models
from dbapp.model.model_cmdb import Environment
from common.extends.models import CreateTimeAbstract, CommonParent
from dbapp.model.model_ucenter import UserProfile
from dbapp.models import TimeAbstract
from markdown import Markdown
import shortuuid | 2,231 | """
@Author : Ken Chen
@Contact : 316084217@qq.com
@Time : 2021/11/2 上午9:50
"""
class WorkflowCategory(models.Model):
"""
工单模板分组
"""
name = models.CharField(max_length=80, unique=True, verbose_name='分类名')
desc = models.TextField(verbose_name='描述', null=True, blank=True)
sort = models.IntegerField(default=999, verbose_name='排序')
def __str__(self):
return self.name
class Meta:
db_table = 'workflow_workflowcategory'
ordering = ['sort']
class WorkflowTemplateAbstract(TimeAbstract):
"""
工单模板 抽象类
"""
category = models.ForeignKey(
WorkflowCategory, null=True, verbose_name='所属分类', on_delete=models.SET_NULL)
name = models.CharField(max_length=100, unique=True, verbose_name='工单模板名')
products = models.JSONField(
default=list, verbose_name='关联产品', help_text='存储产品ID')
projects = models.JSONField(default=list, verbose_name='关联项目',
help_text='产品项目ID数组, eg: [[product_id, project_id]]')
environment = models.ForeignKey(
Environment, on_delete=models.SET_NULL, null=True, blank=True, verbose_name='关联环境')
enabled = models.BooleanField(default=True, verbose_name='是否启用')
nodes = models.JSONField(verbose_name='节点配置')
revision = models.IntegerField(
default=0, verbose_name='版本号') # 模板每次变更, 更新版本号加 1
comment = models.CharField(
max_length=100, null=True, blank=True, verbose_name='模板备注')
sort = models.IntegerField(default=999, verbose_name='排序')
@property
def node_list(self):
return [i['name'] for i in self.nodes]
def get_node_conf(self, node_name):
node_index = self.node_list.index(node_name)
return self.nodes[node_index]
class Meta:
abstract = True
ordering = ['sort']
def __str__(self):
return self.name
class WorkflowTemplate(WorkflowTemplateAbstract):
"""
工单模板
"""
class Meta:
db_table = 'workflow_workflowtemplate'
class WorkflowTemplateRevisionHistory(WorkflowTemplateAbstract):
"""
工单模板版本历史保存
创建工单的时候检查当前模板版本号是否在本模型中存在
如果不存在, 从 TicketTemplate 复制一份到这边。
"""
name = models.CharField(max_length=100, verbose_name='工单模板名')
class Meta:
db_table = 'workflow_workflowtemplaterevisionhistory'
class Workflow(TimeAbstract):
"""
工单
"""
class STATUS:
close = '已关闭'
revoke = '已撤回'
reject = '被驳回'
wait = '待处理'
complete = '已完成'
failed = '执行失败'
choices = (
(close, close),
(revoke, revoke),
(reject, reject),
(wait, wait),
(complete, complete),
(failed, failed)
)
wid = models.CharField(max_length=40, null=True, blank=True, unique=True, verbose_name='工单号',
help_text='前端不需要传值')
topic = models.CharField(max_length=200, verbose_name='工单标题')
node = models.CharField(max_length=50, verbose_name='当前节点名')
status = models.CharField(
max_length=30, choices=STATUS.choices, verbose_name='工单状态')
creator = models.ForeignKey(
| """
@Author : Ken Chen
@Contact : 316084217@qq.com
@Time : 2021/11/2 上午9:50
"""
class WorkflowCategory(models.Model):
"""
工单模板分组
"""
name = models.CharField(max_length=80, unique=True, verbose_name='分类名')
desc = models.TextField(verbose_name='描述', null=True, blank=True)
sort = models.IntegerField(default=999, verbose_name='排序')
def __str__(self):
return self.name
class Meta:
db_table = 'workflow_workflowcategory'
ordering = ['sort']
class WorkflowTemplateAbstract(TimeAbstract):
"""
工单模板 抽象类
"""
category = models.ForeignKey(
WorkflowCategory, null=True, verbose_name='所属分类', on_delete=models.SET_NULL)
name = models.CharField(max_length=100, unique=True, verbose_name='工单模板名')
products = models.JSONField(
default=list, verbose_name='关联产品', help_text='存储产品ID')
projects = models.JSONField(default=list, verbose_name='关联项目',
help_text='产品项目ID数组, eg: [[product_id, project_id]]')
environment = models.ForeignKey(
Environment, on_delete=models.SET_NULL, null=True, blank=True, verbose_name='关联环境')
enabled = models.BooleanField(default=True, verbose_name='是否启用')
nodes = models.JSONField(verbose_name='节点配置')
revision = models.IntegerField(
default=0, verbose_name='版本号') # 模板每次变更, 更新版本号加 1
comment = models.CharField(
max_length=100, null=True, blank=True, verbose_name='模板备注')
sort = models.IntegerField(default=999, verbose_name='排序')
@property
def node_list(self):
return [i['name'] for i in self.nodes]
def get_node_conf(self, node_name):
node_index = self.node_list.index(node_name)
return self.nodes[node_index]
class Meta:
abstract = True
ordering = ['sort']
def __str__(self):
return self.name
class WorkflowTemplate(WorkflowTemplateAbstract):
"""
工单模板
"""
class Meta:
db_table = 'workflow_workflowtemplate'
class WorkflowTemplateRevisionHistory(WorkflowTemplateAbstract):
"""
工单模板版本历史保存
创建工单的时候检查当前模板版本号是否在本模型中存在
如果不存在, 从 TicketTemplate 复制一份到这边。
"""
name = models.CharField(max_length=100, verbose_name='工单模板名')
class Meta:
db_table = 'workflow_workflowtemplaterevisionhistory'
class Workflow(TimeAbstract):
"""
工单
"""
class STATUS:
close = '已关闭'
revoke = '已撤回'
reject = '被驳回'
wait = '待处理'
complete = '已完成'
failed = '执行失败'
choices = (
(close, close),
(revoke, revoke),
(reject, reject),
(wait, wait),
(complete, complete),
(failed, failed)
)
wid = models.CharField(max_length=40, null=True, blank=True, unique=True, verbose_name='工单号',
help_text='前端不需要传值')
topic = models.CharField(max_length=200, verbose_name='工单标题')
node = models.CharField(max_length=50, verbose_name='当前节点名')
status = models.CharField(
max_length=30, choices=STATUS.choices, verbose_name='工单状态')
creator = models.ForeignKey( | UserProfile, null=True, on_delete=models.SET_NULL, verbose_name='发起人') | 3 | 2023-12-13 03:09:32+00:00 | 4k |
timo-reymann/python-oauth2-cli-auth | oauth2_cli_auth/http_server.py | [
{
"identifier": "_method_with_timeout",
"path": "oauth2_cli_auth/_timeout.py",
"snippet": "def _method_with_timeout(your_method, timeout_seconds=5, *args, **kwargs):\n signal.signal(signal.SIGALRM, _timeout_handler)\n signal.alarm(timeout_seconds)\n\n try:\n result = your_method(*args, *... | from http.server import BaseHTTPRequestHandler, HTTPServer
from string import Template
from typing import Optional
from urllib.parse import parse_qs, urlparse
from oauth2_cli_auth._timeout import _method_with_timeout, TimeoutException | 1,801 | opacity: 0
}
100% {
opacity: 100
}
}
.icon svg {
padding: 1rem;
}
.icon svg polyline {
-webkit-animation: checkmark 0.25s ease-in-out 0.7s backwards;
animation: checkmark 0.25s ease-in-out 0.7s backwards
}
.icon svg circle {
-webkit-animation: checkmark-circle 0.6s ease-in-out backwards;
animation: checkmark-circle 0.6s ease-in-out backwards;
}
.icon svg circle#colored {
-webkit-animation: colored-circle 0.6s ease-in-out 0.7s backwards;
animation: colored-circle 0.6s ease-in-out 0.7s backwards;
}
</style>
</head>
<body>
<div class="message">
<div class="animation-ctn">
<div class="icon">
$svg
</div>
</div>
<h1>$title</h1>
<p>$message</p>
</div>
</body>
</html>
""")
def render(self, title: str, message: str, lang: str = "en", has_error: bool = False):
return self.PAGE_TEMPLATE.substitute(
lang=lang,
title=title,
message=message,
svg=self.ERROR_SVG if has_error else self.SUCCESS_SVG,
)
class OAuthRedirectHandler(BaseHTTPRequestHandler):
callback_template = CallbackPageTemplate()
def log_message(self, format, *args):
# silence the log messages
pass
def do_GET(self):
params = parse_qs(urlparse(self.path).query)
has_error = "code" not in params or len(params['code']) != 1 or params['code'][0].strip() == ""
if has_error:
self.send_response(400)
title = "Oh snap!"
message = "Something went wrong trying to authenticate you. Please try going back in your browser, or restart the auth process."
else:
self.send_response(200)
self.server._code = params["code"][0]
title = "Success"
message = "You have been authenticated successfully. You may close this browser window now and go back to the terminal"
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(
self.callback_template
.render(
lang="en",
title=title,
message=message,
has_error=has_error
)
.encode("utf-8")
)
class OAuthCallbackHttpServer(HTTPServer):
"""
Simplistic HTTP Server to provide local callback URL for oauth2 provider
"""
def __init__(self, port):
super().__init__(("", port), OAuthRedirectHandler)
self._code = None
def get_code(self):
return self._code
@property
def callback_url(self):
return f"http://localhost:{self.server_port}"
def wait_for_code(self, attempts: int = 3, timeout_per_attempt=10) -> Optional[int]:
"""
Wait for the server to open the callback page containing the code query parameter.
It tries for #attempts with a timeout of #timeout_per_attempts for each attempt.
This prevents the CLI from getting stuck by unsolved callback URls
:param attempts: Amount of attempts
:param timeout_per_attempt: Timeout for each attempt to be successful
:return: Code from callback page or None if the callback page is not called successfully
"""
for i in range(0, attempts):
try:
_method_with_timeout(self.handle_request, timeout_seconds=timeout_per_attempt)
|
class CallbackPageTemplate:
SUCCESS_SVG = """
<svg xmlns="http://www.w3.org/2000/svg" width="154px" height="154px">
<g fill="none" stroke="#22AE73" stroke-width="2">
<circle cx="77" cy="77" r="72" style="stroke-dasharray:480px, 480px; stroke-dashoffset: 960px;"></circle>
<circle id="colored" fill="#22AE73" cx="77" cy="77" r="72" style="stroke-dasharray:480px, 480px; stroke-dashoffset: 960px;"></circle>
<polyline class="st0" stroke="#fff" stroke-width="10" points="43.5,77.8 63.7,97.9 112.2,49.4 " style="stroke-dasharray:100px, 100px; stroke-dashoffset: 200px;"/>
</g>
</svg>
"""
ERROR_SVG = """
<svg xmlns="http://www.w3.org/2000/svg" width="154px" height="154px">
<g fill="none" stroke="#F44812" stroke-width="2">
<circle cx="77" cy="77" r="72" style="stroke-dasharray:480px, 480px; stroke-dashoffset: 960px;"></circle>
<circle id="colored" fill="#F44812" cx="77" cy="77" r="72" style="stroke-dasharray:480px, 480px; stroke-dashoffset: 960px;"></circle>
<polyline class="st0" stroke="#fff" stroke-width="10" points="43.5,77.8 112.2,77.8 " style="stroke-dasharray:100px, 100px; stroke-dashoffset: 200px;"/>
</g>
</svg>
"""
PAGE_TEMPLATE = Template("""
<html lang="$lang">
<head>
<title>$title</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<meta name="charset" content="utf-8">
<style>
* {
margin: 0;
padding: 0;
}
body {
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
}
@media (prefers-color-scheme: dark) {
body {
background: rgb(34, 39, 46);
color: rgb(173, 186, 199);
}
}
html, body {
display: flex;
align-items: center;
justify-content: center;
height: 100%;
}
h1 {
font-size: 4rem;
}
p {
font-size: 1.4rem;
max-width: 70ch;
}
.message {
text-align: center;
}
.animation-ctn {
text-align: center;
}
@keyframes checkmark {
0% {
stroke-dashoffset: 100px
}
100% {
stroke-dashoffset: 0px
}
}
@keyframes checkmark-circle {
0% {
stroke-dashoffset: 480px
}
100% {
stroke-dashoffset: 960px
}
}
@keyframes colored-circle {
0% {
opacity: 0
}
100% {
opacity: 100
}
}
.icon svg {
padding: 1rem;
}
.icon svg polyline {
-webkit-animation: checkmark 0.25s ease-in-out 0.7s backwards;
animation: checkmark 0.25s ease-in-out 0.7s backwards
}
.icon svg circle {
-webkit-animation: checkmark-circle 0.6s ease-in-out backwards;
animation: checkmark-circle 0.6s ease-in-out backwards;
}
.icon svg circle#colored {
-webkit-animation: colored-circle 0.6s ease-in-out 0.7s backwards;
animation: colored-circle 0.6s ease-in-out 0.7s backwards;
}
</style>
</head>
<body>
<div class="message">
<div class="animation-ctn">
<div class="icon">
$svg
</div>
</div>
<h1>$title</h1>
<p>$message</p>
</div>
</body>
</html>
""")
def render(self, title: str, message: str, lang: str = "en", has_error: bool = False):
return self.PAGE_TEMPLATE.substitute(
lang=lang,
title=title,
message=message,
svg=self.ERROR_SVG if has_error else self.SUCCESS_SVG,
)
class OAuthRedirectHandler(BaseHTTPRequestHandler):
callback_template = CallbackPageTemplate()
def log_message(self, format, *args):
# silence the log messages
pass
def do_GET(self):
params = parse_qs(urlparse(self.path).query)
has_error = "code" not in params or len(params['code']) != 1 or params['code'][0].strip() == ""
if has_error:
self.send_response(400)
title = "Oh snap!"
message = "Something went wrong trying to authenticate you. Please try going back in your browser, or restart the auth process."
else:
self.send_response(200)
self.server._code = params["code"][0]
title = "Success"
message = "You have been authenticated successfully. You may close this browser window now and go back to the terminal"
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(
self.callback_template
.render(
lang="en",
title=title,
message=message,
has_error=has_error
)
.encode("utf-8")
)
class OAuthCallbackHttpServer(HTTPServer):
"""
Simplistic HTTP Server to provide local callback URL for oauth2 provider
"""
def __init__(self, port):
super().__init__(("", port), OAuthRedirectHandler)
self._code = None
def get_code(self):
return self._code
@property
def callback_url(self):
return f"http://localhost:{self.server_port}"
def wait_for_code(self, attempts: int = 3, timeout_per_attempt=10) -> Optional[int]:
"""
Wait for the server to open the callback page containing the code query parameter.
It tries for #attempts with a timeout of #timeout_per_attempts for each attempt.
This prevents the CLI from getting stuck by unsolved callback URls
:param attempts: Amount of attempts
:param timeout_per_attempt: Timeout for each attempt to be successful
:return: Code from callback page or None if the callback page is not called successfully
"""
for i in range(0, attempts):
try:
_method_with_timeout(self.handle_request, timeout_seconds=timeout_per_attempt) | except TimeoutException: | 1 | 2023-12-09 12:14:33+00:00 | 4k |
Chris10M/Ev2Hands | src/Ev2Hands/model/TEHNet.py | [
{
"identifier": "PointNetSetAbstractionMsg",
"path": "src/Ev2Hands/model/pointnet2_utils.py",
"snippet": "class PointNetSetAbstractionMsg(nn.Module):\n def __init__(self, npoint, radius_list, nsample_list, in_channel, mlp_list):\n super(PointNetSetAbstractionMsg, self).__init__()\n self... | import numpy as np
import torch.nn as nn
import torch
import os
import torch.nn.functional as F
from .pointnet2_utils import PointNetSetAbstractionMsg, PointNetSetAbstraction, PointNetFeaturePropagation | 3,103 | def __init__(self):
super(AttentionBlock, self).__init__()
def forward(self, key, value, query):
query = query.permute(0, 2, 1)
N, KC = key.shape[:2]
key = key.view(N, KC, -1)
N, KC = value.shape[:2]
value = value.view(N, KC, -1)
sim_map = torch.bmm(key, query)
sim_map = (KC ** -.5 ) * sim_map
sim_map = F.softmax(sim_map, dim=1)
context = torch.bmm(sim_map, value)
return context
class MANORegressor(nn.Module):
def __init__(self, n_inp_features=4, n_pose_params=6, n_shape_params=10):
super(MANORegressor, self).__init__()
normal_channel = True
if normal_channel:
additional_channel = n_inp_features
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], additional_channel, [[128, 128, 256], [128, 196, 256]])
self.sa2 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512], group_all=True)
self.n_pose_params = n_pose_params
self.n_mano_params = n_pose_params + n_shape_params
self.mano_regressor = nn.Sequential(
nn.Linear(512, 1024),
nn.ReLU(),
nn.BatchNorm1d(1024),
nn.Dropout(0.3),
nn.Linear(1024, 3 + self.n_mano_params + 3),
)
def J3dtoJ2d(self, j3d, scale):
B, N = j3d.shape[:2]
device = j3d.device
j2d = torch.zeros(B, N, 2, device=device)
j2d[:, :, 0] = scale[:, :, 0] * j3d[:, :, 0]
j2d[:, :, 1] = scale[:, :, 1] * j3d[:, :, 1]
return j2d
def forward(self, xyz, features, mano_hand, previous_mano_params=None):
device = xyz.device
batch_size = xyz.shape[0]
l0_xyz = xyz
l0_points = features
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l2_xyz = l2_xyz.squeeze(-1)
l2_points = l2_points.squeeze(-1)
if previous_mano_params is None:
previous_mano_params = torch.zeros(self.n_mano_params).unsqueeze(0).expand(batch_size, -1).to(device)
previous_rot_trans_params = torch.zeros(6).unsqueeze(0).expand(batch_size, -1).to(device)
mano_params = self.mano_regressor(l2_points)
global_orient = mano_params[:, :3]
hand_pose = mano_params[:, 3:3+self.n_pose_params]
betas = mano_params[:, 3+self.n_pose_params:-3]
transl = mano_params[:, -3:]
device = mano_hand.shapedirs.device
mano_args = {
'global_orient': global_orient.to(device),
'hand_pose' : hand_pose.to(device),
'betas' : betas.to(device),
'transl' : transl.to(device),
}
mano_outs = dict()
output = mano_hand(**mano_args)
mano_outs['vertices'] = output.vertices
mano_outs['j3d'] = output.joints
mano_outs.update(mano_args)
if not self.training:
mano_outs['faces'] = np.tile(mano_hand.faces, (batch_size, 1, 1))
return mano_outs
class TEHNet(nn.Module):
def __init__(self, n_pose_params, num_classes=4):
super(TEHNet, self).__init__()
normal_channel = True
if normal_channel:
additional_channel = 1 + int(os.getenv('ERPC', 0))
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3+additional_channel, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])
self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True)
|
class AttentionBlock(nn.Module):
def __init__(self):
super(AttentionBlock, self).__init__()
def forward(self, key, value, query):
query = query.permute(0, 2, 1)
N, KC = key.shape[:2]
key = key.view(N, KC, -1)
N, KC = value.shape[:2]
value = value.view(N, KC, -1)
sim_map = torch.bmm(key, query)
sim_map = (KC ** -.5 ) * sim_map
sim_map = F.softmax(sim_map, dim=1)
context = torch.bmm(sim_map, value)
return context
class MANORegressor(nn.Module):
def __init__(self, n_inp_features=4, n_pose_params=6, n_shape_params=10):
super(MANORegressor, self).__init__()
normal_channel = True
if normal_channel:
additional_channel = n_inp_features
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], additional_channel, [[128, 128, 256], [128, 196, 256]])
self.sa2 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512], group_all=True)
self.n_pose_params = n_pose_params
self.n_mano_params = n_pose_params + n_shape_params
self.mano_regressor = nn.Sequential(
nn.Linear(512, 1024),
nn.ReLU(),
nn.BatchNorm1d(1024),
nn.Dropout(0.3),
nn.Linear(1024, 3 + self.n_mano_params + 3),
)
def J3dtoJ2d(self, j3d, scale):
B, N = j3d.shape[:2]
device = j3d.device
j2d = torch.zeros(B, N, 2, device=device)
j2d[:, :, 0] = scale[:, :, 0] * j3d[:, :, 0]
j2d[:, :, 1] = scale[:, :, 1] * j3d[:, :, 1]
return j2d
def forward(self, xyz, features, mano_hand, previous_mano_params=None):
device = xyz.device
batch_size = xyz.shape[0]
l0_xyz = xyz
l0_points = features
l1_xyz, l1_points = self.sa1(l0_xyz, l0_points)
l2_xyz, l2_points = self.sa2(l1_xyz, l1_points)
l2_xyz = l2_xyz.squeeze(-1)
l2_points = l2_points.squeeze(-1)
if previous_mano_params is None:
previous_mano_params = torch.zeros(self.n_mano_params).unsqueeze(0).expand(batch_size, -1).to(device)
previous_rot_trans_params = torch.zeros(6).unsqueeze(0).expand(batch_size, -1).to(device)
mano_params = self.mano_regressor(l2_points)
global_orient = mano_params[:, :3]
hand_pose = mano_params[:, 3:3+self.n_pose_params]
betas = mano_params[:, 3+self.n_pose_params:-3]
transl = mano_params[:, -3:]
device = mano_hand.shapedirs.device
mano_args = {
'global_orient': global_orient.to(device),
'hand_pose' : hand_pose.to(device),
'betas' : betas.to(device),
'transl' : transl.to(device),
}
mano_outs = dict()
output = mano_hand(**mano_args)
mano_outs['vertices'] = output.vertices
mano_outs['j3d'] = output.joints
mano_outs.update(mano_args)
if not self.training:
mano_outs['faces'] = np.tile(mano_hand.faces, (batch_size, 1, 1))
return mano_outs
class TEHNet(nn.Module):
def __init__(self, n_pose_params, num_classes=4):
super(TEHNet, self).__init__()
normal_channel = True
if normal_channel:
additional_channel = 1 + int(os.getenv('ERPC', 0))
else:
additional_channel = 0
self.normal_channel = normal_channel
self.sa1 = PointNetSetAbstractionMsg(512, [0.1, 0.2, 0.4], [32, 64, 128], 3+additional_channel, [[32, 32, 64], [64, 64, 128], [64, 96, 128]])
self.sa2 = PointNetSetAbstractionMsg(128, [0.4,0.8], [64, 128], 128+128+64, [[128, 128, 256], [128, 196, 256]])
self.sa3 = PointNetSetAbstraction(npoint=None, radius=None, nsample=None, in_channel=512 + 3, mlp=[256, 512, 1024], group_all=True) | self.fp3 = PointNetFeaturePropagation(in_channel=1536, mlp=[256, 256]) | 2 | 2023-12-13 08:18:53+00:00 | 4k |
solanav/phishflood | phishings/views.py | [
{
"identifier": "rabbit_conf",
"path": "config/rabbit_conf.py",
"snippet": "HOST = \"rabbitmq\"\nQUEUE = \"phishings_queue\"\nEXCHANGE = \"phishings\"\nROUTINGKEY = \"info\""
},
{
"identifier": "Phishing",
"path": "phishings/models.py",
"snippet": "class Phishing(models.Model):\n id =... | import json
import pika
from config import rabbit_conf
from typing import Dict
from phishings.models import Phishing, Form, Input, Action
from rest_framework import viewsets
from django.contrib.auth.models import User, Group
from rest_framework import permissions
from rest_framework.response import Response
from pika.exchange_type import ExchangeType
from phishings.serializers import (
GroupSerializer,
UserSerializer,
PhishingSerializer,
FormSerializer,
InputSerializer,
ActionSerializer,
) | 1,617 |
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by("-date_joined")
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [permissions.IsAuthenticated]
class FullPhishingViewSet(viewsets.ModelViewSet):
"""
API endpoint that returns all phishings as a single object with nested forms, inputs, and actions.
"""
queryset = Phishing.objects.all()
serializer_class = PhishingSerializer
permission_classes = [permissions.IsAuthenticated]
def fullphishing(self, request, id: str) -> Dict:
phishing = Phishing.objects.get(id=id)
json_phishing = self.get_serializer(phishing).data
json_phishing["forms"] = []
json_phishing["actions"] = []
forms = Form.objects.filter(phishing=phishing)
for form in forms:
json_form = FormSerializer(form, context={"request": request}).data
assert isinstance(json_form, Dict)
json_form["inputs"] = []
inputs = Input.objects.filter(form=form)
for input_obj in inputs:
json_input = InputSerializer(
input_obj, context={"request": request}
).data
json_form["inputs"].append(json_input)
json_phishing["forms"].append(json_form)
actions = Action.objects.filter(phishing=phishing)
for action in actions:
|
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by("-date_joined")
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated]
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [permissions.IsAuthenticated]
class FullPhishingViewSet(viewsets.ModelViewSet):
"""
API endpoint that returns all phishings as a single object with nested forms, inputs, and actions.
"""
queryset = Phishing.objects.all()
serializer_class = PhishingSerializer
permission_classes = [permissions.IsAuthenticated]
def fullphishing(self, request, id: str) -> Dict:
phishing = Phishing.objects.get(id=id)
json_phishing = self.get_serializer(phishing).data
json_phishing["forms"] = []
json_phishing["actions"] = []
forms = Form.objects.filter(phishing=phishing)
for form in forms:
json_form = FormSerializer(form, context={"request": request}).data
assert isinstance(json_form, Dict)
json_form["inputs"] = []
inputs = Input.objects.filter(form=form)
for input_obj in inputs:
json_input = InputSerializer(
input_obj, context={"request": request}
).data
json_form["inputs"].append(json_input)
json_phishing["forms"].append(json_form)
actions = Action.objects.filter(phishing=phishing)
for action in actions: | json_action = ActionSerializer(action, context={"request": request}).data | 10 | 2023-12-11 16:38:36+00:00 | 4k |
aatmunbaxi/orgroamtools | orgroamtools/data.py | [
{
"identifier": "IdentifierType",
"path": "orgroamtools/_utils.py",
"snippet": "class IdentifierType(Enum):\n \"\"\"\n Nodes in an org-roam graph can identified uniquely by their ID, and non-uniquely\n by their title. This enum disambiguates the the type of an identifier\n for functions that... | import os
import warnings
import sqlite3 as sql
import copy
import networkx as nx
import orgparse as op
from typing import Iterable, Tuple, Optional
from dataclasses import dataclass
from orgroamtools._utils import (
IdentifierType,
DuplicateTitlesWarning,
extract_math_snippets,
extract_src_blocks,
) | 2,593 | except sql.Error as e:
print("Connection failed: ", e)
return []
def __init_misc_links(self, dbpath: str) -> list[list[OrgLink]]:
    """Collect, per node, the links that do not point at other nodes.

    Parameters
    ----------
    dbpath : ``str``
        path to org-roam database

    Returns
    -------
    ``list[list[OrgLink]]``
        One list per node (ordered by node id) of the node's non-ID
        links (files, images, internet links, etc).
    """
    q = """SELECT n.id, GROUP_CONCAT(CASE WHEN l.type != '"id"' THEN l.dest END),
    GROUP_CONCAT(CASE WHEN l.type != '"id"' THEN l.type END)
    FROM
    nodes n
    LEFT JOIN
    links l ON n.id = l.source
    GROUP BY
    n.id
    ORDER BY
    n.id;"""
    try:
        with sql.connect(dbpath, uri=True) as con:
            rows = con.cursor().execute(q).fetchall()

            def strip_quotes(text):
                # sqlite stores the org-roam values with literal quotes
                return text.replace('"', "")

            # Pair each destination with its link type, node by node.
            pairs_per_node = []
            for row in rows:
                if row[1]:
                    dests = tuple(strip_quotes(row[1]).split(","))
                    types = tuple(strip_quotes(row[2]).split(","))
                    pairs_per_node.append(list(zip(dests, types)))
                else:
                    pairs_per_node.append([])

            result = []
            for pairs in pairs_per_node:
                node_links = []
                for pair in pairs:
                    node_links.append(OrgLink(pair[1], pair[0], None) if pair else [])
                result.append(node_links)
            return result
    except sql.Error as e:
        print("Connection failed: ", e)
        return []
def remove_orphans(self) -> RoamGraph:
    """Remove orphans from network

    This method removes orphans from the collection.  Note that despite
    the name it mutates ``self._node_index`` in place and returns
    ``self`` rather than building a copy.

    Returns
    -------
    RoamGraph
        Connected subcollection of self
    """
    # Positions (in the id-ordered, parallel per-node lists) of the
    # nodes previously identified as orphans.
    indices_of_orphans = [
        i for i in range(len(self.IDs)) if self.nodes[i] in self._orphans
    ]
    # Re-zip the parallel per-node columns, dropping orphan positions.
    new_node_data = [
        data
        for idx, data in enumerate(
            zip(
                self.IDs,
                self.titles,
                self.fnames,
                self._tags,
                self._links_to,
                self.misc_link_index.values(),
            )
        )
        if idx not in indices_of_orphans
    ]
    # Rebuild the node index (keyed by filename, j[2]) from survivors.
    new_node_index = {
        j[2]: RoamNode(j[0], j[1], j[2], j[3], j[4], j[5]) for j in new_node_data
    }
    self._node_index = new_node_index
    # presumably recomputes derived state (graph, orphan list, ...) from
    # the new node index — confirm against refresh()'s definition
    self.refresh()
    return self
def __is_orphan(self, node: RoamNode) -> bool:
    """Check if node is an orphan

    A node is an orphan when it neither links to any other node nor is
    linked to by any other node in the collection.

    Parameters
    ----------
    node : ``RoamNode``
        Node to check

    Returns
    -------
    ``bool``
        True if node is an orphan
    """
    # ``any`` already returns a bool, so the original
    # ``True if any(...) else False`` ternary was redundant.
    pointed_to = any(node.id in n.backlinks for n in self.nodes)
    points_to = node.backlinks != []
    return not points_to and not pointed_to
| from __future__ import annotations
@dataclass
class RoamNode:
    """Store relevant org-roam node information

    A node is an atomic note known to the org-roam database.
    It is uniquely determined by an ID generated at the time of creation, but
    has other identifiers and information that a user might want to know about.

    Attributes
    ----------
    id : ``str``
        Unique org ID of org-roam node
    title : ``str``
        Title of org-roam node
    fname : ``str``
        Filename of org-roam node
    tags : ``set[str]``
        Collection of tags of org-roam node
    backlinks : ``list[str]``
        List of backlinks in org-roam node
    misc_links : ``list[OrgLink]``
        List of miscellaneous links that are not links to other nodes
    """

    id: str
    title: str
    fname: str
    tags: set[str]
    backlinks: list[str]
    misc_links: list[OrgLink]

    @property
    def body(self) -> str:
        """Return body of node

        Parses ``fname`` and locates the heading whose ID property
        matches this node's id.

        Returns
        -------
        ``str``
            Body text of node

        Raises
        ------
        ValueError
            If no heading in ``fname`` carries this node's ID.  (The
            original fell through with ``node_heading = None`` and
            crashed with an opaque ``TypeError`` while iterating it.)
        """
        root = op.load(self.fname)
        node_heading = None
        for node in root:
            if node.get_property("ID") == self.id:
                node_heading = node
                break
        if node_heading is None:
            raise ValueError(
                f"No heading with ID {self.id!r} found in {self.fname!r}"
            )
        return "\n".join(subtree.get_body() for subtree in node_heading)
class RoamGraph:
"""Store information of ``org-roam`` graph.
By default, the nodes in the _node_index are ordered ascending on
the node IDs. In the documentation, the words "collection", "network",
"graph", all mean the same thing: the graph with nodes the ``org-roam`` nodes
and edges determined by backlinks in the ``org-roam`` collection.
The location of the ``org-roam`` database is the value of ``(org-roam-db-location)``::
from orgroamtools.data import RoamGraph
collection = RoamGraph(LOCATION_OF_DB)
Attributes
----------
db_path : ``str``
Path to org-roam database connected to graph
_id_title_map : ``dict[str,str]``
Map with keys the id of nodes and values the titles of the corresponding nodes
_graph : ``nx.MultiDiGraph``
``networkx`` graph representation of the collection
_node_index : ``dict[str, RoamNode]``
Map with keys the ID of nodes and values the ``RoamNode`` object that corresponds
_orphans : ``list[RoamNode]``
List of orphans in network. An orphan node is one with no links connecting it to any
other node
_is_connected : ``bool``
Tracks if network is connected (i.e. has no orphans)
_duplicate_titles : ``list[str]``
List of duplicated titles in network, used for warning user
_contains_dup_titles : ``bool``
Whether the collection has duplicated titles
"""
@classmethod
def init_empty(cls):
    """Initialize empty RoamGraph object

    Returns
    -------
    RoamGraph object with default fields initialized
    """
    # Fixed: the receiver of a @classmethod is conventionally named
    # ``cls``, not ``self``.  Behavior is unchanged: attributes are set
    # on the class object and the class object itself is returned,
    # exactly as before.
    cls.db_path = None

    cls._duplicate_titles = []
    cls._contains_dup_titles = None

    cls._id_title_map = dict()
    cls._graph = None
    cls._node_index = dict()
    cls._misc_link_index = dict()
    cls._orphans = []
    cls._is_connected = None
    return cls
def __init__(self, db: str):
    """Initializes RoamGraph object

    The RoamGraph object stores information about the nodes in the
    collection described by the database path provided. The nodes also store
    information about how they relate to each other via backlinks.

    Parameters
    ----------
    db : ``str``
        Path to org-roam database

    Raises
    ------
    AttributeError
        If ``db`` does not name an existing file.

    Examples
    --------
    >>> collection = RoamGraph(PATH_TO_ORGROAM_DB)
    """
    super(RoamGraph, self).__init__()

    self.db_path = os.path.expanduser(db)

    if not os.path.isfile(self.db_path):
        raise AttributeError(f"No such file or directory: {self.db_path}")

    # Parallel per-node columns, each ordered ascending by node id so
    # index i refers to the same node in every list.
    _fnames = self.__init_fnames(self.db_path)
    _titles = self.__init_titles(self.db_path)
    _ids = self.__init_ids(self.db_path)
    # NOTE(review): passes the raw ``db`` here instead of the expanded
    # ``self.db_path`` used everywhere else — confirm this is intended.
    links = self.__init_links_to(db)
    # Drop backlinks that point at IDs not present in the database.
    _links_to = [[ID for ID in link_list if ID in _ids] for link_list in links]
    _tags = self.__init_tags(self.db_path)

    _misc_links = self.__init_misc_links(self.db_path)
    # Node index is keyed by filename (j[2]); two nodes sharing a file
    # would collide — presumably one node per file is assumed here.
    self._node_index = {
        j[2]: RoamNode(j[0], j[1], j[2], j[3], j[4], j[5])
        for j in zip(_ids, _titles, _fnames, _tags, _links_to, _misc_links)
    }

    # Detect duplicated titles: ``seen.add`` returns None (falsy), so a
    # title lands in the list only when already present in ``seen``.
    seen = set()
    self._duplicate_titles = [x for x in self.titles if x in seen or seen.add(x)]
    self._contains_dup_titles = len(self._duplicate_titles) > 0
    if self._contains_dup_titles:
        warnings.warn(
            "Collection contains duplicate titles. Matching nodes by title will be non-exhaustive.",
            DuplicateTitlesWarning,
        )

    # In rare cases we'll pick up links to nonexistent nodes
    self._misc_link_index = {_ids[i]: _misc_links[i] for i in range(len(_ids))}
    self._id_title_map = {_ids[i]: self.titles[i] for i in range(len(_ids))}

    self._graph = nx.MultiDiGraph({_ids[i]: _links_to[i] for i in range(len(_ids))})

    # Orphans: nodes with no (undirected) link to any other node.
    self._orphans = [
        node
        for node in self._node_index.values()
        if not any(
            [
                self._nodes_linked(node, other, directed=False)
                for other in self._node_index.values()
                if other != node
            ]
        )
    ]

    self._is_connected = self._orphans == []
def __filter_tags(self, tags: list[str], exclude: bool) -> list[RoamNode]:
    """Filter network by tags

    Parameters
    ----------
    tags : ``list[str]``
        List of tags to filter by
    exclude : ``bool``
        Whether to exclude the tags in the new network or not

    Returns
    -------
    ``list[RoamNode]``
        Nodes carrying at least one of ``tags`` (or, with
        ``exclude=True``, nodes carrying none of them).
    """
    # BUG FIX: the original built a flat list of len(nodes) * len(tags)
    # booleans and zipped it against self.nodes, which misaligns nodes
    # and flags whenever more than one tag is supplied.  Compute a
    # single any-match flag per node instead (identical result for the
    # single-tag case).
    tfilter = [
        any(self._node_has_tag(node, tag) for tag in tags) for node in self.nodes
    ]
    if exclude:
        tfilter = [not b for b in tfilter]
    return [node for (node, b) in zip(self.nodes, tfilter) if b]
def __init_ids(self, dbpath: str) -> list[str]:
    """Read the ID of every node, sorted ascending.

    Parameters
    ----------
    dbpath : ``str``
        Path of org-roam database

    Returns
    -------
    List of node IDs
    """
    id_query = "SELECT id FROM nodes ORDER BY id ASC;"
    try:
        with sql.connect(dbpath, uri=True) as con:
            rows = con.cursor().execute(id_query).fetchall()
            ids = []
            for (raw_id,) in rows:
                # stored values carry literal quote characters
                ids.append(raw_id.replace('"', ""))
            return ids
    except sql.Error as e:
        print("Connection failed: ", e)
        return []
def __init_fnames(self, dbpath: str) -> list[str]:
    """Read the source filename of every node, sorted ascending by id.

    Parameters
    ----------
    dbpath : ``str``
        Path to org-roam database

    Returns
    -------
    List of node filepaths
    """
    fname_query = "SELECT file FROM nodes ORDER BY id ASC;"
    try:
        with sql.connect(dbpath, uri=True) as con:
            rows = con.cursor().execute(fname_query).fetchall()
            fnames = []
            for (raw_fname,) in rows:
                # stored values carry literal quote characters
                fnames.append(raw_fname.replace('"', ""))
            return fnames
    except sql.Error as e:
        print("Connection failed: ", e)
        return []
def __init_titles(self, dbpath: str) -> list[str]:
    """Read the title of every node, sorted ascending by id.

    Parameters
    ----------
    dbpath : ``str``
        Path to org-roam database

    Returns
    -------
    List of node titles
    """
    title_query = "SELECT title FROM nodes ORDER BY id ASC;"
    try:
        with sql.connect(dbpath, uri=True) as con:
            rows = con.cursor().execute(title_query).fetchall()
            titles = []
            for (raw_title,) in rows:
                # stored values carry literal quote characters
                titles.append(raw_title.replace('"', ""))
            return titles
    except sql.Error as e:
        print("Connection failed: ", e)
        return []
def __init_tags(self, dbpath: str) -> list[set[str]]:
    """Read the tag set of every node, sorted ascending by id.

    Parameters
    ----------
    dbpath : ``str``
        Path to org-roam database

    Returns
    -------
    List of node tags (as sets)
    """
    tags_query = "SELECT nodes.id, GROUP_CONCAT(tags.tag) AS tags FROM nodes LEFT JOIN tags ON nodes.id = tags.node_id GROUP BY nodes.id ORDER BY nodes.id ASC;"
    try:
        with sql.connect(dbpath, uri=True) as con:
            rows = con.cursor().execute(tags_query).fetchall()
            all_tags = []
            for _node_id, concat in rows:
                if not concat:
                    # node has no tags; GROUP_CONCAT yielded NULL
                    all_tags.append(set())
                else:
                    all_tags.append(
                        {raw.replace('"', "") for raw in concat.split(",")}
                    )
            return all_tags
    except sql.Error as e:
        print("Connection failed: ", e)
        return []
def __init_links_to(self, dbpath: str) -> list[list[str]]:
    """Initialize list of links

    For every node (ordered ascending by id) collects the IDs it links
    to via org-roam ``id:`` links.  Element 0 of each inner list is the
    node's own id, followed by its link destinations.

    Parameters
    ----------
    dbpath : ``str``
        Path to org-roam database

    Returns
    -------
    List of backlinks in node (as a list)
    """
    # Only rows whose link type is literally '"id"' are node-to-node links.
    links_to_query = """
    SELECT n.id,
    GROUP_CONCAT(CASE WHEN l.type = '"id"' THEN l.dest END)
    FROM nodes n
    LEFT JOIN links l ON n.id = l.source
    GROUP BY n.id
    ORDER BY n.id ;
    """
    try:
        with sql.connect(dbpath, uri=True) as con:
            csr = con.cursor()
            query = csr.execute(links_to_query)
            clean = lambda s: s.replace('"', "")  # strip stored quote chars
            links = query.fetchall()
            # Separated links by comma might still have links we dont want (e.g. files, etc)
            self_and_links = [
                [clean(i[0])] + list(map(clean, i[1].split(",")))
                if i[1]
                else [clean(i[0])]
                for i in links
            ]
            return self_and_links
    except sql.Error as e:
        print("Connection failed: ", e)
        return []
def __init_misc_links(self, dbpath: str) -> list[list[OrgLink]]:
    """Initialize list of miscellaneous org-mode links

    Parameters
    ----------
    dbpath : ``str``
        path to org-roam database

    Returns
    -------
    ``list[OrgLink]``
        List of OrgRoam links that are not other nodes (files, images,
        internet links, etc), one inner list per node, ordered by node id.
    """
    # Non-ID links only; dest and type columns are comma-joined per node
    # by sqlite's GROUP_CONCAT.
    q = """SELECT n.id, GROUP_CONCAT(CASE WHEN l.type != '"id"' THEN l.dest END),
    GROUP_CONCAT(CASE WHEN l.type != '"id"' THEN l.type END)
    FROM
    nodes n
    LEFT JOIN
    links l ON n.id = l.source
    GROUP BY
    n.id
    ORDER BY
    n.id;"""
    try:
        with sql.connect(dbpath, uri=True) as con:
            csr = con.cursor()
            clean = lambda s: s.replace('"', "")  # strip stored quote chars
            quer = csr.execute(q)
            output = quer.fetchall()

            # Pair each destination with its link type; nodes without
            # such links get an empty list.
            links_and_types = [
                list(
                    zip(
                        tuple(clean(row[1]).split(",")),
                        tuple(clean(row[2]).split(",")),
                    )
                )
                if row[1]
                else []
                for row in output
            ]

            # ``prop`` is a (dest, type) pair; OrgLink is built as
            # OrgLink(type, dest, None).
            return [
                [OrgLink(prop[1], prop[0], None) if prop else [] for prop in lst]
                for lst in links_and_types
            ]
    except sql.Error as e:
        print("Connection failed: ", e)
        return []
def remove_orphans(self) -> RoamGraph:
    """Drop every orphan node from the collection.

    Mutates ``self._node_index`` in place, refreshes derived state, and
    returns ``self``.

    Returns
    -------
    RoamGraph
        Connected subcollection of self
    """
    orphan_positions = {
        i for i in range(len(self.IDs)) if self.nodes[i] in self._orphans
    }
    merged = zip(
        self.IDs,
        self.titles,
        self.fnames,
        self._tags,
        self._links_to,
        self.misc_link_index.values(),
    )
    rebuilt = {}
    for idx, (nid, title, fname, tags, links, misc) in enumerate(merged):
        if idx in orphan_positions:
            continue
        # node index is keyed by filename
        rebuilt[fname] = RoamNode(nid, title, fname, tags, links, misc)
    self._node_index = rebuilt
    self.refresh()
    return self
def __is_orphan(self, node: RoamNode) -> bool:
    """Check if node is an orphan

    A node is an orphan when it neither links to any other node nor is
    linked to by any other node in the collection.

    Parameters
    ----------
    node : ``RoamNode``
        Node to check

    Returns
    -------
    ``bool``
        True if node is an orphan
    """
    # ``any`` already returns a bool, so the original
    # ``True if any(...) else False`` ternary was redundant.
    pointed_to = any(node.id in n.backlinks for n in self.nodes)
    points_to = node.backlinks != []
    return not points_to and not pointed_to
| def __identifier_type(self, identifier: str) -> IdentifierType: | 0 | 2023-12-14 04:46:33+00:00 | 4k |
abing7k/redroid-script | stuffs/magisk.py | [
{
"identifier": "General",
"path": "stuffs/general.py",
"snippet": "class General:\n def download(self):\n loc_md5 = \"\"\n if os.path.isfile(self.dl_file_name):\n with open(self.dl_file_name,\"rb\") as f:\n bytes = f.read()\n loc_md5 = hashlib.m... | import gzip
import os
import shutil
import re
from stuffs.general import General
from tools.helper import bcolors, download_file, host, print_color, run, get_download_dir | 1,880 |
class Magisk(General):
download_loc = get_download_dir()
dl_link = "https://mgb1.androidfilehost.com/dl/_E1ugpo3KLudP2K-WauRfQ/1702724403/10620683726822077179/Magisk+Delta+25206+canary+%284dbd8358%29.apk"
dl_file_name = os.path.join(download_loc, "magisk.apk")
extract_to = "/tmp/magisk_unpack"
copy_dir = "./magisk"
magisk_dir = os.path.join(copy_dir, "system", "etc", "init", "magisk")
machine = host()
oringinal_bootanim = """
service bootanim /system/bin/bootanimation
class core animation
user graphics
group graphics audio
disabled
oneshot
ioprio rt 0
task_profiles MaxPerformance
"""
bootanim_component = """
on post-fs-data
start logd
exec u:r:su:s0 root root -- /system/etc/init/magisk/magisk{arch} --auto-selinux --setup-sbin /system/etc/init/magisk
exec u:r:su:s0 root root -- /system/etc/init/magisk/magiskpolicy --live --magisk "allow * magisk_file lnk_file *"
mkdir /sbin/.magisk 700
mkdir /sbin/.magisk/mirror 700
mkdir /sbin/.magisk/block 700
copy /system/etc/init/magisk/config /sbin/.magisk/config
rm /dev/.magisk_unblock
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --post-fs-data
wait /dev/.magisk_unblock 40
rm /dev/.magisk_unblock
on zygote-start
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --service
on property:sys.boot_completed=1
mkdir /data/adb/magisk 755
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --boot-complete
exec -- /system/bin/sh -c "if [ ! -e /data/data/io.github.huskydg.magisk ] ; then pm install /system/etc/init/magisk/magisk.apk ; fi"
on property:init.svc.zygote=restarting
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart
on property:init.svc.zygote=stopped
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart
""".format(arch=machine[1])
def download(self):
    """Fetch the Magisk-Delta apk, replacing any previously downloaded copy."""
    target = self.dl_file_name
    # Drop a stale file first so the fetch always starts clean.
    if os.path.isfile(target):
        os.remove(target)
    print_color("Downloading latest Magisk-Delta now .....", bcolors.GREEN)
    download_file(self.dl_link, target)
def copy(self):
if os.path.exists(self.copy_dir):
shutil.rmtree(self.copy_dir)
if not os.path.exists(self.magisk_dir):
os.makedirs(self.magisk_dir, exist_ok=True)
if not os.path.exists(os.path.join(self.copy_dir, "sbin")):
os.makedirs(os.path.join(self.copy_dir, "sbin"), exist_ok=True)
print_color("Copying magisk libs now ...", bcolors.GREEN)
lib_dir = os.path.join(self.extract_to, "lib", self.machine[0])
for parent, dirnames, filenames in os.walk(lib_dir):
for filename in filenames:
o_path = os.path.join(lib_dir, filename)
filename = re.search('lib(.*)\.so', filename)
n_path = os.path.join(self.magisk_dir, filename.group(1))
shutil.copyfile(o_path, n_path)
|
class Magisk(General):
download_loc = get_download_dir()
dl_link = "https://mgb1.androidfilehost.com/dl/_E1ugpo3KLudP2K-WauRfQ/1702724403/10620683726822077179/Magisk+Delta+25206+canary+%284dbd8358%29.apk"
dl_file_name = os.path.join(download_loc, "magisk.apk")
extract_to = "/tmp/magisk_unpack"
copy_dir = "./magisk"
magisk_dir = os.path.join(copy_dir, "system", "etc", "init", "magisk")
machine = host()
oringinal_bootanim = """
service bootanim /system/bin/bootanimation
class core animation
user graphics
group graphics audio
disabled
oneshot
ioprio rt 0
task_profiles MaxPerformance
"""
bootanim_component = """
on post-fs-data
start logd
exec u:r:su:s0 root root -- /system/etc/init/magisk/magisk{arch} --auto-selinux --setup-sbin /system/etc/init/magisk
exec u:r:su:s0 root root -- /system/etc/init/magisk/magiskpolicy --live --magisk "allow * magisk_file lnk_file *"
mkdir /sbin/.magisk 700
mkdir /sbin/.magisk/mirror 700
mkdir /sbin/.magisk/block 700
copy /system/etc/init/magisk/config /sbin/.magisk/config
rm /dev/.magisk_unblock
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --post-fs-data
wait /dev/.magisk_unblock 40
rm /dev/.magisk_unblock
on zygote-start
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --service
on property:sys.boot_completed=1
mkdir /data/adb/magisk 755
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --boot-complete
exec -- /system/bin/sh -c "if [ ! -e /data/data/io.github.huskydg.magisk ] ; then pm install /system/etc/init/magisk/magisk.apk ; fi"
on property:init.svc.zygote=restarting
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart
on property:init.svc.zygote=stopped
exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart
""".format(arch=machine[1])
def download(self):
    """Download the Magisk-Delta apk to ``self.dl_file_name``.

    Any previously downloaded file is removed first so the download
    always starts fresh.
    """
    if os.path.isfile(self.dl_file_name):
        os.remove(self.dl_file_name)
    print_color("Downloading latest Magisk-Delta now .....", bcolors.GREEN)
    download_file(self.dl_link, self.dl_file_name)
def copy(self):
if os.path.exists(self.copy_dir):
shutil.rmtree(self.copy_dir)
if not os.path.exists(self.magisk_dir):
os.makedirs(self.magisk_dir, exist_ok=True)
if not os.path.exists(os.path.join(self.copy_dir, "sbin")):
os.makedirs(os.path.join(self.copy_dir, "sbin"), exist_ok=True)
print_color("Copying magisk libs now ...", bcolors.GREEN)
lib_dir = os.path.join(self.extract_to, "lib", self.machine[0])
for parent, dirnames, filenames in os.walk(lib_dir):
for filename in filenames:
o_path = os.path.join(lib_dir, filename)
filename = re.search('lib(.*)\.so', filename)
n_path = os.path.join(self.magisk_dir, filename.group(1))
shutil.copyfile(o_path, n_path) | run(["chmod", "+x", n_path]) | 5 | 2023-12-06 09:03:05+00:00 | 4k |
zvict/papr | models/tx.py | [
{
"identifier": "MLP",
"path": "models/mlp.py",
"snippet": "class MLP(nn.Module):\n def __init__(self, inp_dim=2, num_layers=3, num_channels=128, out_dim=2, act_type=\"leakyrelu\", last_act_type=\"none\",\n use_wn=True, a=1., b=1., trainable=False, skip_layers=[], bias=True, half_laye... | import torch
import torch.nn as nn
import math
from torch import autocast
from .mlp import MLP
from .utils import PoseEnc, activation_func | 2,721 |
def __init__(self, features, eps=1e-6):
    # features: size of the last dimension that forward() normalizes over.
    # eps: small constant added to the std to avoid division by zero.
    super(LayerNorm, self).__init__()
    self.a_2 = nn.Parameter(torch.ones(features))  # learnable scale (gamma)
    self.b_2 = nn.Parameter(torch.zeros(features))  # learnable shift (beta)
    self.eps = eps
def forward(self, x):
    """Standardize ``x`` over its last dimension, then scale and shift."""
    mu = x.mean(-1, keepdim=True)
    sigma = x.std(-1, keepdim=True)
    # same association as a_2 * (x - mu) / (sigma + eps) + b_2
    return self.a_2 * (x - mu) / (sigma + self.eps) + self.b_2
class InstanceNorm(nn.Module):
    """Parameter-free standardization across the first (instance) dimension."""

    def __init__(self, eps=1e-6):
        super(InstanceNorm, self).__init__()
        self.eps = eps  # guards against a zero std

    def forward(self, x):
        """Return ``(x - mean_0) / (std_0 + eps)``."""
        mu = x.mean(0, keepdim=True)
        sigma = x.std(0, keepdim=True)
        return (x - mu) / (sigma + self.eps)
def attention(query, key, kernel_type):
    """
    Compute Attention Scores
    query: [batch_size, n_heads, query_len, d_kq] or [batch_size, query_len, d_kq]
    key: [batch_size, n_heads, seq_len, d_kq] or [batch_size, seq_len, d_kq]

    ``kernel_type`` selects the similarity: (scaled / plain, optionally
    negated) dot products, pairwise L1/L2 distances, or cosine similarity.
    Returns a [..., query_len, seq_len] score matrix.
    """
    d_kq = query.size(-1)
    key_t = key.transpose(-2, -1)
    scale = math.sqrt(d_kq)

    def pairwise_dist(p):
        # [..., query_len, seq_len] matrix of p-norm distances
        return torch.norm(query.unsqueeze(-2) - key.unsqueeze(-3), p=p, dim=-1)

    if kernel_type == "scaled-dot":
        return torch.matmul(query, key_t) / scale
    if kernel_type == "-scaled-dot":
        return -torch.matmul(query, key_t) / scale
    if kernel_type == "dot":
        return torch.matmul(query, key_t)
    if kernel_type == "-dot":
        return -torch.matmul(query, key_t)
    if kernel_type == "l1-dist":
        return pairwise_dist(1)
    if kernel_type == "-l1-dist":
        return -pairwise_dist(1)
    if kernel_type == "l2-dist":
        return pairwise_dist(2)
    if kernel_type == "-l2-dist":
        return -pairwise_dist(2)
    if kernel_type == "scaled-l2-dist":
        return pairwise_dist(2) / scale
    if kernel_type == "-scaled-l2-dist":
        return -pairwise_dist(2) / scale
    if kernel_type == "cosine":
        q_norm = torch.norm(query, dim=-1, keepdim=True)
        k_norm = torch.norm(key, dim=-1, keepdim=True).transpose(-2, -1)
        return torch.matmul(query, key_t) / (q_norm * k_norm)
    raise ValueError("Unknown kernel type: {}".format(kernel_type))
class FeedForward(nn.Module):
    """Pre-norm MLP block with dropout and an optional residual connection."""

    def __init__(self, d_input, d_output, d_ff, n_layer=2, act="relu", last_act="none", dropout=0.1, norm="layernorm",
                 residual=True, act_a=1.0, act_b=1.0, act_trainable=False, use_wn=False, eps=1e-6, skip_layers=[],
                 half_layers=[]):
        super(FeedForward, self).__init__()
        self.eps = eps
        self.d_input = d_input
        self.d_output = d_output
        # Pick the normalization applied before (innorm) and after
        # (outnorm) the MLP.
        if norm == "layernorm":
            in_norm, out_norm = LayerNorm(d_input, eps), LayerNorm(d_output, eps)
        elif norm == "instancenorm":
            in_norm, out_norm = InstanceNorm(eps), InstanceNorm(eps)
        elif norm == "none":
            in_norm, out_norm = nn.Identity(), nn.Identity()
        else:
            raise ValueError("Invalid Transformer norm type")
        self.innorm = in_norm
        self.outnorm = out_norm
        self.dropout = nn.Dropout(dropout)
        self.mlp = MLP(d_input, n_layer, d_ff, d_output, act_type=act, last_act_type=last_act, use_wn=use_wn,
                       a=act_a, b=act_b, trainable=act_trainable, skip_layers=skip_layers, half_layers=half_layers)
        self.residual = residual

    def forward(self, x):
        """Apply norm -> MLP -> dropout; add the input back when shapes allow."""
        transformed = self.dropout(self.mlp(self.innorm(x)))
        if self.residual and x.shape[-1] == self.d_output:
            return self.outnorm(x + transformed)
        return self.outnorm(transformed)
class Embeddings(nn.Module):
def __init__(self, d_k, d_q, d_v, d_model, seq_len, args, d_ko=0, d_qo=0, d_vo=0, eps=1e-6):
super(Embeddings, self).__init__()
self.d_k = d_k
self.d_q = d_q
self.d_v = d_v
self.seq_len = seq_len
self.args = args
self.embed_type = args.embed_type
self.d_model = d_model
self.share_embed = args.share_embed
self.d_ko = d_ko
self.d_qo = d_qo
self.d_vo = d_vo
self.eps = eps
|
def get_transformer(args, seq_len, v_extra_dim=0, k_extra_dim=0, q_extra_dim=0, eps=1e-6, use_amp=False, amp_dtype=torch.float16):
    """Build a Transformer from the configuration object ``args``.

    The ``*_dim_map`` tables translate the integer ``args.k_type`` /
    ``args.q_type`` / ``args.v_type`` codes into lists of per-feature
    channel widths; only code 1 is currently defined for each — other
    codes raise KeyError.  ``*_extra_dim`` values are forwarded as
    additional key/query/value dimensions.
    """
    k_dim_map = {
        1: [3, 3, 3],
    }
    k_dim = k_dim_map[args.k_type]

    q_dim_map = {
        1: [3],
    }
    q_dim = q_dim_map[args.q_type]

    v_dim_map = {
        1: [3, 3],
    }
    v_dim = v_dim_map[args.v_type]

    return Transformer(d_k=k_dim, d_q=q_dim, d_v=v_dim, d_model=args.d_model, d_out=args.d_out, seq_len=seq_len,
                       embed_args=args.embed, block_args=args.block, d_ko=k_extra_dim, d_qo=q_extra_dim,
                       d_vo=v_extra_dim, eps=eps, use_amp=use_amp, amp_dtype=amp_dtype)
class LayerNorm(nn.Module):
    """Classic layer normalization with learnable scale and shift."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a_2 = nn.Parameter(torch.ones(features))  # gamma
        self.b_2 = nn.Parameter(torch.zeros(features))  # beta
        self.eps = eps

    def forward(self, x):
        """Standardize over the last dim, then apply gamma/beta."""
        centered = x - x.mean(-1, keepdim=True)
        denom = x.std(-1, keepdim=True) + self.eps
        return self.a_2 * centered / denom + self.b_2
class InstanceNorm(nn.Module):
    "Construct a InstanceNorm module"

    def __init__(self, eps=1e-6):
        # eps: added to the std to avoid division by zero
        super(InstanceNorm, self).__init__()
        self.eps = eps

    def forward(self, x):
        # Standardize across dim 0 (the batch/instance axis); unlike
        # LayerNorm there is no learnable scale or shift.
        mean = x.mean(0, keepdim=True)
        std = x.std(0, keepdim=True)
        return (x - mean) / (std + self.eps)
def attention(query, key, kernel_type):
    """
    Compute Attention Scores
    query: [batch_size, n_heads, query_len, d_kq] or [batch_size, query_len, d_kq]
    key: [batch_size, n_heads, seq_len, d_kq] or [batch_size, seq_len, d_kq]

    ``kernel_type`` selects the score function; "-"-prefixed variants
    negate the base score, "scaled-" variants divide by sqrt(d_kq).
    Returns a [..., query_len, seq_len] score matrix.
    """
    d_kq = query.size(-1)
    if kernel_type == "scaled-dot":
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_kq)
    elif kernel_type == "-scaled-dot":
        scores = -torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_kq)
    elif kernel_type == "dot":
        scores = torch.matmul(query, key.transpose(-2, -1))
    elif kernel_type == "-dot":
        scores = -torch.matmul(query, key.transpose(-2, -1))
    elif kernel_type == "l1-dist":
        # broadcasted pairwise L1 distance between query and key vectors
        scores = torch.norm(query.unsqueeze(-2) -
                            key.unsqueeze(-3), p=1, dim=-1)
    elif kernel_type == "-l1-dist":
        scores = -torch.norm(query.unsqueeze(-2) -
                             key.unsqueeze(-3), p=1, dim=-1)
    elif kernel_type == "l2-dist":
        scores = torch.norm(query.unsqueeze(-2) -
                            key.unsqueeze(-3), p=2, dim=-1)
    elif kernel_type == "-l2-dist":
        scores = -torch.norm(query.unsqueeze(-2) -
                             key.unsqueeze(-3), p=2, dim=-1)
    elif kernel_type == "scaled-l2-dist":
        scores = torch.norm(query.unsqueeze(-2) -
                            key.unsqueeze(-3), p=2, dim=-1) / math.sqrt(d_kq)
    elif kernel_type == "-scaled-l2-dist":
        scores = -torch.norm(query.unsqueeze(-2) -
                             key.unsqueeze(-3), p=2, dim=-1) / math.sqrt(d_kq)
    elif kernel_type == "cosine":
        # dot product normalized by both vectors' norms; NOTE(review): no
        # eps guard, so zero-norm rows divide by zero — confirm inputs
        # are never all-zero.
        scores = torch.matmul(query, key.transpose(-2, -1)) / (
            torch.norm(query, dim=-1, keepdim=True)
            * torch.norm(key, dim=-1, keepdim=True).transpose(-2, -1)
        )
    else:
        raise ValueError("Unknown kernel type: {}".format(kernel_type))
    return scores
class FeedForward(nn.Module):
    "Implements FFN module."

    def __init__(self, d_input, d_output, d_ff, n_layer=2, act="relu", last_act="none", dropout=0.1, norm="layernorm",
                 residual=True, act_a=1.0, act_b=1.0, act_trainable=False, use_wn=False, eps=1e-6, skip_layers=[],
                 half_layers=[]):
        # NOTE(review): ``skip_layers``/``half_layers`` use mutable list
        # defaults — harmless as long as MLP never mutates them; verify.
        super(FeedForward, self).__init__()
        self.eps = eps
        self.d_input = d_input
        self.d_output = d_output
        # Normalization applied before (innorm) and after (outnorm) the MLP.
        if norm == "layernorm":
            self.innorm = LayerNorm(d_input, eps)
            self.outnorm = LayerNorm(d_output, eps)
        elif norm == "instancenorm":
            self.innorm = InstanceNorm(eps)
            self.outnorm = InstanceNorm(eps)
        elif norm == "none":
            self.innorm = nn.Identity()
            self.outnorm = nn.Identity()
        else:
            raise ValueError("Invalid Transformer norm type")
        self.dropout = nn.Dropout(dropout)
        self.mlp = MLP(d_input, n_layer, d_ff, d_output, act_type=act, last_act_type=last_act, use_wn=use_wn,
                       a=act_a, b=act_b, trainable=act_trainable, skip_layers=skip_layers, half_layers=half_layers)
        self.residual = residual

    def forward(self, x):
        # Residual connection only when the input width already matches
        # the output width.
        if self.residual and x.shape[-1] == self.d_output:
            return self.outnorm(x + self.dropout(self.mlp(self.innorm(x))))
        else:
            return self.outnorm(self.dropout(self.mlp(self.innorm(x))))
class Embeddings(nn.Module):
def __init__(self, d_k, d_q, d_v, d_model, seq_len, args, d_ko=0, d_qo=0, d_vo=0, eps=1e-6):
super(Embeddings, self).__init__()
self.d_k = d_k
self.d_q = d_q
self.d_v = d_v
self.seq_len = seq_len
self.args = args
self.embed_type = args.embed_type
self.d_model = d_model
self.share_embed = args.share_embed
self.d_ko = d_ko
self.d_qo = d_qo
self.d_vo = d_vo
self.eps = eps
| self.posenc = PoseEnc(args.pe_factor, args.pe_mult_factor) | 1 | 2023-12-08 19:51:42+00:00 | 4k |
Saibo-creator/transformers-CFG | transformers_cfg/grammar_utils.py | [
{
"identifier": "LEAF",
"path": "transformers_cfg/vocab_struct.py",
"snippet": "LEAF = -1"
},
{
"identifier": "TokenTrie",
"path": "transformers_cfg/vocab_struct.py",
"snippet": "class TokenTrie:\n def __init__(self, tokenizer):\n self.eos_token_id = tokenizer.eos_token_id\n ... | import logging
import sys
import time
import torch
from abc import ABC
from functools import lru_cache
from typing import Dict, List
from .vocab_struct import LEAF, TokenTrie | 3,365 | return remaining_src
def parse_rule(state, src):
    """Parse one ``name ::= alternates`` rule from ``src``.

    Registers the rule name in ``state``, parses its alternates into the
    grammar encoding, and consumes the trailing line terminator.
    Returns the remaining, left-trimmed source after the rule.
    """
    name, remaining_src = parse_name(src)
    remaining_src = remove_leading_white_space(remaining_src, False)
    rule_id = get_symbol_id(state, name)

    if remaining_src[:3] != "::=":
        raise RuntimeError("expecting ::= at " + remaining_src)
    remaining_src = remove_leading_white_space(remaining_src[3:], True)

    remaining_src = parse_alternates(state, remaining_src, name, rule_id, False)

    # Consume exactly one line terminator: CRLF, lone CR, or LF.
    if remaining_src and remaining_src[0] == "\r":
        remaining_src = (
            remaining_src[2:] if remaining_src[1] == "\n" else remaining_src[1:]
        )
    elif remaining_src and remaining_src[0] == "\n":
        remaining_src = remaining_src[1:]
    elif remaining_src:
        raise RuntimeError("expecting newline or end at " + remaining_src)
    return remove_leading_white_space(remaining_src, True)
def parse_ebnf(src):
    """Parse an EBNF grammar string into a ParseState.

    Rules are consumed one at a time; after the last rule a 0xFFFF
    sentinel is appended to the encoding.  On a parse error an empty
    ParseState is returned instead of propagating the exception.
    """
    try:
        state = ParseState()
        grammar_repr = remove_leading_white_space(src, True)
        last_grammar_repr = ""
        while grammar_repr:
            if last_grammar_repr:
                last_parsed_rule_len = len(last_grammar_repr) - len(grammar_repr)
                logger.debug(
                    f"last_parsed_rule: {last_grammar_repr[:last_parsed_rule_len]}"
                )
            last_grammar_repr = grammar_repr
            grammar_repr = parse_rule(state, grammar_repr)
        state.grammar_encoding.append(0xFFFF)
        return state
    except RuntimeError as err:
        # BUG FIX: the original ``logger.warning("error parsing grammar:", err)``
        # passed ``err`` as a %-format argument with no placeholder in the
        # message, which makes the logging module raise a formatting error
        # instead of printing the warning.
        logger.warning("error parsing grammar: %s", err)
        return ParseState()
def print_rule(file, grammar_encoding, index, symbol_id_names):
    """Pretty-print the rule encoded at ``index`` into ``file``.

    Renders ``<pos>name ::= alt | alt ...`` with literal character
    classes shown as ``[lo-hi...]`` ranges.  Returns the offset of the
    next rule in ``grammar_encoding``.
    """
    rule_id = grammar_encoding[index]
    print(f"<{index}>{symbol_id_names[rule_id]} ::=", end=" ", file=file)
    pos = index + 1
    # Alternates are laid out back to back; a zero terminates the rule.
    while grammar_encoding[pos]:
        if pos - 1 > index:
            print("|", end=" ", file=file)
        pos += 1  # sequence size, not needed here
        # Elements of one alternate; a zero terminates the alternate.
        while grammar_encoding[pos]:
            if grammar_encoding[pos] == REF_RULE_MARKER:
                # next value is the id of a referenced rule
                ref_rule_id = grammar_encoding[pos + 1]
                print(
                    f"<{pos}>{symbol_id_names[ref_rule_id]}",
                    end=" ",
                    file=file,
                )
                pos += 2
            else:
                # literal char class: a count followed by (lo, hi) pairs
                print("<{}>[".format(pos), end="", file=file)
                num_chars = grammar_encoding[pos]
                pos += 1

                for i in range(0, num_chars, 2):
                    print(
                        "{}-".format(chr(grammar_encoding[pos + i])), end="", file=file
                    )
                    if i + 1 < num_chars:
                        print(
                            "{}".format(chr(grammar_encoding[pos + i + 1])),
                            end="",
                            file=file,
                        )
                print("]", end=" ", file=file)
                pos += num_chars
        pos += 1
    print(file=file)
    return pos + 1
def print_grammar(file, state):
    """Dump the parsed grammar to ``file``: the decoded rules, the raw
    hex encoding, and the per-rule encoded sizes."""
    pos = 0
    symbol_id_names = {v: k for k, v in state.symbol_ids.items()}
    print("Grammar Rules:", file=file)

    # Rule encodings are laid out back to back, terminated by 0xFFFF.
    while state.grammar_encoding[pos] != 0xFFFF:
        pos = print_rule(file, state.grammar_encoding, pos, symbol_id_names)
    pos = 0
    print("\nBinary representation:", file=file)
    while state.grammar_encoding[pos] != 0xFFFF:
        print(f"{state.grammar_encoding[pos]:04x}", end=" ", file=file)
        pos += 1
    # BUG FIX: the trailing sentinel line went to stdout instead of
    # ``file`` — every other print in this dump targets ``file``.
    print("ffff\n", file=file)

    offset = 0
    print("Grammar Rule Sizes:", file=file)
    for i, rule_size in enumerate(state.grammar_encoding_rule_size):
        print(
            f"<{i}> {rule_size} {state.grammar_encoding[offset:offset+rule_size]}",
            file=file,
        )
        offset += rule_size
###################################
# EBNF Grammar Parsing ends here #
###################################
class AbstractGrammarConstraint(ABC):
def __init__(self, grammar_str, start_rule_name, tokenizer):
state = parse_ebnf(grammar_str)
grammar_encoding = state.grammar_encoding
self.start_rule_id = state.symbol_ids.get(start_rule_name)
self.eos_token_id = tokenizer.eos_token_id
|
logger = logging.getLogger(__name__)
########################
# EBNF Grammar Parsing #
########################
END_OF_ALTERNATE_MARKER = 0  # zero-terminator for one alternate's element list
END_OF_RULE_MARKER = 0  # zero-terminator for a rule's list of alternates
TO_BE_FILLED_MARKER = 0  # placeholder value — presumably patched in later by the parser; confirm
REF_RULE_MARKER = 1  # flags that the next encoded value is a referenced rule id
LITERAL_MARKER = 2  # presumably flags an inline literal/char-class element; confirm against the encoder
class ParseState:
    """Mutable accumulator for one grammar parse.

    Attributes:
        symbol_ids: mapping from symbol name to numeric id.
        grammar_encoding: flat integer encoding of all parsed rules.
        grammar_encoding_rule_size: encoded length of each rule, in order.
    """

    def __init__(self):
        self.symbol_ids = dict()
        self.grammar_encoding = list()  # old name: out_grammar
        self.grammar_encoding_rule_size = list()
def get_symbol_id(state, src):
    """Return the numeric id of symbol ``src``, allocating the next
    sequential id on first sight."""
    return state.symbol_ids.setdefault(src, len(state.symbol_ids))
def generate_symbol_id(state, base_name):
    """Mint a fresh symbol id, registering it under ``<base_name>_<id>``."""
    fresh = len(state.symbol_ids)
    state.symbol_ids[f"{base_name}_{fresh}"] = fresh
    return fresh
def is_word_char(c):
    """True for characters allowed in rule names: alphanumerics, '-', '_'."""
    return c.isalnum() or c in ("-", "_")
def hex_to_int(c):
    """Convert one ASCII hex digit to its value, or -1 if ``c`` is not one.

    BUG FIX: the original used ``c.isdigit()`` followed by ``int(c)``.
    ``isdigit()`` is True for non-ASCII "digit" characters such as '²',
    for which ``int(c)`` then raises ValueError instead of this function
    returning -1.  Comparing against the ASCII ranges fixes the crash
    (and also stops treating non-ASCII decimal digits as hex digits).
    """
    if "0" <= c <= "9":
        return ord(c) - ord("0")
    if "a" <= c.lower() <= "f":
        return ord(c.lower()) - ord("a") + 10
    return -1
def remove_leading_white_space(src, newline_ok):
    """Skip whitespace and '#' comments at the start of *src*.

    A comment runs from '#' up to (but not including) the end of its line
    ('\\r' or '\\n').  When *newline_ok* is False, scanning stops at the
    first newline character instead of skipping over it.

    Parameters:
        src (str): the input string to be processed.
        newline_ok (bool): whether newline characters may be skipped (True)
            or terminate the scan (False).

    Returns:
        str: the remaining portion of *src*.
    """
    idx = 0
    length = len(src)
    while idx < length:
        ch = src[idx]
        if ch == "#":
            # Consume the comment body; the newline itself is handled by
            # the next loop iteration so newline_ok still applies to it.
            while idx < length and src[idx] not in ("\r", "\n"):
                idx += 1
            continue
        if not ch.isspace():
            break
        if not newline_ok and ch in ("\r", "\n"):
            break
        idx += 1
    return src[idx:]
def parse_name(src):
    """Split the leading rule name off *src*.

    A name is a maximal run of alphanumerics, '-' and '_'.

    Returns:
        (name, remaining_src)

    Raises:
        RuntimeError: if *src* does not start with a name character.
    """
    end = 0
    for ch in src:
        # Inlined is_word_char predicate.
        if not (ch.isalnum() or ch in {"-", "_"}):
            break
        end += 1
    if end == 0:
        raise RuntimeError("expecting name at " + src)
    return src[:end], src[end:]
def parse_char(src):
    """
    Parse the leading (possibly escaped) character from the input string.

    Supported escapes: \\xNN, \\", \\[, \\], \\r, \\n, \\t.  Any other
    leading character is returned as-is.

    :param src: grammar text to consume from
    :return: (char, remaining_src) where char is always a 1-character str
    :raises RuntimeError: on an unknown escape or empty input
    """
    # if we have a backslash, it's maybe an escape
    if src[0] == "\\":
        esc = src[1]
        if esc == "x":
            first = hex_to_int(src[2])
            if first > -1:
                second = hex_to_int(src[3])
                if second > -1:
                    # BUG FIX: the original returned the raw int here while
                    # every other path returns a str; callers apply ord() to
                    # the result, so a \xNN escape crashed with TypeError.
                    return chr((first << 4) + second), src[4:]
            raise RuntimeError("expecting \\xNN at " + src)
        elif esc in ('"', "[", "]"):
            return esc, src[2:]
        elif esc == "r":
            return "\r", src[2:]
        elif esc == "n":
            return "\n", src[2:]
        elif esc == "t":
            return "\t", src[2:]
        raise RuntimeError("unknown escape at " + src)
    elif src:
        return src[0], src[1:]
    raise RuntimeError("unexpected end of input")
def parse_sequence(state, src, rule_name, outbuf, is_nested):
    """
    Parse one alternate (a sequence of symbols) from *src* into *outbuf*.

    The alternate is appended in the flat encoding: a size slot (patched at
    the end), the encoded symbols, then END_OF_ALTERNATE_MARKER.  Handles
    literal strings, character classes, rule references, parenthesized
    groups, and the *, +, ? repetition operators (rewritten into synthesized
    recursive rules that are emitted directly into state.grammar_encoding).

    Returns the unparsed remainder of *src*.
    """
    out_start_pos = len(outbuf)
    # sequence size, will be replaced at end when known
    outbuf.append(TO_BE_FILLED_MARKER)
    # Start index of the most recently emitted symbol; the repetition
    # operators rewrite the slice outbuf[last_sym_start:].
    last_sym_start = len(outbuf)
    remaining_src = src
    while remaining_src:
        if remaining_src[0] == '"':  # literal string
            remaining_src = remaining_src[1:]
            last_sym_start = len(outbuf)
            while remaining_src[0] != '"':
                char, remaining_src = parse_char(remaining_src)
                # each char of a literal is encoded as a "range" of char - char
                outbuf.append(LITERAL_MARKER)
                outbuf.append(ord(char))
                outbuf.append(ord(char))
            remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
        elif remaining_src[0] == "[":  # char range(s)
            remaining_src = remaining_src[1:]
            last_sym_start = len(outbuf)
            # num chars in range - replaced at end of loop
            outbuf.append(TO_BE_FILLED_MARKER)
            while remaining_src[0] != "]":
                char, remaining_src = parse_char(remaining_src)
                outbuf.append(ord(char))
                if remaining_src[0] == "-" and remaining_src[1] != "]":
                    endchar_pair, remaining_src = parse_char(remaining_src[1:])
                    outbuf.append(ord(endchar_pair))
                else:
                    # chars that aren't part of a c1-c2 range are just doubled (i.e., c-c)
                    outbuf.append(ord(char))
            # replace num chars with actual
            outbuf[last_sym_start] = len(outbuf) - last_sym_start - 1
            remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
        elif is_word_char(remaining_src[0]):  # rule reference
            name, remaining_src = parse_name(remaining_src)
            ref_rule_id = get_symbol_id(state, name)
            remaining_src = remove_leading_white_space(remaining_src, is_nested)
            last_sym_start = len(outbuf)
            outbuf.append(REF_RULE_MARKER)
            outbuf.append(ref_rule_id)
        elif remaining_src[0] == "(":  # grouping
            # parse nested alternates into synthesized rule
            remaining_src = remove_leading_white_space(remaining_src[1:], True)
            sub_rule_id = generate_symbol_id(state, rule_name)
            remaining_src = parse_alternates(
                state, remaining_src, rule_name, sub_rule_id, True
            )
            last_sym_start = len(outbuf)
            # output reference to synthesized rule
            outbuf.append(REF_RULE_MARKER)
            outbuf.append(sub_rule_id)
            if remaining_src[0] != ")":
                raise RuntimeError("expecting ')' at " + remaining_src)
            remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
        elif remaining_src[0] in ("*", "+", "?"):  # repetition operator
            if len(outbuf) - out_start_pos - 1 == 0:
                raise RuntimeError(
                    "expecting preceeding item to */+/? at " + remaining_src
                )
            out_grammar = state.grammar_encoding
            # apply transformation to previous symbol (last_sym_start -
            # end) according to rewrite rules:
            # S* --> S' ::= S S' |
            # S+ --> S' ::= S S' | S
            # S? --> S' ::= S |
            sub_rule_id = generate_symbol_id(state, rule_name)
            out_grammar.append(sub_rule_id)
            sub_rule_start = len(out_grammar)
            # placeholder for size of 1st alternate
            out_grammar.append(TO_BE_FILLED_MARKER)
            # add preceding symbol to generated rule
            out_grammar.extend(outbuf[last_sym_start:])
            if remaining_src[0] in ("*", "+"):
                # cause generated rule to recurse
                out_grammar.append(REF_RULE_MARKER)
                out_grammar.append(sub_rule_id)
            # apply actual size
            out_grammar[sub_rule_start] = len(out_grammar) - sub_rule_start
            # mark end of 1st alternate
            out_grammar.append(END_OF_ALTERNATE_MARKER)
            sub_rule_start = len(out_grammar)
            # placeholder for size of 2nd alternate
            out_grammar.append(TO_BE_FILLED_MARKER)
            if remaining_src[0] == "+":
                # add preceding symbol as alternate only for '+'
                out_grammar.extend(outbuf[last_sym_start:])
            # apply actual size of 2nd alternate
            out_grammar[sub_rule_start] = len(out_grammar) - sub_rule_start
            # mark end of 2nd alternate, then end of rule
            out_grammar.append(END_OF_ALTERNATE_MARKER)
            out_grammar.append(END_OF_RULE_MARKER)
            # in original rule, replace previous symbol with reference to generated rule
            # (1 == REF_RULE_MARKER)
            outbuf[last_sym_start:] = [1, sub_rule_id]
            remaining_src = remove_leading_white_space(remaining_src[1:], is_nested)
        else:
            break
    # apply actual size of this alternate sequence
    outbuf[out_start_pos] = len(outbuf) - out_start_pos
    # mark end of alternate
    outbuf.append(END_OF_ALTERNATE_MARKER)
    return remaining_src
def parse_alternates(state, src, rule_name, rule_id, is_nested):
    """
    Parse a '|'-separated list of alternates for rule *rule_id* and append
    the complete encoded rule (rule id, alternates, terminator) to
    state.grammar_encoding.

    Returns the unparsed remainder of *src*.
    """
    outbuf = []
    remaining_src = parse_sequence(state, src, rule_name, outbuf, is_nested)
    while remaining_src and remaining_src[0] == "|":
        remaining_src = remove_leading_white_space(remaining_src[1:], True)
        remaining_src = parse_sequence(
            state, remaining_src, rule_name, outbuf, is_nested
        )
    state.grammar_encoding.append(rule_id)
    state.grammar_encoding.extend(outbuf)
    state.grammar_encoding.append(0)  # END_OF_RULE_MARKER
    # +2 accounts for the rule id and the rule terminator appended above.
    state.grammar_encoding_rule_size.append(len(outbuf) + 2)
    return remaining_src
def parse_rule(state, src):
    """
    Parse one grammar rule of the form ``name ::= alternates`` and register
    its encoding in *state*.

    Returns the source remaining after the rule, with any trailing
    whitespace/comments stripped.

    Raises:
        RuntimeError: if '::=' is missing or the rule is not terminated by
            a newline or end of input.
    """
    name, remaining_src = parse_name(src)
    remaining_src = remove_leading_white_space(remaining_src, False)
    rule_id = get_symbol_id(state, name)
    if remaining_src[:3] != "::=":
        raise RuntimeError("expecting ::= at " + remaining_src)
    remaining_src = remove_leading_white_space(remaining_src[3:], True)
    remaining_src = parse_alternates(state, remaining_src, name, rule_id, False)
    # A rule may be terminated by \r\n, \r, \n, or end of input.
    if remaining_src and remaining_src[0] == "\r":
        remaining_src = (
            remaining_src[2:] if remaining_src[1] == "\n" else remaining_src[1:]
        )
    elif remaining_src and remaining_src[0] == "\n":
        remaining_src = remaining_src[1:]
    elif remaining_src:
        raise RuntimeError("expecting newline or end at " + remaining_src)
    return remove_leading_white_space(remaining_src, True)
def parse_ebnf(src):
    """
    Parse an EBNF grammar string into a ParseState.

    On a parse error the exception is logged and an empty ParseState is
    returned instead of propagating the error.
    """
    try:
        state = ParseState()
        grammar_repr = remove_leading_white_space(src, True)
        last_grammar_repr = ""
        while grammar_repr:
            if last_grammar_repr:
                last_parsed_rule_len = len(last_grammar_repr) - len(grammar_repr)
                logger.debug(
                    f"last_parsed_rule: {last_grammar_repr[:last_parsed_rule_len]}"
                )
            last_grammar_repr = grammar_repr
            grammar_repr = parse_rule(state, grammar_repr)
        # 0xFFFF marks the end of the whole grammar encoding.
        state.grammar_encoding.append(0xFFFF)
        return state
    except RuntimeError as err:
        # BUG FIX: the original passed `err` as an extra positional argument
        # with no %-placeholder in the message, so logging raised an internal
        # formatting error and the exception text was never shown.
        logger.warning("error parsing grammar: %s", err)
        return ParseState()
def print_rule(file, grammar_encoding, index, symbol_id_names):
    """
    Pretty-print the single rule starting at *index* in the flat
    *grammar_encoding* to *file*, decoding rule references and character
    ranges.  Returns the index of the next rule in the encoding.
    """
    rule_id = grammar_encoding[index]
    print(f"<{index}>{symbol_id_names[rule_id]} ::=", end=" ", file=file)
    pos = index + 1
    # Outer loop: one iteration per alternate (0 terminates the rule).
    while grammar_encoding[pos]:
        if pos - 1 > index:
            print("|", end=" ", file=file)
        pos += 1  # sequence size, not needed here
        # Inner loop: one iteration per symbol (0 terminates the alternate).
        while grammar_encoding[pos]:
            if grammar_encoding[pos] == REF_RULE_MARKER:
                ref_rule_id = grammar_encoding[pos + 1]
                print(
                    f"<{pos}>{symbol_id_names[ref_rule_id]}",
                    end=" ",
                    file=file,
                )
                pos += 2
            else:
                # Character class: [count, lo1, hi1, lo2, hi2, ...];
                # literals are encoded as single lo-hi pairs (lo == hi).
                print("<{}>[".format(pos), end="", file=file)
                num_chars = grammar_encoding[pos]
                pos += 1
                for i in range(0, num_chars, 2):
                    print(
                        "{}-".format(chr(grammar_encoding[pos + i])), end="", file=file
                    )
                    if i + 1 < num_chars:
                        print(
                            "{}".format(chr(grammar_encoding[pos + i + 1])),
                            end="",
                            file=file,
                        )
                print("]", end=" ", file=file)
                pos += num_chars
        # Skip the END_OF_ALTERNATE_MARKER.
        pos += 1
    print(file=file)
    # Skip the END_OF_RULE_MARKER and return the start of the next rule.
    return pos + 1
def print_grammar(file, state):
    """Dump the parsed grammar to *file* in three sections: the decoded
    rules, the raw hex encoding, and the per-rule sizes of the flat
    encoding stored in ``state.grammar_encoding_rule_size``."""
    pos = 0
    symbol_id_names = {v: k for k, v in state.symbol_ids.items()}
    print("Grammar Rules:", file=file)
    # 0xFFFF terminates the whole grammar encoding.
    while state.grammar_encoding[pos] != 0xFFFF:
        pos = print_rule(file, state.grammar_encoding, pos, symbol_id_names)
    pos = 0
    print("\nBinary representation:", file=file)
    while state.grammar_encoding[pos] != 0xFFFF:
        print(f"{state.grammar_encoding[pos]:04x}", end=" ", file=file)
        pos += 1
    # BUG FIX: the terminator was printed to stdout instead of *file*,
    # so redirected dumps were missing the "ffff" sentinel.
    print("ffff\n", file=file)
    offset = 0
    print("Grammar Rule Sizes:", file=file)
    for i, rule_size in enumerate(state.grammar_encoding_rule_size):
        print(
            f"<{i}> {rule_size} {state.grammar_encoding[offset:offset+rule_size]}",
            file=file,
        )
        offset += rule_size
###################################
# EBNF Grammar Parsing ends here #
###################################
class AbstractGrammarConstraint(ABC):
def __init__(self, grammar_str, start_rule_name, tokenizer):
state = parse_ebnf(grammar_str)
grammar_encoding = state.grammar_encoding
self.start_rule_id = state.symbol_ids.get(start_rule_name)
self.eos_token_id = tokenizer.eos_token_id | self.token_trie = TokenTrie(tokenizer) | 1 | 2023-12-07 13:32:54+00:00 | 4k |
rinnakk/nue-asr | nue_asr/cli.py | [
{
"identifier": "transcribe",
"path": "nue_asr/transcribe.py",
"snippet": "@torch.inference_mode()\ndef transcribe(\n model: NueASRModel,\n tokenizer: PreTrainedTokenizer,\n audio: Union[str, np.ndarray, torch.Tensor],\n **decode_options,\n) -> ASRResult:\n device = model.device\n sr =... | import argparse
import os
import torch
from .transcribe import transcribe
from .utils import load_model, load_tokenizer, set_seed, str2bool | 1,836 | #!/usr/bin/env python3
# Copyright 2023 rinna Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def cli_main():
default_device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"audio_files",
nargs="+",
type=str,
help="Audio file paths",
)
parser.add_argument(
"--model",
type=str,
default=None,
help="Model name or path",
)
parser.add_argument(
"--device",
type=str,
default=default_device,
help="Device to use for inference.",
)
parser.add_argument(
"--fp16", type=str2bool, default=True, help="Whether to fp16 inference."
)
parser.add_argument(
"--use-deepspeed",
action="store_true",
help="Whether to use DeepSpeed-Inference.",
)
group = parser.add_argument_group("Sequence generation options")
group.add_argument(
"--do-sample",
action="store_true",
help="Whether or not to use sampling; use greedy decoding otherwise.",
)
group.add_argument(
"--num-beams",
type=int,
default=1,
help="Number of beams for beam search. 1 means no beam search.",
)
group.add_argument(
"--temperature",
type=float,
default=1.0,
help="The value used to modulate the next token probabilities.",
)
group.add_argument(
"--top-p",
type=float,
default=1.0,
help="The value used to modulate the next token probabilities.",
)
group.add_argument(
"--min-new-tokens",
type=int,
default=2,
help="The minimum length of the sequence to be generated.",
)
group.add_argument(
"--max-new-tokens",
type=int,
default=None,
help="The maximum numbers of tokens to generate.",
)
args = parser.parse_args()
set_seed(1234)
| #!/usr/bin/env python3
# Copyright 2023 rinna Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def cli_main():
default_device = "cuda" if torch.cuda.is_available() else "cpu"
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"audio_files",
nargs="+",
type=str,
help="Audio file paths",
)
parser.add_argument(
"--model",
type=str,
default=None,
help="Model name or path",
)
parser.add_argument(
"--device",
type=str,
default=default_device,
help="Device to use for inference.",
)
parser.add_argument(
"--fp16", type=str2bool, default=True, help="Whether to fp16 inference."
)
parser.add_argument(
"--use-deepspeed",
action="store_true",
help="Whether to use DeepSpeed-Inference.",
)
group = parser.add_argument_group("Sequence generation options")
group.add_argument(
"--do-sample",
action="store_true",
help="Whether or not to use sampling; use greedy decoding otherwise.",
)
group.add_argument(
"--num-beams",
type=int,
default=1,
help="Number of beams for beam search. 1 means no beam search.",
)
group.add_argument(
"--temperature",
type=float,
default=1.0,
help="The value used to modulate the next token probabilities.",
)
group.add_argument(
"--top-p",
type=float,
default=1.0,
help="The value used to modulate the next token probabilities.",
)
group.add_argument(
"--min-new-tokens",
type=int,
default=2,
help="The minimum length of the sequence to be generated.",
)
group.add_argument(
"--max-new-tokens",
type=int,
default=None,
help="The maximum numbers of tokens to generate.",
)
args = parser.parse_args()
set_seed(1234) | model = load_model( | 1 | 2023-12-07 01:37:23+00:00 | 4k |
AdaCheng/EgoThink | models/instruct_blip/models/clip_vit.py | [
{
"identifier": "convert_weights_to_fp16",
"path": "models/instruct_blip/models/eva_vit.py",
"snippet": "def convert_weights_to_fp16(model: nn.Module):\n \"\"\"Convert applicable model parameters to fp16\"\"\"\n\n def _convert_weights_to_fp16(l):\n if isinstance(l, (nn.Conv1d, nn.Conv2d, nn... | from collections import OrderedDict
from itertools import repeat
from torch import nn
from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper
from .eva_vit import convert_weights_to_fp16
from ..common.dist_utils import download_cached_file
import collections.abc
import math
import torch
import torch.nn.functional as F | 2,836 | ("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
if use_grad_checkpointing:
self.attn = checkpoint_wrapper(self.attn)
self.mlp = checkpoint_wrapper(self.mlp)
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
    """Stack of residual attention blocks applied sequentially (LND layout)."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False):
        super().__init__()
        self.width = width
        self.layers = layers
        # Gradient checkpointing (when enabled) is only applied to the
        # deeper blocks (depth > 12), mirroring the original construction.
        blocks = [
            ResidualAttentionBlock(width, heads, attn_mask, use_grad_checkpointing and depth > 12)
            for depth in range(layers)
        ]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        return self.resblocks(x)
class VisionTransformer(nn.Module):
    """CLIP-style ViT image encoder returning per-token features.

    Note: the final LayerNorm is intentionally commented out; the output is
    the raw transformer token sequence (class token first).
    """

    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, use_grad_checkpointing: bool):
        super().__init__()
        self.input_resolution = input_resolution
        self.num_features = width
        self.num_heads = heads
        self.num_patches = (input_resolution // patch_size) ** 2
        # Patchify via a strided convolution: one token per patch.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn(self.num_patches + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads, use_grad_checkpointing=use_grad_checkpointing)
        # self.ln_final = LayerNorm(width)

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Prepend the learned class token to every sequence in the batch.
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        # x = self.ln_final(x)
        return x
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
def interpolate_pos_embed(model, state_dict, interpolation: str = 'bicubic', seq_dim=1):
    """Resize a checkpoint's positional-embedding grid to match *model*.

    Operates in place on ``state_dict['positional_embedding']``.  The class
    token embedding is kept as-is; only the patch-grid embeddings are
    bicubically interpolated to the model's grid size.  No-op when the key
    is absent or the sizes already match.
    """
    # Rescale the grid of position embeddings when loading from state_dict
    old_pos_embed = state_dict.get('positional_embedding', None)
    grid_size = round((model.positional_embedding.shape[0] - 1) ** 0.5)
    if old_pos_embed is None:
        return
    grid_size = to_2tuple(grid_size)
    extra_tokens = 1  # FIXME detect different token configs (ie no class token, or more)
    new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
    if new_seq_len == old_pos_embed.shape[0]:
        return  # already the right size
    if extra_tokens:
        pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
    else:
        pos_emb_tok, pos_emb_img = None, old_pos_embed
    old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
    # BUG FIX: the original passed the arguments positionally to print()
    # logging-style, so the '%s' placeholders were never substituted.
    print('Resizing position embedding grid-size from %s to %s' % (old_grid_size, grid_size))
    pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
    pos_emb_img = F.interpolate(
        pos_emb_img,
        size=grid_size,
        mode=interpolation,
        align_corners=True,
    )
    pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
    if pos_emb_tok is not None:
        new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
    else:
        new_pos_embed = pos_emb_img
    state_dict['positional_embedding'] = new_pos_embed
def create_clip_vit_L(img_size=224,use_checkpoint=False,precision="fp16"):
model = VisionTransformer(
input_resolution=img_size,
patch_size=14,
width=1024,
layers=23,
heads=16,
use_grad_checkpointing=use_checkpoint,
)
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/clip_vit_L.pth"
|
class Bottleneck(nn.Module):
    """ResNet bottleneck block, CLIP variant.

    Per the construction below: all convolutions use stride 1, and spatial
    downsampling for stride > 1 is performed by an AvgPool2d after conv2
    (and prepended to the shortcut branch) instead of strided convolution.
    """

    # Channel expansion factor of the final 1x1 convolution.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu3 = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            # NOTE: the submodule names ("-1", "0", "1") match pretrained
            # CLIP checkpoints — do not rename.
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))

    def forward(self, x: torch.Tensor):
        # Residual shortcut; downsampled below when shapes differ.
        identity = x
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            identity = self.downsample(x)
        out += identity
        out = self.relu3(out)
        return out
class AttentionPool2d(nn.Module):
    """Attention-based global pooling over a 2D feature map.

    Flattens the (N, C, H, W) input to a token sequence, prepends the mean
    token, adds learned positional embeddings, and runs a single multi-head
    attention pass; returns the attended representation of the mean token.
    """

    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # One positional embedding per spatial location plus one for the mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # Functional MHA with separate q/k/v projection weights taken from
        # the Linear layers above (use_separate_proj_weight=True).
        x, _ = F.multi_head_attention_forward(
            query=x, key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )
        # Return only the pooled (mean) token's attended representation.
        return x[0]
class LayerNorm(nn.LayerNorm):
    """LayerNorm that normalizes in fp32 and restores the input dtype.

    This keeps the normalization numerically stable under fp16 activations.
    """

    def forward(self, x: torch.Tensor):
        input_dtype = x.dtype
        normalized = super().forward(x.float())
        return normalized.to(input_dtype)
class QuickGELU(nn.Module):
    """Sigmoid-based GELU approximation: ``x * sigmoid(1.702 * x)``."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: self-attention and MLP, each residual."""

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # 4x-expansion MLP with QuickGELU; submodule names ("c_fc", "gelu",
        # "c_proj") match pretrained CLIP checkpoints — do not rename.
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
        if use_grad_checkpointing:
            # Trade compute for memory: recompute activations in backward.
            self.attn = checkpoint_wrapper(self.attn)
            self.mlp = checkpoint_wrapper(self.mlp)

    def attention(self, x: torch.Tensor):
        # Keep the (optional) mask on the same dtype/device as the activations.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """Stack of residual attention blocks applied sequentially (LND layout)."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False):
        super().__init__()
        self.width = width
        self.layers = layers
        # Gradient checkpointing (when enabled) is only applied to the
        # deeper blocks (depth > 12), mirroring the original construction.
        blocks = [
            ResidualAttentionBlock(width, heads, attn_mask, use_grad_checkpointing and depth > 12)
            for depth in range(layers)
        ]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        return self.resblocks(x)
class VisionTransformer(nn.Module):
    """CLIP-style ViT image encoder returning per-token features.

    Note: the final LayerNorm is intentionally commented out; the output is
    the raw transformer token sequence (class token first).
    """

    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, use_grad_checkpointing: bool):
        super().__init__()
        self.input_resolution = input_resolution
        self.num_features = width
        self.num_heads = heads
        self.num_patches = (input_resolution // patch_size) ** 2
        # Patchify via a strided convolution: one token per patch.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn(self.num_patches + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads, use_grad_checkpointing=use_grad_checkpointing)
        # self.ln_final = LayerNorm(width)

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Prepend the learned class token to every sequence in the batch.
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        # x = self.ln_final(x)
        return x
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_2tuple = _ntuple(2)
def interpolate_pos_embed(model, state_dict, interpolation: str = 'bicubic', seq_dim=1):
    """Resize a checkpoint's positional-embedding grid to match *model*.

    Operates in place on ``state_dict['positional_embedding']``.  The class
    token embedding is kept as-is; only the patch-grid embeddings are
    bicubically interpolated to the model's grid size.  No-op when the key
    is absent or the sizes already match.
    """
    # Rescale the grid of position embeddings when loading from state_dict
    old_pos_embed = state_dict.get('positional_embedding', None)
    grid_size = round((model.positional_embedding.shape[0] - 1) ** 0.5)
    if old_pos_embed is None:
        return
    grid_size = to_2tuple(grid_size)
    extra_tokens = 1  # FIXME detect different token configs (ie no class token, or more)
    new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
    if new_seq_len == old_pos_embed.shape[0]:
        return  # already the right size
    if extra_tokens:
        pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
    else:
        pos_emb_tok, pos_emb_img = None, old_pos_embed
    old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
    # BUG FIX: the original passed the arguments positionally to print()
    # logging-style, so the '%s' placeholders were never substituted.
    print('Resizing position embedding grid-size from %s to %s' % (old_grid_size, grid_size))
    pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
    pos_emb_img = F.interpolate(
        pos_emb_img,
        size=grid_size,
        mode=interpolation,
        align_corners=True,
    )
    pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
    if pos_emb_tok is not None:
        new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
    else:
        new_pos_embed = pos_emb_img
    state_dict['positional_embedding'] = new_pos_embed
def create_clip_vit_L(img_size=224,use_checkpoint=False,precision="fp16"):
model = VisionTransformer(
input_resolution=img_size,
patch_size=14,
width=1024,
layers=23,
heads=16,
use_grad_checkpointing=use_checkpoint,
)
url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/clip_vit_L.pth" | cached_file = download_cached_file( | 1 | 2023-12-05 14:17:17+00:00 | 4k |
3dlg-hcvc/cage | metrics/aor.py | [
{
"identifier": "get_bbox_vertices",
"path": "objects/dict_utils.py",
"snippet": "def get_bbox_vertices(obj_dict, part_idx):\n \"\"\"\n Get the 8 vertices of the bounding box\\n\n The order of the vertices is the same as the order that pytorch3d.ops.box3d_overlap expects\\n\n (This order is ... | import sys, os, json
import numpy as np
import quaternion
from iou import sampling_iou
from objects.dict_utils import get_bbox_vertices, get_base_part_idx
from tqdm import tqdm
from copy import deepcopy | 2,121 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
'''
AOR: Average Overlapping Ratio
- compute the vIoU between the sibling parts of the objects
'''
def transform_all_parts(bbox_vertices, obj_dict, joint_state, use_plucker=False, dry_run=True):
    """
    Transform all parts of the object according to the joint state.

    - bbox_vertices: per-part bounding-box vertices of the object in rest
      pose, indexable as bbox_vertices[part_idx] -> (8, 3) array
      (assumes a numpy array of shape (num_parts, 8, 3) — the fancy
      indexing below requires it; TODO confirm against callers)
    - obj_dict: the object dictionary (must contain "diffuse_tree" with
      per-part "joint" and "children" entries)
    - joint_state: joint opening fraction in the range [0, 1]
    - use_plucker (optional): transform via the plucker-coordinate joint
      parameterization (as in NAP) instead of direct translation/rotation
    - dry_run (optional): if True, only compute and return the
      transformation records without mutating bbox_vertices

    Return:
    - part_transformations: per-part list of transformation records, each
      {"type", "matrix" (4x4), "rotation_axis_origin" (NaNs when unused)}
    """
    # Get a visit order of the parts such that children parts are visited before parents
    part_visit_order = []
    base_idx = get_base_part_idx(obj_dict)
    indices_to_visit = [base_idx]
    while len(indices_to_visit) > 0:  # Breadth-first traversal
        current_idx = indices_to_visit.pop(0)
        part_visit_order.append(current_idx)
        indices_to_visit += obj_dict["diffuse_tree"][current_idx]["children"]
    # Reversed BFS order => every child is processed before its parent, so a
    # parent's joint motion is applied on top of already-moved children.
    part_visit_order.reverse()
    part_transformations = [[] for _ in range(len(obj_dict["diffuse_tree"]))]
    # Transform the parts in the visit order - children first, then parents
    for i in part_visit_order:
        part = obj_dict["diffuse_tree"][i]
        joint = part["joint"]
        children_idxs = part["children"]
        # Store the transformation used to transform the part and its children
        applied_tramsformation_matrix = np.eye(4)
        applied_rotation_axis_origin = np.array([np.nan, np.nan, np.nan])
        applied_transformation_type = "none"
        if not use_plucker:  # Direct translation and rotation
            if joint["type"] == "prismatic":
                # Translate the part and its children
                translation = np.array(joint["axis"]["direction"]) * joint["range"][1] * joint_state
                if not dry_run:
                    bbox_vertices[[i] + children_idxs] += translation
                # Store the transformation used
                applied_tramsformation_matrix[:3, 3] = translation
                applied_transformation_type = "translation"
            elif joint["type"] == "revolute" or joint["type"] == "continuous":
                # "continuous" joints have no limit: a full 360 degrees at state 1.
                if joint["type"] == "revolute":
                    rotation_radian = np.radians(joint["range"][1] * joint_state)
                else:
                    rotation_radian = np.radians(360 * joint_state)
                # Prepare the rotation matrix via axis-angle representation and quaternion
                # (uses the third-party `quaternion` package)
                rotation_axis_origin = np.array(joint["axis"]["origin"])
                rotation_axis_direction = np.array(joint["axis"]["direction"]) / np.linalg.norm(joint["axis"]["direction"])
                rotation_matrix = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(rotation_radian * rotation_axis_direction))
                if not dry_run:
                    # Rotate the part and its children about the joint axis origin
                    vertices_to_rotate = (bbox_vertices[[i] + children_idxs] - rotation_axis_origin)
                    bbox_vertices[[i] + children_idxs] = np.matmul(rotation_matrix, vertices_to_rotate.transpose([0, 2, 1])).transpose([0, 2, 1]) + rotation_axis_origin
                # Store the transformation used
                applied_tramsformation_matrix[:3, :3] = rotation_matrix
                applied_rotation_axis_origin = rotation_axis_origin
                applied_transformation_type = "rotation"
        else:  # Translation and rotation together using the plucker coordinate as in NAP
            plucker_direction = np.array(joint["axis"]["plucker"])[:3]
            plucker_moment = np.array(joint["axis"]["plucker"])[3:]
            translation_distance = joint["raw_ranges"][0][1] * joint_state
            rotation_radian = np.radians(joint["raw_ranges"][1][1] * joint_state)
            # Prepare the transformation matrix via plucker coordinate using equation (1) in NAP
            transformation_matrix = np.eye(4)
            rotation_matrix = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(rotation_radian * plucker_direction))
            translation = (np.eye(3) - rotation_matrix) @ np.cross(plucker_direction, plucker_moment) + plucker_direction * translation_distance
            transformation_matrix[:3, :3] = rotation_matrix
            transformation_matrix[:3, 3] = translation
            if not dry_run:
                # Transform the part and its children via homogeneous coordinates
                vertices_to_transform = np.concatenate([bbox_vertices[[i] + children_idxs], np.ones((len([i] + children_idxs), 8, 1))], axis=2)
                bbox_vertices[[i] + children_idxs] = np.matmul(transformation_matrix, vertices_to_transform.transpose([0, 2, 1])).transpose([0, 2, 1])[:, :, :3]
            # Store the transformation used
            applied_tramsformation_matrix = transformation_matrix
            applied_transformation_type = "plucker"
        # Record the transformation used (skipped for fixed/unhandled joints)
        if not applied_transformation_type == "none":
            record = {
                "type": applied_transformation_type,
                "matrix": applied_tramsformation_matrix,
                "rotation_axis_origin": applied_rotation_axis_origin
            }
            # The same record object is shared by the part and its children.
            for idx in [i] + children_idxs:
                part_transformations[idx].append(record)
    return part_transformations
def AOR(tgt, num_states=20, transform_use_plucker=False):
tree = tgt["diffuse_tree"]
states = np.linspace(0, 1, num_states)
| sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
'''
AOR: Average Overlapping Ratio
- compute the vIoU between the sibling parts of the objects
'''
def transform_all_parts(bbox_vertices, obj_dict, joint_state, use_plucker=False, dry_run=True):
"""
Transform all parts of the object according to the joint state\n
- bbox_vertices: the bounding box vertices of the object in rest pose in the form: [[x0, y0, z0], [x1, y1, z1], ...]\n
- obj_dict: the object dictionary\n
- joint_state: the joint state in the range of [0, 1]\n
- use_plucker (optional): whether to use plucker coordinate to transform the parts\n
- dry_run (optional): if True, only return the transformation matrices without transforming the parts\n
Return:\n
- part_transformations: the transformation matrices used to transform the parts\n
"""
# Get a visit order of the parts such that children parts are visited before parents
part_visit_order = []
base_idx = get_base_part_idx(obj_dict)
indices_to_visit = [base_idx]
while len(indices_to_visit) > 0: # Breadth-first traversal
current_idx = indices_to_visit.pop(0)
part_visit_order.append(current_idx)
indices_to_visit += obj_dict["diffuse_tree"][current_idx]["children"]
part_visit_order.reverse()
part_transformations = [[] for _ in range(len(obj_dict["diffuse_tree"]))]
# Transform the parts in the visit order - children first, then parents
for i in part_visit_order:
part = obj_dict["diffuse_tree"][i]
joint = part["joint"]
children_idxs = part["children"]
# Store the transformation used to transform the part and its children
applied_tramsformation_matrix = np.eye(4)
applied_rotation_axis_origin = np.array([np.nan, np.nan, np.nan])
applied_transformation_type = "none"
if not use_plucker: # Direct translation and rotation
if joint["type"] == "prismatic":
# Translate the part and its children
translation = np.array(joint["axis"]["direction"]) * joint["range"][1] * joint_state
if not dry_run:
bbox_vertices[[i] + children_idxs] += translation
# Store the transformation used
applied_tramsformation_matrix[:3, 3] = translation
applied_transformation_type = "translation"
elif joint["type"] == "revolute" or joint["type"] == "continuous":
if joint["type"] == "revolute":
rotation_radian = np.radians(joint["range"][1] * joint_state)
else:
rotation_radian = np.radians(360 * joint_state)
# Prepare the rotation matrix via axis-angle representation and quaternion
rotation_axis_origin = np.array(joint["axis"]["origin"])
rotation_axis_direction = np.array(joint["axis"]["direction"]) / np.linalg.norm(joint["axis"]["direction"])
rotation_matrix = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(rotation_radian * rotation_axis_direction))
if not dry_run:
# Rotate the part and its children
vertices_to_rotate = (bbox_vertices[[i] + children_idxs] - rotation_axis_origin)
bbox_vertices[[i] + children_idxs] = np.matmul(rotation_matrix, vertices_to_rotate.transpose([0, 2, 1])).transpose([0, 2, 1]) + rotation_axis_origin
# Store the transformation used
applied_tramsformation_matrix[:3, :3] = rotation_matrix
applied_rotation_axis_origin = rotation_axis_origin
applied_transformation_type = "rotation"
else: # Translation and rotation together using the plucker coordinate as in NAP
plucker_direction = np.array(joint["axis"]["plucker"])[:3]
plucker_moment = np.array(joint["axis"]["plucker"])[3:]
translation_distance = joint["raw_ranges"][0][1] * joint_state
rotation_radian = np.radians(joint["raw_ranges"][1][1] * joint_state)
# Prepare the transformation matrix via plucker coordinate using equation (1) in NAP
transformation_matrix = np.eye(4)
rotation_matrix = quaternion.as_rotation_matrix(quaternion.from_rotation_vector(rotation_radian * plucker_direction))
translation = (np.eye(3) - rotation_matrix) @ np.cross(plucker_direction, plucker_moment) + plucker_direction * translation_distance
transformation_matrix[:3, :3] = rotation_matrix
transformation_matrix[:3, 3] = translation
if not dry_run:
# Transform the part and its children via homogeneous coordinates
vertices_to_transform = np.concatenate([bbox_vertices[[i] + children_idxs], np.ones((len([i] + children_idxs), 8, 1))], axis=2)
bbox_vertices[[i] + children_idxs] = np.matmul(transformation_matrix, vertices_to_transform.transpose([0, 2, 1])).transpose([0, 2, 1])[:, :, :3]
# Store the transformation used
applied_tramsformation_matrix = transformation_matrix
applied_transformation_type = "plucker"
# Record the transformation used
if not applied_transformation_type == "none":
record = {
"type": applied_transformation_type,
"matrix": applied_tramsformation_matrix,
"rotation_axis_origin": applied_rotation_axis_origin
}
for idx in [i] + children_idxs:
part_transformations[idx].append(record)
return part_transformations
def AOR(tgt, num_states=20, transform_use_plucker=False):
tree = tgt["diffuse_tree"]
states = np.linspace(0, 1, num_states) | original_bbox_vertices = np.array([get_bbox_vertices(tgt, i) for i in range(len(tgt["diffuse_tree"]))]) | 0 | 2023-12-06 23:08:41+00:00 | 4k |
duxiaodan/intrinsic-lora | sd_single_diode_pseudo_normal.py | [
{
"identifier": "plot_normal_map",
"path": "diode/diode.py",
"snippet": "def plot_normal_map(normal_map):\n normal_viz = normal_map[:, ::, :]\n\n #Normalize normals\n normi = np.where(np.sum(normal_viz,axis=2)!=0.)\n zero_mask = np.equal(np.sum(normal_viz, 2, keepdims=True), 0.).astype(np.fl... | import argparse
import logging
import math
import os
import os.path as osp
import random
import shutil
import wandb
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import torch.utils.checkpoint
import transformers
import diffusers
import copy
import json
import datetime
import wandb
import xformers
import bitsandbytes as bnb
from pathlib import Path
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from torch.utils.data import Dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel, DPMSolverMultistepScheduler
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from PIL import Image
from PIL.ImageOps import exif_transpose
from diode.diode import (
plot_normal_map,
check_and_tuplize_tokens,
enumerate_paths,
_VALID_SPLITS,
_VALID_SCENE_TYPES
) | 2,585 | ):
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * vae.config.scaling_factor
bsz = latents.shape[0]
timesteps = torch.randint(max_timestep-1, max_timestep, (bsz,), device=latents.device)
timesteps = timesteps.long()
original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode()
original_image_embeds = original_image_embeds * vae.config.scaling_factor
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
model_pred = unet(original_image_embeds, timesteps, encoder_hidden_states).sample
images = vae.decode(model_pred / vae.config.scaling_factor, return_dict=False)[0]
return images
@torch.inference_mode()
def log_validation(
text_encoder,
tokenizer,
unet,
vae,
args,
accelerator,
test_batches,
train_batch,
weight_dtype,
epoch,
global_step
):
unwrapped_unet=accelerator.unwrap_model(unet)
unwrapped_text_encoder=accelerator.unwrap_model(text_encoder)
unwrapped_vae = accelerator.unwrap_model(vae)
max_timestep = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler").config.num_train_timesteps
val_test_images1 = []
val_test_images2 = []
val_train_images = []
test_batch1 = test_batches[0]
test_batch2 = test_batches[1]
with torch.cuda.amp.autocast():
images = val_inference_pipe(test_batch1, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_test_images1.extend([Image.fromarray(tensor2np(img)) for img in images])
images = val_inference_pipe(test_batch2, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_test_images2.extend([Image.fromarray(tensor2np(img)) for img in images])
images = val_inference_pipe(train_batch, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_train_images.extend([Image.fromarray(tensor2np(img)) for img in images])
concat_test_images1 = []
concat_test_images2 = []
concat_train_images = []
for gt, im_1, im_2, im_3 in zip(test_batch1['gt_values'],test_batch1['original_pixel_values'],test_batch1['pixel_values'],val_test_images1):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_test_images1.append(output_img)
for gt, im_1, im_2, im_3 in zip(test_batch2['gt_values'],test_batch2['original_pixel_values'],test_batch2['pixel_values'],val_test_images2):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_test_images2.append(output_img)
for gt, im_1, im_2, im_3 in zip(train_batch['gt_values'],train_batch['original_pixel_values'],train_batch['pixel_values'],val_train_images):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_train_images.append(output_img)
for tracker in accelerator.trackers:
if tracker.name == "wandb":
tracker.log(
{
"validation: training images": [
wandb.Image(image, )
for i, image in enumerate(concat_train_images)
],
},
step=global_step
)
tracker.log(
{
"validation: test images 1": [
wandb.Image(image, )
for i, image in enumerate(concat_test_images1)
],
},
step=global_step
)
tracker.log(
{
"validation: test images 2": [
wandb.Image(image, )
for i, image in enumerate(concat_test_images2)
],
},
step=global_step
)
torch.cuda.empty_cache()
return
class PSEUDODataset(Dataset):
def __init__(
self,
data_root,
pseudo_root,
tokenizer,
splits,
scene_types,
size=512,
center_crop=True,
num_train_imgs=None,
tokenizer_max_length=None,
empty_prompt = False,
unified_prompt = None,
):
self.data_root = Path(data_root)
self.pseudo_root = Path(pseudo_root)
| # coding=utf-8
# Intrinsic-LoRA
"""Intrinsic-LoRA Single UNet model for surface normal training"""
logger = get_logger(__name__, log_level="INFO")
def save_model_card(repo_id: str, images=None, base_model=str, dataset_name=str, repo_folder=None):
img_str = ""
for i, image in enumerate(images):
image.save(os.path.join(repo_folder, f"image_{i}.png"))
img_str += f"\n"
yaml = f"""
---
license: creativeml-openrail-m
base_model: {base_model}
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- lora
inference: true
---
"""
model_card = f"""
# LoRA text2image fine-tuning - {repo_id}
These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n
{img_str}
"""
with open(os.path.join(repo_folder, "README.md"), "w") as f:
f.write(yaml + model_card)
def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None):
if tokenizer_max_length is not None:
max_length = tokenizer_max_length
else:
max_length = tokenizer.model_max_length
text_inputs = tokenizer(
prompt,
truncation=True,
padding="max_length",
max_length=max_length,
return_tensors="pt",
)
return text_inputs
def tensor2np(tensor):
return (255*(tensor.cpu().permute(1,2,0).numpy()*0.5+0.5)).astype(np.uint8)
def listPILToTensor(listPILs):
size = listPILs[0].size[0]
image_transforms = transforms.Compose(
[
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
return torch.stack([image_transforms(p) for p in listPILs])
def visualization_routine(gt,im_1,im_2,im_3):
gt = tensor2np(gt)
im_1 = tensor2np(im_1)
im_2 = tensor2np(im_2)
im_3 = np.array(im_3)
return Image.fromarray(np.hstack((im_1,gt,im_2,im_3)))
@torch.inference_mode()
def val_inference_pipe(
batch,
vae,
unet,
text_encoder,
weight_dtype,
max_timestep
):
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * vae.config.scaling_factor
bsz = latents.shape[0]
timesteps = torch.randint(max_timestep-1, max_timestep, (bsz,), device=latents.device)
timesteps = timesteps.long()
original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode()
original_image_embeds = original_image_embeds * vae.config.scaling_factor
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
model_pred = unet(original_image_embeds, timesteps, encoder_hidden_states).sample
images = vae.decode(model_pred / vae.config.scaling_factor, return_dict=False)[0]
return images
@torch.inference_mode()
def log_validation(
text_encoder,
tokenizer,
unet,
vae,
args,
accelerator,
test_batches,
train_batch,
weight_dtype,
epoch,
global_step
):
unwrapped_unet=accelerator.unwrap_model(unet)
unwrapped_text_encoder=accelerator.unwrap_model(text_encoder)
unwrapped_vae = accelerator.unwrap_model(vae)
max_timestep = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler").config.num_train_timesteps
val_test_images1 = []
val_test_images2 = []
val_train_images = []
test_batch1 = test_batches[0]
test_batch2 = test_batches[1]
with torch.cuda.amp.autocast():
images = val_inference_pipe(test_batch1, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_test_images1.extend([Image.fromarray(tensor2np(img)) for img in images])
images = val_inference_pipe(test_batch2, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_test_images2.extend([Image.fromarray(tensor2np(img)) for img in images])
images = val_inference_pipe(train_batch, unwrapped_vae, unwrapped_unet, unwrapped_text_encoder,
weight_dtype,max_timestep
)
images = images.clamp(min=-1.,max=1,)
val_train_images.extend([Image.fromarray(tensor2np(img)) for img in images])
concat_test_images1 = []
concat_test_images2 = []
concat_train_images = []
for gt, im_1, im_2, im_3 in zip(test_batch1['gt_values'],test_batch1['original_pixel_values'],test_batch1['pixel_values'],val_test_images1):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_test_images1.append(output_img)
for gt, im_1, im_2, im_3 in zip(test_batch2['gt_values'],test_batch2['original_pixel_values'],test_batch2['pixel_values'],val_test_images2):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_test_images2.append(output_img)
for gt, im_1, im_2, im_3 in zip(train_batch['gt_values'],train_batch['original_pixel_values'],train_batch['pixel_values'],val_train_images):
output_img = visualization_routine(gt,im_1,im_2,im_3)
concat_train_images.append(output_img)
for tracker in accelerator.trackers:
if tracker.name == "wandb":
tracker.log(
{
"validation: training images": [
wandb.Image(image, )
for i, image in enumerate(concat_train_images)
],
},
step=global_step
)
tracker.log(
{
"validation: test images 1": [
wandb.Image(image, )
for i, image in enumerate(concat_test_images1)
],
},
step=global_step
)
tracker.log(
{
"validation: test images 2": [
wandb.Image(image, )
for i, image in enumerate(concat_test_images2)
],
},
step=global_step
)
torch.cuda.empty_cache()
return
class PSEUDODataset(Dataset):
def __init__(
self,
data_root,
pseudo_root,
tokenizer,
splits,
scene_types,
size=512,
center_crop=True,
num_train_imgs=None,
tokenizer_max_length=None,
empty_prompt = False,
unified_prompt = None,
):
self.data_root = Path(data_root)
self.pseudo_root = Path(pseudo_root) | self.splits = check_and_tuplize_tokens( | 1 | 2023-12-08 16:34:44+00:00 | 4k |
modelscope/llmuses | llmuses/benchmarks/general_qa/general_qa_adapter.py | [
{
"identifier": "DataAdapter",
"path": "llmuses/benchmarks/data_adapter.py",
"snippet": "class DataAdapter(ABC):\n\n def __init__(self,\n subset_list: list,\n metric_list: list,\n few_shot_num: Optional[int] = 0,\n train_split: Optional[... | from llmuses.benchmarks.data_adapter import DataAdapter
from llmuses.metrics.metrics import bleu_ngram_one_sample, weighted_mean
from llmuses.metrics.rouge_metric import compute_rouge_score_one_sample_zh
from llmuses.utils.logger import get_logger
from typing import Any, Optional
from collections import defaultdict
import json | 3,162 | # Copyright (c) Alibaba, Inc. and its affiliates.
logger = get_logger()
DATASET_ID = 'general_qa'
SUBSET_LIST = ['default']
class GeneralQAAdapter(DataAdapter):
def __init__(self,
subset_list: list = None,
metric_list: list = None,
train_split: str = 'train',
eval_split: str = 'test',
**kwargs):
if subset_list is None:
subset_list = SUBSET_LIST
if metric_list is None:
| # Copyright (c) Alibaba, Inc. and its affiliates.
logger = get_logger()
DATASET_ID = 'general_qa'
SUBSET_LIST = ['default']
class GeneralQAAdapter(DataAdapter):
def __init__(self,
subset_list: list = None,
metric_list: list = None,
train_split: str = 'train',
eval_split: str = 'test',
**kwargs):
if subset_list is None:
subset_list = SUBSET_LIST
if metric_list is None: | metric_list = [{'name': 'WeightedAverageBLEU', 'object': weighted_mean}] | 2 | 2023-12-07 06:10:49+00:00 | 4k |
AsuradaYuci/TF-CLIP | model/make_model_clipreid.py | [
{
"identifier": "SimpleTokenizer",
"path": "model/clip/simple_tokenizer.py",
"snippet": "class SimpleTokenizer(object):\n def __init__(self, bpe_path: str = default_bpe()):\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n ... | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from collections import OrderedDict
from .clip.simple_tokenizer import SimpleTokenizer as _Tokenizer
from .clip.model import QuickGELU, LayerNorm
from .Visual_Prompt import visual_prompt
from .clip import clip | 3,090 | _tokenizer = _Tokenizer()
# from .TAT import TemporalAttentionTransformer
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
if m.bias:
nn.init.constant_(m.bias, 0.0)
def load_clip_to_cpu(backbone_name, h_resolution, w_resolution, vision_stride_size):
| _tokenizer = _Tokenizer()
# from .TAT import TemporalAttentionTransformer
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
if m.bias:
nn.init.constant_(m.bias, 0.0)
def load_clip_to_cpu(backbone_name, h_resolution, w_resolution, vision_stride_size): | url = clip._MODELS[backbone_name] | 4 | 2023-12-11 04:03:46+00:00 | 4k |
MarilynKeller/aitviewer-skel | aitviewer/remote/viewer.py | [
{
"identifier": "Message",
"path": "aitviewer/remote/message.py",
"snippet": "class Message(enum.Enum):\n \"\"\"Enumeration for the type of message.\"\"\"\n\n # Messages used to create nodes on the remote viewer.\n NODE = 1\n MESHES = 2\n SPHERES = 3\n LINES = 4\n ARROWS = 5\n RI... | import asyncio
import pickle
import queue
import subprocess
import threading
import websockets
from typing import Callable
from .message import Message, make_message | 1,709 | v = cls(**kwargs)
v.process = process
return v
def _entry(self, url):
# Entry point of the client thread.
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self._async_entry(url))
async def _async_entry(self, url):
# Async entry point of the client thread.
# Attempt to connect until 'self.timeout' seconds passed.
start_time = self.loop.time()
try:
while self.loop.time() < start_time + self.timeout:
try:
self.websocket = await websockets.connect(url, max_size=None)
self.connected = True
break
except Exception as e:
pass
finally:
# Release the semaphore to let the main thread continue after
# attempting to connect. The main thread will read the
# self.connected variable to know if we succeded at connecting.
self.semaphore.release()
# Exit the client thread if we failed to connect.
if not self.connected:
return
# Create a queue for incoming messages to the main thread.
self.recv_queue = queue.Queue()
# Message loop.
try:
# This loop is exited whenever the connection is dropped
# which causes and exception to be raised.
async for message in self.websocket:
data = pickle.loads(message)
# Equeue data for the main thread to process.
self.recv_queue.put_nowait(data)
except Exception as e:
print(f"Message loop exception: {e}")
# Mark the connection as closed.
self.connected = False
def get_message(self, block=True):
"""
Returns the next message received by the remote viewer.
:param block: if True this function blocks until a message is received, otherwise it returns immediately.
:return: if block is True returns the next message or None if the connection has been closed.
if block is False returns the next message or None if there are no messages.
"""
if self.connected:
if block:
while self.connected:
try:
return self.recv_queue.get(timeout=0.1)
except queue.Empty:
pass
else:
if not self.recv_queue.empty():
return self.recv_queue.get_nowait()
return None
def process_messages(self, handler: Callable[["RemoteViewer", object], None], block=True):
"""
Processes messages in a loop calling 'handler' for each message.
:param block: if True this function blocks until the connection is closed, otherwise it returns
after all messages received so far have been processed.
:return: if block is True always returns False when the connection has been closed.
if block is False returns True if the connection is still open or False if the connection
has been closed.
"""
while True:
msg = self.get_message(block)
if msg is None:
if block:
return False
else:
return self.connected
handler(self, msg)
async def _async_send(self, data):
await self.websocket.send(data)
def send(self, data):
try:
if self.connected:
# Send a message by adding a send coroutine to the thread's loop and wait for it to complete.
asyncio.run_coroutine_threadsafe(self._async_send(data), self.loop).result()
except Exception as e:
print(f"Send exception: {e}")
def send_message(self, type, uid=None, *args, **kwargs):
"""
Send a message to the viewer. See Viewer.process_message()
for information about how these parameters are interpreted
by the viewer.
"""
msg = make_message(type, uid, args, kwargs)
data = pickle.dumps(msg)
self.send(data)
def set_frame(self, frame: int):
"""
Set the current active frame of the remote viewer.
:param frame: an integer representing the id of the frame.
"""
| # Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos
class RemoteViewer:
def __init__(self, host="localhost", port=8417, timeout=10, verbose=True):
"""
Initializer.
:param host: the IP address of a host to connect to as a string.
:param port: the TCP port to connect to.
:param timeout: a timeout in seconds for attempting to connect to the viewer.
:param verbose: if True print info messages.
"""
url = f"ws://{host}:{port}"
if verbose:
print(f"Connecting to remote viewer at {url}")
self.timeout = timeout
self.connected = False
# Semaphore used to wait for the connection to be setup by the client thread.
self.semaphore = threading.Semaphore(0)
# Create a thread for running the websocket client async loop.
self.loop = asyncio.new_event_loop()
self.thread = threading.Thread(target=self._entry, args=(url,), daemon=True)
self.thread.start()
# Wait for the connection to be setup.
self.semaphore.acquire()
if verbose:
if self.connected:
print("Connected")
else:
print(f"Failed to connect")
self.process: subprocess.Popen = None
def __enter__(self):
return self
def __exit__(self, *args):
self.close_connection()
@classmethod
def create_new_process(cls, args=None, **kwargs):
"""
Open a Viewer in a new process and return a RemoteViewer connected to it.
:param args: This parameter can be used to specify an argument or
a list of arguments that is used to create the new process.
e.g: args = ["path/to/script.py", "arg1", "arg2"] will invoke the following command:
python path/to/script.py arg1 arg2
"""
# If host is None create a new viewer in a separate process.
if args is None:
popen_args = ["python", "-m", "aitviewer.server"]
else:
if isinstance(args, list):
popen_args = ["python"] + args
else:
popen_args = ["python", str(args)]
# Create the viewer process.
process = subprocess.Popen(
popen_args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
# Create a remote viewer connected to the child process.
v = cls(**kwargs)
v.process = process
return v
def _entry(self, url):
# Entry point of the client thread.
asyncio.set_event_loop(self.loop)
self.loop.run_until_complete(self._async_entry(url))
async def _async_entry(self, url):
# Async entry point of the client thread.
# Attempt to connect until 'self.timeout' seconds passed.
start_time = self.loop.time()
try:
while self.loop.time() < start_time + self.timeout:
try:
self.websocket = await websockets.connect(url, max_size=None)
self.connected = True
break
except Exception as e:
pass
finally:
# Release the semaphore to let the main thread continue after
# attempting to connect. The main thread will read the
# self.connected variable to know if we succeded at connecting.
self.semaphore.release()
# Exit the client thread if we failed to connect.
if not self.connected:
return
# Create a queue for incoming messages to the main thread.
self.recv_queue = queue.Queue()
# Message loop.
try:
# This loop is exited whenever the connection is dropped
# which causes and exception to be raised.
async for message in self.websocket:
data = pickle.loads(message)
# Equeue data for the main thread to process.
self.recv_queue.put_nowait(data)
except Exception as e:
print(f"Message loop exception: {e}")
# Mark the connection as closed.
self.connected = False
def get_message(self, block=True):
"""
Returns the next message received by the remote viewer.
:param block: if True this function blocks until a message is received, otherwise it returns immediately.
:return: if block is True returns the next message or None if the connection has been closed.
if block is False returns the next message or None if there are no messages.
"""
if self.connected:
if block:
while self.connected:
try:
return self.recv_queue.get(timeout=0.1)
except queue.Empty:
pass
else:
if not self.recv_queue.empty():
return self.recv_queue.get_nowait()
return None
def process_messages(self, handler: Callable[["RemoteViewer", object], None], block=True):
"""
Processes messages in a loop calling 'handler' for each message.
:param block: if True this function blocks until the connection is closed, otherwise it returns
after all messages received so far have been processed.
:return: if block is True always returns False when the connection has been closed.
if block is False returns True if the connection is still open or False if the connection
has been closed.
"""
while True:
msg = self.get_message(block)
if msg is None:
if block:
return False
else:
return self.connected
handler(self, msg)
async def _async_send(self, data):
await self.websocket.send(data)
def send(self, data):
try:
if self.connected:
# Send a message by adding a send coroutine to the thread's loop and wait for it to complete.
asyncio.run_coroutine_threadsafe(self._async_send(data), self.loop).result()
except Exception as e:
print(f"Send exception: {e}")
def send_message(self, type, uid=None, *args, **kwargs):
"""
Send a message to the viewer. See Viewer.process_message()
for information about how these parameters are interpreted
by the viewer.
"""
msg = make_message(type, uid, args, kwargs)
data = pickle.dumps(msg)
self.send(data)
def set_frame(self, frame: int):
"""
Set the current active frame of the remote viewer.
:param frame: an integer representing the id of the frame.
""" | self.send_message(Message.SET_FRAME, None, frame) | 0 | 2023-12-07 16:13:50+00:00 | 4k |
nexB/dejacode | organization/urls.py | [
{
"identifier": "DataspacedCreateView",
"path": "dje/views.py",
"snippet": "class DataspacedCreateView(\n LoginRequiredMixin,\n PermissionRequiredMixin,\n SuccessMessageMixin,\n DataspacedModelFormMixin,\n CreateView,\n):\n template_name = \"object_form.html\"\n\n def get_success_me... | from django.urls import path
from dje.views import DataspacedCreateView
from dje.views import DataspacedDeleteView
from dje.views import DataspacedUpdateView
from organization.forms import OwnerForm
from organization.models import Owner
from organization.views import OwnerDetailsView
from organization.views import OwnerListView | 3,019 | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#
urlpatterns = [
path(
"<str:dataspace>/<str:name>/change/",
DataspacedUpdateView.as_view(
model=Owner,
form_class=OwnerForm,
slug_url_kwarg="name",
permission_required="organization.change_owner",
),
name="owner_change",
),
path(
"<str:dataspace>/<str:name>/delete/",
DataspacedDeleteView.as_view(
model=Owner,
slug_url_kwarg="name",
permission_required="organization.delete_owner",
),
name="owner_delete",
),
path(
"<str:dataspace>/<str:name>/",
OwnerDetailsView.as_view(),
name="owner_details",
),
path(
"add/",
| #
# Copyright (c) nexB Inc. and others. All rights reserved.
# DejaCode is a trademark of nexB Inc.
# SPDX-License-Identifier: AGPL-3.0-only
# See https://github.com/nexB/dejacode for support or download.
# See https://aboutcode.org for more information about AboutCode FOSS projects.
#
urlpatterns = [
path(
"<str:dataspace>/<str:name>/change/",
DataspacedUpdateView.as_view(
model=Owner,
form_class=OwnerForm,
slug_url_kwarg="name",
permission_required="organization.change_owner",
),
name="owner_change",
),
path(
"<str:dataspace>/<str:name>/delete/",
DataspacedDeleteView.as_view(
model=Owner,
slug_url_kwarg="name",
permission_required="organization.delete_owner",
),
name="owner_delete",
),
path(
"<str:dataspace>/<str:name>/",
OwnerDetailsView.as_view(),
name="owner_details",
),
path(
"add/", | DataspacedCreateView.as_view( | 0 | 2023-12-07 16:57:42+00:00 | 4k |
kylemcdonald/i2i-realtime | worker_app.py | [
{
"identifier": "Settings",
"path": "settings.py",
"snippet": "class Settings(BaseSettings):\n # config, cannot be changed\n mode: str = Field(default=\"video\")\n worker_id: int = Field(default=0)\n \n output_fast: bool = Field(default=True)\n zmq_video_port: int = Field(default=5554)... | from settings import Settings
from turbojpeg import TurboJPEG, TJPF_RGB
from threaded_worker import ThreadedWorker
from diffusion_processor import DiffusionProcessor
import zmq
import msgpack
import numpy as np
import time | 2,521 |
settings = Settings()
print(f"Starting worker #{settings.worker_id}")
class WorkerReceiver(ThreadedWorker):
def __init__(self, hostname, port):
super().__init__(has_input=False)
self.context = zmq.Context()
self.sock = self.context.socket(zmq.PULL)
self.sock.setsockopt(zmq.RCVTIMEO, 100)
self.sock.setsockopt(zmq.RCVHWM, 1)
self.sock.setsockopt(zmq.LINGER, 0)
address = f"tcp://{hostname}:{port}"
print(f"WorkerReceiver connecting to {address}")
self.sock.connect(address)
self.jpeg = TurboJPEG()
def work(self):
while not self.should_exit:
try:
msg = self.sock.recv(flags=zmq.NOBLOCK, copy=False).bytes
receive_time = time.time()
# print(int(time.time()*1000)%1000, "receiving")
except zmq.Again:
continue
try:
unpacked = msgpack.unpackb(msg)
parameters = unpacked["parameters"]
images = []
for frame in unpacked["frames"]:
img = self.jpeg.decode(frame, pixel_format=TJPF_RGB)
images.append(img / 255)
unpacked["frames"] = images
return unpacked
except OSError:
continue
def cleanup(self):
self.sock.close()
self.context.term()
class Processor(ThreadedWorker):
def __init__(self, settings):
super().__init__()
self.generator = None
self.batch_count = 0
warmup = None
if settings.warmup:
warmup = f"{settings.batch_size}x{settings.warmup}"
|
settings = Settings()
print(f"Starting worker #{settings.worker_id}")
class WorkerReceiver(ThreadedWorker):
def __init__(self, hostname, port):
super().__init__(has_input=False)
self.context = zmq.Context()
self.sock = self.context.socket(zmq.PULL)
self.sock.setsockopt(zmq.RCVTIMEO, 100)
self.sock.setsockopt(zmq.RCVHWM, 1)
self.sock.setsockopt(zmq.LINGER, 0)
address = f"tcp://{hostname}:{port}"
print(f"WorkerReceiver connecting to {address}")
self.sock.connect(address)
self.jpeg = TurboJPEG()
def work(self):
while not self.should_exit:
try:
msg = self.sock.recv(flags=zmq.NOBLOCK, copy=False).bytes
receive_time = time.time()
# print(int(time.time()*1000)%1000, "receiving")
except zmq.Again:
continue
try:
unpacked = msgpack.unpackb(msg)
parameters = unpacked["parameters"]
images = []
for frame in unpacked["frames"]:
img = self.jpeg.decode(frame, pixel_format=TJPF_RGB)
images.append(img / 255)
unpacked["frames"] = images
return unpacked
except OSError:
continue
def cleanup(self):
self.sock.close()
self.context.term()
class Processor(ThreadedWorker):
def __init__(self, settings):
super().__init__()
self.generator = None
self.batch_count = 0
warmup = None
if settings.warmup:
warmup = f"{settings.batch_size}x{settings.warmup}" | self.processor = DiffusionProcessor(warmup, settings.local_files_only) | 2 | 2023-12-05 12:32:28+00:00 | 4k |
sinantan/jsonpyd | src/jsonpyd/cli.py | [
{
"identifier": "JsonPyd",
"path": "src/jsonpyd/jsonpyd.py",
"snippet": "class JsonPyd:\n def __init__(self, schema, options={}):\n assert self.valid_json(schema), \"Schema should be String JSON format.\"\n\n self.schema = json.loads(schema)\n self.options: Options = Options(**op... | from .jsonpyd import JsonPyd
from .util import FileHandler
from argparse import ArgumentParser
from datetime import date | 1,797 |
class CLI:
def __init__(self):
self.args = self.parse_arguments()
def parse_arguments(self):
parser = ArgumentParser(description="JsonPyd command line arguments")
parser.add_argument(
"schema_path", type=str, help="Path of the referenced schema file."
)
parser.add_argument(
"--apply_snake_case",
type=bool,
default=True,
help="Apply snake_case to variables.",
)
parser.add_argument(
"--force_optional",
type=bool,
default=False,
help="Make variables optional by default.",
)
parser.add_argument(
"--file_name",
type=str,
default=f'{date.today().strftime("%d-%m-%Y")}_schema',
help="Name of the output file.",
)
return parser.parse_args()
def run(self):
|
class CLI:
def __init__(self):
self.args = self.parse_arguments()
def parse_arguments(self):
parser = ArgumentParser(description="JsonPyd command line arguments")
parser.add_argument(
"schema_path", type=str, help="Path of the referenced schema file."
)
parser.add_argument(
"--apply_snake_case",
type=bool,
default=True,
help="Apply snake_case to variables.",
)
parser.add_argument(
"--force_optional",
type=bool,
default=False,
help="Make variables optional by default.",
)
parser.add_argument(
"--file_name",
type=str,
default=f'{date.today().strftime("%d-%m-%Y")}_schema',
help="Name of the output file.",
)
return parser.parse_args()
def run(self): | schema = FileHandler.read_file(path=self.args.schema_path) | 1 | 2023-12-12 18:11:16+00:00 | 4k |
wusize/CLIM | ovdet/ovdet/models/vlms/clip/clip.py | [
{
"identifier": "build_model",
"path": "ovdet/ovdet/models/vlms/clip/openai_model.py",
"snippet": "def build_model(state_dict, state_file, use_image_encoder, use_text_encoder=True, **kwargs):\n vit = \"visual.proj\" in state_dict\n\n if vit:\n vision_width = state_dict[\"visual.conv1.weight... | import hashlib
import os
import urllib
import warnings
import torch
from typing import Any, Union, List
from pkg_resources import packaging
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .openai_model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
from torchvision.transforms import InterpolationMode | 1,659 |
try:
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize", "tokenize_dynamic", "get_only_word_tokens"]
|
try:
BICUBIC = InterpolationMode.BICUBIC
except ImportError:
BICUBIC = Image.BICUBIC
if packaging.version.parse(torch.__version__) < packaging.version.parse("1.7.1"):
warnings.warn("PyTorch version 1.7.1 or higher is recommended")
__all__ = ["available_models", "load", "tokenize", "tokenize_dynamic", "get_only_word_tokens"] | _tokenizer = _Tokenizer() | 0 | 2023-12-09 05:43:08+00:00 | 4k |
eezkni/ColNeRF | src/model/models.py | [
{
"identifier": "ImageEncoder",
"path": "src/model/encoder.py",
"snippet": "class ImageEncoder(nn.Module):\n \"\"\"\n Global image encoder\n \"\"\"\n\n def __init__(self, backbone=\"resnet34\", pretrained=True, latent_size=128):\n \"\"\"\n :param backbone Backbone network. Assu... | import torch
import torch.autograd.profiler as profiler
import os
import os.path as osp
import warnings
from .encoder import ImageEncoder
from .code import PositionalEncoding
from .model_util import make_encoder, make_mlp
from .InterviewAttention import InterviewFusion
from util import repeat_interleave
from cmath import isnan
from shutil import copyfile
from shutil import copyfile | 1,835 | """
Main model implementation
"""
class ColNeRFNet(torch.nn.Module):
def __init__(self, conf, nviews, data_format=None, stop_encoder_grad=False):
"""
:param conf PyHocon config subtree 'model'
"""
super().__init__()
| """
Main model implementation
"""
class ColNeRFNet(torch.nn.Module):
def __init__(self, conf, nviews, data_format=None, stop_encoder_grad=False):
"""
:param conf PyHocon config subtree 'model'
"""
super().__init__() | self.encoder = make_encoder(conf["encoder"]) | 2 | 2023-12-12 13:06:50+00:00 | 4k |
ku-dmlab/PORelDICE | train_offline.py | [
{
"identifier": "D4RLDataset",
"path": "dataset_utils.py",
"snippet": "class D4RLDataset(Dataset):\n def __init__(\n self,\n env: gym.Env,\n add_env: gym.Env = \"None\",\n expert_ratio: float = 1.0,\n clip_to_eps: bool = True,\n heavy_tail: bool = False,\n ... | from pathlib import Path
from typing import Tuple
from absl import app, flags
from ml_collections import config_flags
from dataset_utils import D4RLDataset, Log, split_into_trajectories
from evaluation import evaluate
from learner import Learner
import gym
import numpy as np
import tqdm
import wandb
import wrappers | 3,131 |
FLAGS = flags.FLAGS
flags.DEFINE_string("env_name", "halfcheetah-expert-v2", "Environment name.")
flags.DEFINE_string("save_dir", "./results/", "Tensorboard logging dir.")
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("eval_episodes", 5, "Number of episodes used for evaluation.")
flags.DEFINE_integer("log_interval", 1000, "Logging interval.")
flags.DEFINE_integer("eval_interval", 10000, "Eval interval.")
flags.DEFINE_integer("batch_size", 256, "Mini batch size.")
flags.DEFINE_integer("max_steps", int(1e6), "Number of training steps.")
flags.DEFINE_string("mix_dataset", "None", "mix the dataset")
flags.DEFINE_boolean("tqdm", True, "Use tqdm progress bar.")
flags.DEFINE_string("alg", "PORelDICE", "the training algorithm")
flags.DEFINE_float("alpha", 1.0, "temperature")
flags.DEFINE_float("epsilon", -1.0, "epsilon")
config_flags.DEFINE_config_file(
"config",
"default.py",
"File path to the training hyperparameter configuration.",
lock_config=False,
)
def normalize(dataset):
trajs = split_into_trajectories(
dataset.observations,
dataset.actions,
dataset.rewards,
dataset.masks,
dataset.dones_float,
dataset.next_observations,
)
def compute_returns(traj):
episode_return = 0
for _, _, rew, _, _, _ in traj:
episode_return += rew
return episode_return
trajs.sort(key=compute_returns)
dataset.rewards /= compute_returns(trajs[-1]) - compute_returns(trajs[0])
dataset.rewards *= 1000.0
def make_env_and_dataset(env_name: str, seed: int) -> Tuple[gym.Env, D4RLDataset]:
env = gym.make(env_name)
env = wrappers.EpisodeMonitor(env)
env = wrappers.SinglePrecision(env)
env.seed(seed=seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
dataset = D4RLDataset(env)
if "antmaze" in FLAGS.env_name:
dataset.rewards -= 1.0
elif (
"halfcheetah" in FLAGS.env_name
or "walker2d" in FLAGS.env_name
or "hopper" in FLAGS.env_name
):
# pass
normalize(dataset)
return env, dataset
def main(_):
env, dataset = make_env_and_dataset(FLAGS.env_name, FLAGS.seed)
kwargs = dict(FLAGS.config)
kwargs["alpha"] = FLAGS.alpha
kwargs["alg"] = FLAGS.alg
kwargs["epsilon"] = FLAGS.epsilon
|
FLAGS = flags.FLAGS
flags.DEFINE_string("env_name", "halfcheetah-expert-v2", "Environment name.")
flags.DEFINE_string("save_dir", "./results/", "Tensorboard logging dir.")
flags.DEFINE_integer("seed", 42, "Random seed.")
flags.DEFINE_integer("eval_episodes", 5, "Number of episodes used for evaluation.")
flags.DEFINE_integer("log_interval", 1000, "Logging interval.")
flags.DEFINE_integer("eval_interval", 10000, "Eval interval.")
flags.DEFINE_integer("batch_size", 256, "Mini batch size.")
flags.DEFINE_integer("max_steps", int(1e6), "Number of training steps.")
flags.DEFINE_string("mix_dataset", "None", "mix the dataset")
flags.DEFINE_boolean("tqdm", True, "Use tqdm progress bar.")
flags.DEFINE_string("alg", "PORelDICE", "the training algorithm")
flags.DEFINE_float("alpha", 1.0, "temperature")
flags.DEFINE_float("epsilon", -1.0, "epsilon")
config_flags.DEFINE_config_file(
"config",
"default.py",
"File path to the training hyperparameter configuration.",
lock_config=False,
)
def normalize(dataset):
trajs = split_into_trajectories(
dataset.observations,
dataset.actions,
dataset.rewards,
dataset.masks,
dataset.dones_float,
dataset.next_observations,
)
def compute_returns(traj):
episode_return = 0
for _, _, rew, _, _, _ in traj:
episode_return += rew
return episode_return
trajs.sort(key=compute_returns)
dataset.rewards /= compute_returns(trajs[-1]) - compute_returns(trajs[0])
dataset.rewards *= 1000.0
def make_env_and_dataset(env_name: str, seed: int) -> Tuple[gym.Env, D4RLDataset]:
env = gym.make(env_name)
env = wrappers.EpisodeMonitor(env)
env = wrappers.SinglePrecision(env)
env.seed(seed=seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
dataset = D4RLDataset(env)
if "antmaze" in FLAGS.env_name:
dataset.rewards -= 1.0
elif (
"halfcheetah" in FLAGS.env_name
or "walker2d" in FLAGS.env_name
or "hopper" in FLAGS.env_name
):
# pass
normalize(dataset)
return env, dataset
def main(_):
env, dataset = make_env_and_dataset(FLAGS.env_name, FLAGS.seed)
kwargs = dict(FLAGS.config)
kwargs["alpha"] = FLAGS.alpha
kwargs["alg"] = FLAGS.alg
kwargs["epsilon"] = FLAGS.epsilon | agent = Learner( | 4 | 2023-12-11 07:47:22+00:00 | 4k |
Anashel-RPG/echoai | job_manager.py | [
{
"identifier": "download_image",
"path": "image_downloader.py",
"snippet": "def download_image(image_url, local_path, job_id, prompt, additional_metadata):\r\n logging.info(f\"Initiating download: URL {image_url}, Local Path {local_path}, Job ID {job_id}, Prompt {prompt[:30]}...\")\r\n\r\n try:\r... | import threading
import time
import os
import json
import requests
import logging
from queue import Queue, Empty
from datetime import datetime
from image_downloader import download_image
from config import MAX_CONCURRENT_JOBS, RATE_LIMIT_DELAY, API_BASE_URL, HEADERS, API_CALL_DELAY
from job_data_store import get_job_data, store_job_data
| 3,015 | download_image(image_url, local_path, job_id, job_data['prompt'], additional_metadata)
print(f"NOW SHOWING: {job_data}")
except Exception as e:
logging.error(f"Error downloading content for job ID {job_id}: {e}")
queue_processor = Queue()
class Job:
def __init__(self, data):
self.data = data
self.status = 'pending'
self.id = None
self.start_time = datetime.now()
self.last_checked = None
self.check_count = 0
self.previous_status = None
self.last_log_time = None
def start(self):
self.id = API.start_job(self.data)
self.start_time = datetime.now()
if self.id:
store_job_data(self.id, self.data['prompt'])
self.status = 'processing'
else:
# Retry once if the job fails to start
logging.info("== WARNING RETRY ==")
logging.info(self.data['prompt'])
time.sleep(5) # Use the configurable delay from config.py
self.id = API.start_job(self.data)
if self.id:
store_job_data(self.id, self.data['prompt']) # Store job data in the job_data_store on successful retry
self.status = 'processing'
else:
self.status = 'failed'
logging.info("== RETRY FAILED ==")
self.last_checked = datetime.now()
def should_log(self):
"""Determines if the current status should be logged."""
current_time = datetime.now()
if self.previous_status != self.status or (
self.last_log_time is None or (current_time - self.last_log_time).total_seconds() > 10):
self.last_log_time = current_time
return True
return False
def check_status(self):
if self.id is None: # Skip processing if job ID is None
logging.error(f"== SKIPPING ID NONE ==")
self.status = 'failed'
return
current_time = datetime.now()
# Initial delay of 10 seconds before the first check
if self.last_checked is None:
if (current_time - self.start_time).total_seconds() < 10:
if self.should_log():
logging.info(f"Initial delay in progress for job ID {self.id}.")
threading.Timer(1, lambda: queue_processor.put(self)).start()
return
self.last_checked = current_time
# Check job status at one-second intervals after the initial delay
if (current_time - self.last_checked).total_seconds() >= 1:
self.last_checked = current_time
self.previous_status = self.status
self.status = API.check_job_status(self.id)
if self.should_log():
logging.info(f"Checked status for job ID {self.id}: {self.status}")
if self.status == 'COMPLETE':
self.status = 'completed'
if self.should_log():
logging.info(f"Job ID {self.id} completed, downloading content.")
API.download_job_content(self.id)
elif (current_time - self.start_time).total_seconds() > 10000000:
self.status = 'failed'
if self.should_log():
logging.error(f"Job ID {self.id} failed due to timeout.")
else:
threading.Timer(1, lambda: queue_processor.put(self)).start()
else:
threading.Timer(1, lambda: queue_processor.put(self)).start()
class JobManager:
def __init__(self):
self.jobs = []
self.active_jobs = 0
self.lock = threading.Lock()
self.empty_queue_count = 0 # Counter for empty queue checks
def run_job(self, job_payloads):
with self.lock:
for payload in job_payloads:
if self.active_jobs < MAX_CONCURRENT_JOBS:
job = Job(payload)
self.jobs.append(job)
job.start()
self.active_jobs += 1
queue_processor.put(job)
logging.info(f"Job {job.id} started.")
else:
self.jobs.append(Job(payload))
logging.info("Maximum concurrent jobs reached, job added to queue.")
def process_queue(self):
while True:
all_jobs_done = len(self.jobs) == 0 and self.active_jobs == 0
if all_jobs_done:
logging.info("All jobs have been processed. Exiting.")
break
try:
| # job_manager.py
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class API:
total_api_credit_cost = 0 # Class-level variable to track the total cost
total_images = 0 # Class-level variable to track the total images
@staticmethod
def start_job(data):
url = API_BASE_URL + 'generations'
headers = HEADERS
payload = json.dumps(data)
try:
logging.info("Calling Leonardo GENERATE")
logging.info("======")
response = requests.post(url, headers=headers, data=payload)
response.raise_for_status() # Raises an HTTPError for certain status codes
job_response = response.json()
job_id = job_response.get('sdGenerationJob', {}).get('generationId')
api_credit_cost = job_response.get('sdGenerationJob', {}).get('apiCreditCost', 0) # Get the credit cost
if job_id:
logging.info(f"Job started with ID: {job_id}, Credit Cost: {api_credit_cost}")
API.total_api_credit_cost += api_credit_cost # Increment the total cost
API.total_images += 1 # Increment the total images
logging.info(f"== TOTAL COST: {API.total_api_credit_cost} API Credits ==")
logging.info(f"== TOTAL IMAGES: {API.total_images} ==")
store_job_data(job_id, data['prompt']) # Store the job ID and prompt
return job_id
else:
logging.error("Failed to start job: No 'generationId' found in response.")
return None
except requests.exceptions.HTTPError as e:
# HTTP error occurred
logging.error(f"HTTP error occurred while starting the job: {e.response.status_code} - {e.response.text}")
except Exception as e:
# Other errors (e.g., network issues, JSON decoding issue, etc.)
logging.error(f"Error starting job: {e}")
return None
@staticmethod
def check_job_status(job_id):
url = API_BASE_URL + f'generations/{job_id}'
headers = HEADERS
# time.sleep(3) # Wait for 1 second before making the API call
time.sleep(API_CALL_DELAY) # Use the configurable delay from config.py
try:
logging.info(f"Calling Leonardo STATUS for job ID {job_id}") # Include job ID in log
response = requests.get(url, headers=headers)
response.raise_for_status()
job_status_response = response.json()
status = job_status_response.get('generations_by_pk', {}).get('status', 'UNKNOWN')
return status
except Exception as e:
logging.error(f"Error checking job status for ID {job_id}: {e}")
return 'UNKNOWN'
@staticmethod
def download_job_content(job_id):
url = API_BASE_URL + f'generations/{job_id}'
headers = HEADERS
try:
logging.info(f"Calling Leonardo CDN DOWNLOAD")
response = requests.get(url, headers=headers)
response.raise_for_status()
job_content_response = response.json()
generated_images = job_content_response.get('generations_by_pk', {}).get('generated_images', [])
# Extract additional metadata
additional_metadata = {
"inferenceSteps": job_content_response.get('generations_by_pk', {}).get('inferenceSteps'),
"seed": job_content_response.get('generations_by_pk', {}).get('seed'),
"presetStyle": job_content_response.get('generations_by_pk', {}).get('presetStyle'),
"initStrength": job_content_response.get('generations_by_pk', {}).get('initStrength'),
"guidanceScale": job_content_response.get('generations_by_pk', {}).get('guidanceScale'),
"promptMagic": job_content_response.get('generations_by_pk', {}).get('promptMagic'),
"promptMagicVersion": job_content_response.get('generations_by_pk', {}).get('promptMagicVersion'),
"promptMagicStrength": job_content_response.get('generations_by_pk', {}).get('promptMagicStrength'),
"photoReal": job_content_response.get('generations_by_pk', {}).get('photoReal'),
"photoRealStrength": job_content_response.get('generations_by_pk', {}).get('photoRealStrength')
}
logging.info(f"Attempting to download content for job ID {job_id}")
job_data = get_job_data(job_id) # Retrieve job data
if job_data: # Check if job data is available
for image in generated_images:
image_url = image.get('url')
if image_url:
local_path = os.path.join("downloaded_images", f"{job_id}_{image.get('id', 'unknown')}.jpg")
logging.info(f"Downloading image: {image_url}")
download_image(image_url, local_path, job_id, job_data['prompt'], additional_metadata)
print(f"NOW SHOWING: {job_data}")
except Exception as e:
logging.error(f"Error downloading content for job ID {job_id}: {e}")
queue_processor = Queue()
class Job:
def __init__(self, data):
self.data = data
self.status = 'pending'
self.id = None
self.start_time = datetime.now()
self.last_checked = None
self.check_count = 0
self.previous_status = None
self.last_log_time = None
def start(self):
self.id = API.start_job(self.data)
self.start_time = datetime.now()
if self.id:
store_job_data(self.id, self.data['prompt'])
self.status = 'processing'
else:
# Retry once if the job fails to start
logging.info("== WARNING RETRY ==")
logging.info(self.data['prompt'])
time.sleep(5) # Use the configurable delay from config.py
self.id = API.start_job(self.data)
if self.id:
store_job_data(self.id, self.data['prompt']) # Store job data in the job_data_store on successful retry
self.status = 'processing'
else:
self.status = 'failed'
logging.info("== RETRY FAILED ==")
self.last_checked = datetime.now()
def should_log(self):
"""Determines if the current status should be logged."""
current_time = datetime.now()
if self.previous_status != self.status or (
self.last_log_time is None or (current_time - self.last_log_time).total_seconds() > 10):
self.last_log_time = current_time
return True
return False
def check_status(self):
if self.id is None: # Skip processing if job ID is None
logging.error(f"== SKIPPING ID NONE ==")
self.status = 'failed'
return
current_time = datetime.now()
# Initial delay of 10 seconds before the first check
if self.last_checked is None:
if (current_time - self.start_time).total_seconds() < 10:
if self.should_log():
logging.info(f"Initial delay in progress for job ID {self.id}.")
threading.Timer(1, lambda: queue_processor.put(self)).start()
return
self.last_checked = current_time
# Check job status at one-second intervals after the initial delay
if (current_time - self.last_checked).total_seconds() >= 1:
self.last_checked = current_time
self.previous_status = self.status
self.status = API.check_job_status(self.id)
if self.should_log():
logging.info(f"Checked status for job ID {self.id}: {self.status}")
if self.status == 'COMPLETE':
self.status = 'completed'
if self.should_log():
logging.info(f"Job ID {self.id} completed, downloading content.")
API.download_job_content(self.id)
elif (current_time - self.start_time).total_seconds() > 10000000:
self.status = 'failed'
if self.should_log():
logging.error(f"Job ID {self.id} failed due to timeout.")
else:
threading.Timer(1, lambda: queue_processor.put(self)).start()
else:
threading.Timer(1, lambda: queue_processor.put(self)).start()
class JobManager:
def __init__(self):
self.jobs = []
self.active_jobs = 0
self.lock = threading.Lock()
self.empty_queue_count = 0 # Counter for empty queue checks
def run_job(self, job_payloads):
with self.lock:
for payload in job_payloads:
if self.active_jobs < MAX_CONCURRENT_JOBS:
job = Job(payload)
self.jobs.append(job)
job.start()
self.active_jobs += 1
queue_processor.put(job)
logging.info(f"Job {job.id} started.")
else:
self.jobs.append(Job(payload))
logging.info("Maximum concurrent jobs reached, job added to queue.")
def process_queue(self):
while True:
all_jobs_done = len(self.jobs) == 0 and self.active_jobs == 0
if all_jobs_done:
logging.info("All jobs have been processed. Exiting.")
break
try:
| job = queue_processor.get(timeout=RATE_LIMIT_DELAY.total_seconds())
| 2 | 2023-12-09 16:16:39+00:00 | 4k |
digitalfortress-dev/python-sqs-client | examples/publish.py | [
{
"identifier": "SQSClient",
"path": "sqs_client/client.py",
"snippet": "class SQSClient:\n \"\"\"\n This class represents a client for interacting with the SQS service.\n\n It provides methods for sending and receiving messages.\n \"\"\"\n\n def __init__(\n self,\n region_n... | from sqs_client.client import SQSClient
from sqs_client.publisher import Publisher | 2,129 |
sqs_client = SQSClient()
sqs_client.publish(
queue_name="sqs-queue-name",
message="test message",
)
# or
|
sqs_client = SQSClient()
sqs_client.publish(
queue_name="sqs-queue-name",
message="test message",
)
# or
| publisher = Publisher( | 1 | 2023-12-06 07:35:29+00:00 | 4k |
LkPrtctrd/BSL-V53 | Heart/Packets/Client/Authentification/LoginMessage.py | [
{
"identifier": "Messaging",
"path": "Heart/Messaging.py",
"snippet": "class Messaging:\n def writeHeader(message, payloadLen):\n message.messageBuffer += message.getMessageType().to_bytes(2, 'big', signed=True)\n message.messageBuffer += payloadLen.to_bytes(3, 'big', signed=True)\n ... | from Heart.Messaging import Messaging
from DB.DatabaseHandler import DatabaseHandler
from Heart.Packets.PiranhaMessage import PiranhaMessage
from Heart.Utils.ClientsManager import ClientsManager
import json | 1,994 |
class LoginMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
def encode(self, fields):
pass
def decode(self):
fields = {}
fields["AccountID"] = self.readLong()
fields["PassToken"] = self.readString()
fields["ClientMajor"] = self.readInt()
fields["ClientMinor"] = self.readInt()
fields["ClientBuild"] = self.readInt()
fields["ResourceSha"] = self.readString()
fields["Device"] = self.readString()
fields["PreferredLanguage"] = self.readDataReference()
fields["PreferredDeviceLanguage"] = self.readString()
fields["OSVersion"] = self.readString()
fields["isAndroid"] = self.readBoolean()
fields["IMEI"] = self.readString()
fields["AndroidID"] = self.readString()
fields["isAdvertisingEnabled"] = self.readBoolean()
fields["AppleIFV"] = self.readString()
fields["RndKey"] = self.readInt()
fields["AppStore"] = self.readVInt()
fields["ClientVersion"] = self.readString()
fields["TencentOpenId"] = self.readString()
fields["TencentToken"] = self.readString()
fields["TencentPlatform"] = self.readVInt()
fields["DeviceVerifierResponse"] = self.readString()
fields["AppLicensingSignature"] = self.readString()
fields["DeviceVerifierResponse"] = self.readString()
super().decode(fields)
return fields
def execute(message, calling_instance, fields, cryptoInit):
if fields["ClientMajor"]==53:
calling_instance.player.ClientVersion = f'{str(fields["ClientMajor"])}.{str(fields["ClientBuild"])}.{str(fields["ClientMinor"])}'
fields["Socket"] = calling_instance.client
|
class LoginMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
def encode(self, fields):
pass
def decode(self):
fields = {}
fields["AccountID"] = self.readLong()
fields["PassToken"] = self.readString()
fields["ClientMajor"] = self.readInt()
fields["ClientMinor"] = self.readInt()
fields["ClientBuild"] = self.readInt()
fields["ResourceSha"] = self.readString()
fields["Device"] = self.readString()
fields["PreferredLanguage"] = self.readDataReference()
fields["PreferredDeviceLanguage"] = self.readString()
fields["OSVersion"] = self.readString()
fields["isAndroid"] = self.readBoolean()
fields["IMEI"] = self.readString()
fields["AndroidID"] = self.readString()
fields["isAdvertisingEnabled"] = self.readBoolean()
fields["AppleIFV"] = self.readString()
fields["RndKey"] = self.readInt()
fields["AppStore"] = self.readVInt()
fields["ClientVersion"] = self.readString()
fields["TencentOpenId"] = self.readString()
fields["TencentToken"] = self.readString()
fields["TencentPlatform"] = self.readVInt()
fields["DeviceVerifierResponse"] = self.readString()
fields["AppLicensingSignature"] = self.readString()
fields["DeviceVerifierResponse"] = self.readString()
super().decode(fields)
return fields
def execute(message, calling_instance, fields, cryptoInit):
if fields["ClientMajor"]==53:
calling_instance.player.ClientVersion = f'{str(fields["ClientMajor"])}.{str(fields["ClientBuild"])}.{str(fields["ClientMinor"])}'
fields["Socket"] = calling_instance.client | db_instance = DatabaseHandler() | 1 | 2023-12-14 18:57:56+00:00 | 4k |
sockheadrps/AIODesa | tests/test_table.py | [
{
"identifier": "ForeignKey",
"path": "aiodesa/utils/table.py",
"snippet": "class ForeignKey(NamedTuple):\n \"\"\"\n Represents a foreign key relationship in a database.\n Args:\n key: The column name representing the foreign key.\n table: The name of the referenced table.\n\n ... | from aiodesa.utils.table import (
ForeignKey,
PrimaryKey,
UniqueKey,
set_key,
make_schema,
)
from dataclasses import dataclass
from uuid import uuid4 | 1,686 |
def test_ForeignKey():
"""
Test the ForeignKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
table = uuid4()
key = uuid4()
foreign_key = ForeignKey(key, table)
assert foreign_key.table == table
assert foreign_key.key == key
def test_PrimaryKey():
"""
Test the PrimaryKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
column = uuid4()
primary_key = PrimaryKey(column)
assert primary_key.column == column
def test_UniqueKey():
"""
Test the UniqueKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
column = uuid4()
unique_key = UniqueKey(column)
assert unique_key.column == column
def test_set_key():
"""
Test the behavior of the set_key decorator.
This test checks that the set_key decorator correctly sets primary, unique, and foreign keys
on a class using PrimaryKey, UniqueKey, and ForeignKey attributes.
"""
test_column_1 = uuid4()
test_column_2 = uuid4()
foriegn_key_table = uuid4()
foriegn_key_key = uuid4()
@set_key(
PrimaryKey(test_column_1),
UniqueKey(test_column_2),
ForeignKey(foriegn_key_key, foriegn_key_table),
)
class TestTable:
test_column_1: str | None = None
test_column_2: int | None = None
assert TestTable.primary_key == test_column_1
assert TestTable.unique_key == test_column_2
assert TestTable.foreign_keys[0].table == foriegn_key_table
assert TestTable.foreign_keys[0].key == foriegn_key_key
def test_make_schema():
"""
Tests that the table SQL is generated correctly
"""
table_name = uuid4
@dataclass
class TestTable:
table_name: str
test_column_1: str = "Test"
table = TestTable(table_name)
|
def test_ForeignKey():
"""
Test the ForeignKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
table = uuid4()
key = uuid4()
foreign_key = ForeignKey(key, table)
assert foreign_key.table == table
assert foreign_key.key == key
def test_PrimaryKey():
"""
Test the PrimaryKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
column = uuid4()
primary_key = PrimaryKey(column)
assert primary_key.column == column
def test_UniqueKey():
"""
Test the UniqueKey named tuple.
This test checks the correctness of the class by verifying that
the result is as expected for various input cases.
"""
column = uuid4()
unique_key = UniqueKey(column)
assert unique_key.column == column
def test_set_key():
"""
Test the behavior of the set_key decorator.
This test checks that the set_key decorator correctly sets primary, unique, and foreign keys
on a class using PrimaryKey, UniqueKey, and ForeignKey attributes.
"""
test_column_1 = uuid4()
test_column_2 = uuid4()
foriegn_key_table = uuid4()
foriegn_key_key = uuid4()
@set_key(
PrimaryKey(test_column_1),
UniqueKey(test_column_2),
ForeignKey(foriegn_key_key, foriegn_key_table),
)
class TestTable:
test_column_1: str | None = None
test_column_2: int | None = None
assert TestTable.primary_key == test_column_1
assert TestTable.unique_key == test_column_2
assert TestTable.foreign_keys[0].table == foriegn_key_table
assert TestTable.foreign_keys[0].key == foriegn_key_key
def test_make_schema():
"""
Tests that the table SQL is generated correctly
"""
table_name = uuid4
@dataclass
class TestTable:
table_name: str
test_column_1: str = "Test"
table = TestTable(table_name) | schema = make_schema(table_name, table) | 4 | 2023-12-09 05:52:25+00:00 | 4k |
DavidBellamy/labrador | scripts/pretraining/train_labrador.py | [
{
"identifier": "get_dataset",
"path": "lab_transformers/data/read_labrador_tf_records.py",
"snippet": "def get_dataset(\n filenames: List[str],\n batch_size: int,\n pad_token: int,\n random_seed: int,\n shuffle_buffer_size: int,\n) -> tf.data.TFRecordDataset:\n dataset = (\n tf... | import os
import os.path as op
import sys
import time
import numpy as np
import tensorflow as tf
import wandb
from tensorflow.keras import mixed_precision
from lab_transformers.data.read_labrador_tf_records import get_dataset
from lab_transformers.models.labrador.loss import CategoricalMLMLoss, ContinuousMLMLoss
from lab_transformers.models.labrador.model import Labrador | 2,413 |
# Parse command-line arguments: RNG seed, special-token ids, and model dims.
random_seed = int(sys.argv[1])
mask_token = int(sys.argv[2])
null_token = int(sys.argv[3])
pad_token = int(sys.argv[4])
vocab_size = int(sys.argv[5])
embed_dim = int(sys.argv[6])
use_wandb = True
# Set configuration
system_config = {
    "random_seed": random_seed,
    "wandb_project_name": "labrador_pretraining",
    "wandb_run_name": "run2",
    "use_mixed_precision": False,
}
data_config = {
    "tfdata_shuffle_buffer_size": 2_560,
    "max_seq_len": 90,
    "tfrecords_dir_train": "data_full/labrador_tfrecords_train",
    "tfrecords_dir_val": "data_full/labrador_tfrecords_val",
}
time_string = time.strftime("%Y%m%d-%H%M%S")
train_config = {
    "steps_per_epoch": (20_000 * 182) // 256,
    "num_train_epochs": 100,
    "learning_rate": 1e-5,
    "batch_size": 256,
    "model_save_batch_frequency": 14_000,  # save the model every n batches during training
    "model_checkpoint_directory_name": f"labrador_{time_string}",
    "validation_steps": ((20_000 * 27) // 256) // 2,
    "validation_step_frequency": 3_500,  # perform validation every n training batches
}
model_config = {
    "mask_token": mask_token,
    "null_token": null_token,
    "pad_token": pad_token,
    "vocab_size": vocab_size,
    "embedding_dim": embed_dim,
    "transformer_activation": "relu",
    "transformer_heads": 4,
    "transformer_blocks": 10,
    "transformer_feedforward_dim": 1024,
    "include_head": True,
    "continuous_head_activation": "sigmoid",
    "categorical_loss_fn": CategoricalMLMLoss(),
    "continuous_loss_fn": ContinuousMLMLoss(),
    "loss_weights": {"categorical_output": 1.0, "continuous_output": 1.0},
    "dropout_rate": 0.1,
}
config = {
    "data_config": data_config,
    "train_config": train_config,
    "model_config": model_config,
    "system_config": system_config,
}
if config["system_config"]["use_mixed_precision"]:
    mixed_precision.set_global_policy("mixed_float16")
if use_wandb:
    wandb.login(key=os.environ["wandb_key"])
    wandb.init(
        project=config["system_config"]["wandb_project_name"],
        settings=wandb.Settings(start_method="thread"),
        config=config,
        name=config["system_config"]["wandb_run_name"],
    )
# Read TFRecord data
train_filenames = tf.io.gfile.glob(
    op.join(config["data_config"]["tfrecords_dir_train"], "*.tfrec")
)
val_filenames = tf.io.gfile.glob(
    op.join(config["data_config"]["tfrecords_dir_val"], "*.tfrec")
)
train_dataset = get_dataset(
    train_filenames,
    config["train_config"]["batch_size"],
    pad_token,
    random_seed,
    config["data_config"]["tfdata_shuffle_buffer_size"],
)
# BUG FIX: the validation dataset was previously built from train_filenames,
# so "validation" metrics were computed on training data. Use val_filenames.
val_dataset = get_dataset(
    val_filenames,
    config["train_config"]["batch_size"],
    pad_token,
    random_seed,
    config["data_config"]["tfdata_shuffle_buffer_size"],
)
# Instantiate the transformer model
|
# Parse command-line arguments: RNG seed, special-token ids, and model dims.
random_seed = int(sys.argv[1])
mask_token = int(sys.argv[2])
null_token = int(sys.argv[3])
pad_token = int(sys.argv[4])
vocab_size = int(sys.argv[5])
embed_dim = int(sys.argv[6])
use_wandb = True
# Set configuration
system_config = {
    "random_seed": random_seed,
    "wandb_project_name": "labrador_pretraining",
    "wandb_run_name": "run2",
    "use_mixed_precision": False,
}
data_config = {
    "tfdata_shuffle_buffer_size": 2_560,
    "max_seq_len": 90,
    "tfrecords_dir_train": "data_full/labrador_tfrecords_train",
    "tfrecords_dir_val": "data_full/labrador_tfrecords_val",
}
time_string = time.strftime("%Y%m%d-%H%M%S")
train_config = {
    "steps_per_epoch": (20_000 * 182) // 256,
    "num_train_epochs": 100,
    "learning_rate": 1e-5,
    "batch_size": 256,
    "model_save_batch_frequency": 14_000,  # save the model every n batches during training
    "model_checkpoint_directory_name": f"labrador_{time_string}",
    "validation_steps": ((20_000 * 27) // 256) // 2,
    "validation_step_frequency": 3_500,  # perform validation every n training batches
}
model_config = {
    "mask_token": mask_token,
    "null_token": null_token,
    "pad_token": pad_token,
    "vocab_size": vocab_size,
    "embedding_dim": embed_dim,
    "transformer_activation": "relu",
    "transformer_heads": 4,
    "transformer_blocks": 10,
    "transformer_feedforward_dim": 1024,
    "include_head": True,
    "continuous_head_activation": "sigmoid",
    "categorical_loss_fn": CategoricalMLMLoss(),
    "continuous_loss_fn": ContinuousMLMLoss(),
    "loss_weights": {"categorical_output": 1.0, "continuous_output": 1.0},
    "dropout_rate": 0.1,
}
config = {
    "data_config": data_config,
    "train_config": train_config,
    "model_config": model_config,
    "system_config": system_config,
}
if config["system_config"]["use_mixed_precision"]:
    mixed_precision.set_global_policy("mixed_float16")
if use_wandb:
    wandb.login(key=os.environ["wandb_key"])
    wandb.init(
        project=config["system_config"]["wandb_project_name"],
        settings=wandb.Settings(start_method="thread"),
        config=config,
        name=config["system_config"]["wandb_run_name"],
    )
# Read TFRecord data
train_filenames = tf.io.gfile.glob(
    op.join(config["data_config"]["tfrecords_dir_train"], "*.tfrec")
)
val_filenames = tf.io.gfile.glob(
    op.join(config["data_config"]["tfrecords_dir_val"], "*.tfrec")
)
train_dataset = get_dataset(
    train_filenames,
    config["train_config"]["batch_size"],
    pad_token,
    random_seed,
    config["data_config"]["tfdata_shuffle_buffer_size"],
)
# BUG FIX: the validation dataset was previously built from train_filenames,
# so "validation" metrics were computed on training data. Use val_filenames.
val_dataset = get_dataset(
    val_filenames,
    config["train_config"]["batch_size"],
    pad_token,
    random_seed,
    config["data_config"]["tfdata_shuffle_buffer_size"],
)
# Instantiate the transformer model
NLP-Core-Team/RealCode_eval | main.py | [
{
"identifier": "InfillGenerator",
"path": "lm_eval/generators.py",
"snippet": "class InfillGenerator:\n def __init__(self, \n model_path: str,\n num_samples: int,\n prefix_tokens: tp.Union[str, tp.List[int]] = [],\n middle_tokens: tp.Union[str, tp.List[int]] = [],\n ... | import hydra
import torch
import numpy as np
import random
import json
import os
import logging
from lm_eval.generators import InfillGenerator, LMGenerator
from lm_eval.evaluator import Evaluator
from lm_eval.context_parser import TrivialContextParser
from lm_eval.utils import load_dataset
from omegaconf import DictConfig, OmegaConf | 3,575 |
logger = logging.getLogger("RealCode")
logger.setLevel(logging.DEBUG)
def seed_all(seed):
    """Seed Python's, NumPy's, and torch's (CPU + current CUDA device) RNGs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
@hydra.main(config_path="config", config_name="config")
def main(cfg: DictConfig) -> None:
seed_all(cfg.seed)
print(cfg)
dataset = load_dataset(cfg.dataset_root, cfg.dataset_meta_file, cfg.limit)
logger.info(f"loaded {cfg.dataset_root} {cfg.dataset_meta_file}")
if 'context_parser' in cfg:
parser = hydra.utils.instantiate(cfg.context_parser)
else:
|
logger = logging.getLogger("RealCode")
logger.setLevel(logging.DEBUG)
def seed_all(seed):
    """Seed Python's, NumPy's, and torch's (CPU + current CUDA device) RNGs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
@hydra.main(config_path="config", config_name="config")
def main(cfg: DictConfig) -> None:
seed_all(cfg.seed)
print(cfg)
dataset = load_dataset(cfg.dataset_root, cfg.dataset_meta_file, cfg.limit)
logger.info(f"loaded {cfg.dataset_root} {cfg.dataset_meta_file}")
if 'context_parser' in cfg:
parser = hydra.utils.instantiate(cfg.context_parser)
else: | parser = TrivialContextParser() | 3 | 2023-12-12 12:43:06+00:00 | 4k |
centrifugal/grand-chat-tutorial | backend/chat/views.py | [
{
"identifier": "Message",
"path": "backend/chat/models.py",
"snippet": "class Message(models.Model):\n room = models.ForeignKey(Room, related_name='messages', on_delete=models.CASCADE)\n # Note, message may have null user – we consider such messages \"system\". These messages\n # initiated by ... | import json
import logging
import requests
from requests.adapters import HTTPAdapter, Retry
from django.conf import settings
from django.db import transaction
from django.db.models import Exists, OuterRef, Count
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework import status, viewsets
from rest_framework.generics import ListCreateAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from .models import Message, Room, RoomMember, Outbox, CDC
from .serializers import MessageSerializer, RoomSearchSerializer, RoomSerializer, RoomMemberSerializer | 1,862 |
class RoomListViewSet(ListModelMixin, GenericViewSet):
    """List the rooms the requesting user belongs to, most recently bumped first."""
    serializer_class = RoomSerializer
    permission_classes = [IsAuthenticated]
    def get_queryset(self):
        # Annotate each room with its member count, restrict to rooms the
        # current user is a member of, and eagerly load the last message and
        # its author to avoid N+1 queries during serialization.
        return Room.objects.annotate(
            member_count=Count('memberships__id')
        ).filter(
            memberships__user_id=self.request.user.pk
        ).select_related('last_message', 'last_message__user').order_by('-bumped_at')
class RoomDetailViewSet(RetrieveModelMixin, GenericViewSet):
    """Retrieve a single room, restricted to rooms the requesting user belongs to."""
    serializer_class = RoomSerializer
    permission_classes = [IsAuthenticated]
    def get_queryset(self):
        # NOTE(review): counts 'memberships' here but 'memberships__id' in
        # RoomListViewSet — confirm both are intended to count the same thing.
        return Room.objects.annotate(
            member_count=Count('memberships')
        ).filter(memberships__user_id=self.request.user.pk)
class RoomSearchViewSet(viewsets.ModelViewSet):
    """Search across all rooms, flagging the ones the requesting user has joined."""
    serializer_class = RoomSearchSerializer
    permission_classes = [IsAuthenticated]
    def get_queryset(self):
        user = self.request.user
        # Correlated EXISTS subquery: one membership check per room row.
        user_membership = RoomMember.objects.filter(
            room=OuterRef('pk'),
            user=user
        )
        return Room.objects.annotate(
            is_member=Exists(user_membership)
        ).order_by('name')
class CentrifugoMixin:
# A helper method to return the list of channels for all current members of specific room.
# So that the change in the room may be broadcasted to all the members.
def get_room_member_channels(self, room_id):
members = RoomMember.objects.filter(room_id=room_id).values_list('user', flat=True)
return [f'personal:{user_id}' for user_id in members]
def broadcast_room(self, room_id, broadcast_payload):
# Using Centrifugo HTTP API is the simplest way to send real-time message, and usually
# it provides the best latency. The trade-off here is that error here may result in
# lost real-time event. Depending on the application requirements this may be fine or not.
def broadcast():
session = requests.Session()
retries = Retry(total=1, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
session.mount('http://', HTTPAdapter(max_retries=retries))
try:
session.post(
settings.CENTRIFUGO_HTTP_API_ENDPOINT + '/api/broadcast',
data=json.dumps(broadcast_payload),
headers={
'Content-type': 'application/json',
'X-API-Key': settings.CENTRIFUGO_HTTP_API_KEY,
'X-Centrifugo-Error-Mode': 'transport'
}
)
except requests.exceptions.RequestException as e:
logging.error(e)
if settings.CENTRIFUGO_BROADCAST_MODE == 'api':
# We need to use on_commit here to not send notification to Centrifugo before
# changes applied to the database. Since we are inside transaction.atomic block
# broadcast will happen only after successful transaction commit.
transaction.on_commit(broadcast)
elif settings.CENTRIFUGO_BROADCAST_MODE == 'outbox':
# In outbox case we can set partition for parallel processing, but
# it must be in predefined range and match Centrifugo PostgreSQL
# consumer configuration.
partition = hash(room_id)%settings.CENTRIFUGO_OUTBOX_PARTITIONS
# Creating outbox object inside transaction will guarantee that Centrifugo will
# process the command at some point. In normal conditions – almost instantly.
Outbox.objects.create(method='broadcast', payload=broadcast_payload, partition=partition)
elif settings.CENTRIFUGO_BROADCAST_MODE == 'cdc':
# In cdc case Debezium will use this field for setting Kafka partition.
# We should not prepare proper partition ourselves in this case.
partition = hash(room_id)
# Creating outbox object inside transaction will guarantee that Centrifugo will
# process the command at some point. In normal conditions – almost instantly. In this
# app Debezium will perform CDC and send outbox events to Kafka, event will be then
# consumed by Centrifugo. The advantages here is that Debezium reads WAL changes and
# has a negligible overhead on database performance. And most efficient partitioning.
# The trade-off is that more hops add more real-time event delivery latency. May be
# still instant enough though.
|
class RoomListViewSet(ListModelMixin, GenericViewSet):
    """List the rooms the requesting user belongs to, most recently bumped first."""
    serializer_class = RoomSerializer
    permission_classes = [IsAuthenticated]
    def get_queryset(self):
        # Annotate each room with its member count, restrict to rooms the
        # current user is a member of, and eagerly load the last message and
        # its author to avoid N+1 queries during serialization.
        return Room.objects.annotate(
            member_count=Count('memberships__id')
        ).filter(
            memberships__user_id=self.request.user.pk
        ).select_related('last_message', 'last_message__user').order_by('-bumped_at')
class RoomDetailViewSet(RetrieveModelMixin, GenericViewSet):
    """Retrieve a single room, restricted to rooms the requesting user belongs to."""
    serializer_class = RoomSerializer
    permission_classes = [IsAuthenticated]
    def get_queryset(self):
        # NOTE(review): counts 'memberships' here but 'memberships__id' in
        # RoomListViewSet — confirm both are intended to count the same thing.
        return Room.objects.annotate(
            member_count=Count('memberships')
        ).filter(memberships__user_id=self.request.user.pk)
class RoomSearchViewSet(viewsets.ModelViewSet):
    """Search across all rooms, flagging the ones the requesting user has joined."""
    serializer_class = RoomSearchSerializer
    permission_classes = [IsAuthenticated]
    def get_queryset(self):
        user = self.request.user
        # Correlated EXISTS subquery: one membership check per room row.
        user_membership = RoomMember.objects.filter(
            room=OuterRef('pk'),
            user=user
        )
        return Room.objects.annotate(
            is_member=Exists(user_membership)
        ).order_by('name')
class CentrifugoMixin:
# A helper method to return the list of channels for all current members of specific room.
# So that the change in the room may be broadcasted to all the members.
def get_room_member_channels(self, room_id):
members = RoomMember.objects.filter(room_id=room_id).values_list('user', flat=True)
return [f'personal:{user_id}' for user_id in members]
def broadcast_room(self, room_id, broadcast_payload):
# Using Centrifugo HTTP API is the simplest way to send real-time message, and usually
# it provides the best latency. The trade-off here is that error here may result in
# lost real-time event. Depending on the application requirements this may be fine or not.
def broadcast():
session = requests.Session()
retries = Retry(total=1, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
session.mount('http://', HTTPAdapter(max_retries=retries))
try:
session.post(
settings.CENTRIFUGO_HTTP_API_ENDPOINT + '/api/broadcast',
data=json.dumps(broadcast_payload),
headers={
'Content-type': 'application/json',
'X-API-Key': settings.CENTRIFUGO_HTTP_API_KEY,
'X-Centrifugo-Error-Mode': 'transport'
}
)
except requests.exceptions.RequestException as e:
logging.error(e)
if settings.CENTRIFUGO_BROADCAST_MODE == 'api':
# We need to use on_commit here to not send notification to Centrifugo before
# changes applied to the database. Since we are inside transaction.atomic block
# broadcast will happen only after successful transaction commit.
transaction.on_commit(broadcast)
elif settings.CENTRIFUGO_BROADCAST_MODE == 'outbox':
# In outbox case we can set partition for parallel processing, but
# it must be in predefined range and match Centrifugo PostgreSQL
# consumer configuration.
partition = hash(room_id)%settings.CENTRIFUGO_OUTBOX_PARTITIONS
# Creating outbox object inside transaction will guarantee that Centrifugo will
# process the command at some point. In normal conditions – almost instantly.
Outbox.objects.create(method='broadcast', payload=broadcast_payload, partition=partition)
elif settings.CENTRIFUGO_BROADCAST_MODE == 'cdc':
# In cdc case Debezium will use this field for setting Kafka partition.
# We should not prepare proper partition ourselves in this case.
partition = hash(room_id)
# Creating outbox object inside transaction will guarantee that Centrifugo will
# process the command at some point. In normal conditions – almost instantly. In this
# app Debezium will perform CDC and send outbox events to Kafka, event will be then
# consumed by Centrifugo. The advantages here is that Debezium reads WAL changes and
# has a negligible overhead on database performance. And most efficient partitioning.
# The trade-off is that more hops add more real-time event delivery latency. May be
# still instant enough though. | CDC.objects.create(method='broadcast', payload=broadcast_payload, partition=partition) | 4 | 2023-12-06 10:13:26+00:00 | 4k |
HACHIX-CORPORATION/LEDGO | zoomable_graphics_view.py | [
{
"identifier": "get_width_height_from_text",
"path": "utils.py",
"snippet": "def get_width_height_from_text(text, font_size, font_family):\n \"\"\"get Rectangle from text and font size\n\n Args:\n text (str): plain text\n font_size (int): font size\n font_family (str): font f... | import math
import numpy as np
import os
from PySide6.QtWidgets import QGraphicsView, QGraphicsRectItem, QInputDialog, \
QGraphicsItem, QMessageBox, QTextEdit, QHBoxLayout, QLabel, QPushButton
from PySide6.QtGui import QPainter, QFont, QBrush, QImage, QColor, QFontMetrics, QPen, QTransform, QFontDatabase
from PySide6.QtCore import Qt,QPointF, QRectF, QRect
from PySide6.QtWidgets import QDialog, QLineEdit, QFormLayout, QDialogButtonBox, QDoubleSpinBox, QVBoxLayout
from utils import get_width_height_from_text, get_max_font_size, get_font_family | 2,850 | return self.textLineEdit.toPlainText(), self.font_spinbox.value(), self.scaleVSpinBox.value(), self.scaleHSpinBox.value(), self.rotationSpinBox.value()
    def handle_click_button(self, button):
        """Toggle the clicked color button and deselect every other button.

        The selected color's hex name is stored in self.selected_text_color.
        """
        # NOTE(review): reads the palette background rather than parsing the
        # stylesheet — assumes the two stay in sync; confirm.
        color = button.palette().color(button.backgroundRole())
        print(color)
        print("Property: ",button.property("selected"))
        if not button.property("selected"):
            # Mark as selected: green border highlight.
            button.setStyleSheet(
                f"background-color: {color.name()}; border: 4px solid green; border-radius: 5px;")  # Add rounded corners and border
            button.setProperty("selected", True)
        else:
            # Clicked while already selected: revert to the unselected look.
            button.setStyleSheet(
                f"background-color: {color.name()}; border: 1px solid black; border-radius: 5px;")  # Add rounded corners
            button.setProperty("selected", False)
        # Deselect all other buttons so at most one stays highlighted.
        for other_button in self.list_button_color:
            if other_button != button and other_button.property("selected"):
                other_button.setStyleSheet(
                    f"background-color: {other_button.palette().color(other_button.backgroundRole()).name()}; border: 1px solid black; border-radius: 5px;")  # Add rounded corners
                other_button.setProperty("selected", False)
        # NOTE(review): this runs even when the button was just deselected, so
        # selected_text_color still holds that color — confirm whether
        # deselection should clear it instead.
        self.selected_text_color = color.name()
    def on_click_button_color(self, button):
        """Return a zero-arg slot forwarding *button* to handle_click_button.

        Binding the button here avoids the late-binding closure pitfall when
        connecting many buttons in a loop.
        """
        return lambda: self.handle_click_button(button)
class InputTextDialog(QDialog):
    """Dialog prompting for text and a font size when adding new text.

    on_accept clamps the font size so the rendered text fits inside the grid,
    given the top-left position where it will be placed.
    """
    def __init__(self, parent=None, default_text="", font_size=MIN_FONT_SIZE, topleft_pos = (0,0)):
        super(InputTextDialog, self).__init__(parent)
        # parent is the ZoomableGraphicsView; its .parent is the main window
        # holding localized strings and grid dimensions.
        self.zoomable = parent
        self.topleft_pos = topleft_pos
        self.setWindowTitle(self.zoomable.parent.data["input"]["Enter_text"].get(self.zoomable.parent.language_code))
        self.initUI(default_text, font_size)
    def initUI(self, default_text, font_size):
        """Build the form: text box, font-size spinbox, OK/Cancel buttons."""
        # Set up layout
        vbox = QVBoxLayout()
        # Create form layout to add fields
        formLayout = QFormLayout()
        # Text input field
        self.textLineEdit = QTextEdit()
        self.textLineEdit.setPlainText(default_text)
        formLayout.addRow(self.zoomable.parent.data["input"]["Text"].get(self.zoomable.parent.language_code), self.textLineEdit)
        # Scale input field
        self.font_spinbox = QDoubleSpinBox()
        self.font_spinbox.setRange(0, self.zoomable.parent.num_row)
        self.font_spinbox.setValue(font_size)
        formLayout.addRow(self.zoomable.parent.data["input"]["Font_size"].get(self.zoomable.parent.language_code), self.font_spinbox)
        vbox.addLayout(formLayout)
        # OK and Cancel buttons
        buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, self)
        # Connect the QDialogButtonBox's rejected event to the QDialog's reject method
        buttons.rejected.connect(self.reject)
        # Add QDialogButtonBox to the layout
        vbox.addWidget(buttons)
        # Access the "Ok" and "Cancel" buttons and set localized text for them
        ok_button = buttons.button(QDialogButtonBox.Ok)
        cancel_button = buttons.button(QDialogButtonBox.Cancel)
        ok_button.setText(self.zoomable.parent.data["message"]["Yes"].get(self.zoomable.parent.language_code))
        cancel_button.setText(self.zoomable.parent.data["message"]["Cancel"].get(self.zoomable.parent.language_code))
        ok_button.clicked.connect(self.on_accept)
        cancel_button.clicked.connect(self.reject)
        self.setLayout(vbox)
    def on_accept(self):
        """Shrink the font size if the text would overflow the grid, then accept."""
        text = self.textLineEdit.toPlainText()
        dot_size = self.zoomable.parent.dot_size
        x_first, y_first = self.topleft_pos
        # Convert the pixel position of the insertion point into grid coordinates.
        x_coord, y_coord = math.ceil(x_first)/dot_size, math.ceil(y_first)/dot_size
        # check height width oversize
        width, height = get_width_height_from_text(text, self.font_spinbox.value(), self.zoomable.font_family)
        max_height = self.zoomable.parent.num_row
        max_width = self.zoomable.parent.num_col
        if y_coord + height > max_height or x_coord + width > max_width:
            # Overflow: fall back to the largest font size that still fits.
            font_size = get_max_font_size(text, max_width - x_coord, max_height - y_coord, self.zoomable.font_family)
            self.font_spinbox.setValue(font_size)
        self.accept()
    def getInputs(self):
        """Return the entered text and the (possibly clamped) font size."""
        print("New font:", self.font_spinbox.value())
        return self.textLineEdit.toPlainText(), self.font_spinbox.value()
class CustomRectItem(QGraphicsRectItem):
    """QGraphicsRectItem that notifies its owning view when a drag ends."""
    def __init__(self, rect, graphics_view, *args, **kwargs):
        super().__init__(rect, *args, **kwargs) # rect is the rectangle dimensions
        # Back-reference to the view so release events can be forwarded.
        self.graphics_view = graphics_view
    def mouseReleaseEvent(self, event):
        super().mouseReleaseEvent(event)
        print(f"Mouse released after move customRect: {event}")
        # Let the view react to the item's final position after a move.
        self.graphics_view.handle_custom_rect_item_released()
class ZoomableGraphicsView(QGraphicsView):
def __init__(self, scene, parent=None):
super().__init__(scene)
self.parent = parent
self.setRenderHint(QPainter.Antialiasing)
self.setDragMode(QGraphicsView.NoDrag)
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(QGraphicsView.AnchorUnderMouse)
self.zoom_factor_base = 1.25 # or any desired zoom factor
self.dot_size = 2
self.last_known_rubberband_rect = 0
self.layer_color = QColor.fromRgbF(0.313726, 0.313726, 0.313726, 1.000000)
self.transparent_color = QColor(0, 0, 0, 0)
self.num_col = self.parent.num_col
self.num_row = self.parent.num_row
self.text_color = None
self.is_selected = False
self.zoom_factor = 1
|
DEFAULT_VALUE_OF_ITEM = None # Default value of a GraphicsScene item for layers > 0
MIN_FONT_SIZE = 8 # Minimum font size allowed for painted text
DEFAULT_FONT_SIZE = 16 # Font size used when none is specified
class MultiInputDialogue(QDialog):
    """Edit dialog for an existing text item: text, font size, vertical and
    horizontal scale, rotation, and (optionally) text color.
    """
    def __init__(self, parent=None, default_text="", default_scale_v=1.0,
                 default_scale_h=1.0, default_rotation=0.0, font_size=None, text_color=None):
        super(MultiInputDialogue, self).__init__(parent)
        # parent is the ZoomableGraphicsView; its .parent is the main window
        # holding localized strings and the color palette.
        self.zoomable = parent
        self.selected_text_color = text_color
        self.initUI(default_text, default_scale_v, default_scale_h, default_rotation, font_size)
        if self.zoomable.parent.language_code == "eng":
            self.setWindowTitle("Edit")
        else:
            self.setWindowTitle("編集")
    def initUI(self, default_text, default_scale_v, default_scale_h, default_rotation, font_size=None):
        """Build the form: text, font size, V/H scale, rotation, color row."""
        # Set up layout
        vbox = QVBoxLayout()
        # Create form layout to add fields
        formLayout = QFormLayout()
        # Text input field
        self.textLineEdit = QTextEdit()
        self.textLineEdit.setPlainText(default_text)
        formLayout.addRow(self.zoomable.parent.data["input"]["Text"].get(self.zoomable.parent.language_code), self.textLineEdit)
        # Font size
        self.font_spinbox = QDoubleSpinBox()
        self.font_spinbox.setRange(MIN_FONT_SIZE, 384.0)
        self.font_spinbox.setSingleStep(0.1)
        if font_size is not None:
            self.font_spinbox.setValue(font_size)
        formLayout.addRow(self.zoomable.parent.data["input"]["Font_size"].get(self.zoomable.parent.language_code), self.font_spinbox)
        # Scale input field
        self.scaleVSpinBox = QDoubleSpinBox()
        self.scaleVSpinBox.setRange(0.1, 100)
        self.scaleVSpinBox.setSingleStep(0.1)
        self.scaleVSpinBox.setValue(default_scale_v)
        formLayout.addRow(self.zoomable.parent.data["input"]["Scale_v"].get(self.zoomable.parent.language_code), self.scaleVSpinBox)
        self.scaleHSpinBox = QDoubleSpinBox()
        self.scaleHSpinBox.setRange(0.1, 100)
        self.scaleHSpinBox.setValue(default_scale_h)
        self.scaleHSpinBox.setSingleStep(0.1)
        formLayout.addRow(self.zoomable.parent.data["input"]["Scale_h"].get(self.zoomable.parent.language_code), self.scaleHSpinBox)
        # Rotation input field
        self.rotationSpinBox = QDoubleSpinBox()
        self.rotationSpinBox.setRange(-360.0, 360.0)
        self.rotationSpinBox.setValue(default_rotation)
        self.rotationSpinBox.setSingleStep(0.1)
        formLayout.addRow(self.zoomable.parent.data["input"]["Rotation"].get(self.zoomable.parent.language_code), self.rotationSpinBox)
        # Select color row — only shown when a current color was passed in.
        if self.selected_text_color is not None:
            self.select_color = QHBoxLayout()
            self.list_button_color = []
            for color in self.zoomable.parent.colors:
                button = QPushButton()
                # Highlight the button matching the item's current color.
                if color.lower() == self.selected_text_color:
                    button.setStyleSheet(
                        f"background-color: {color}; border: 4px solid green; border-radius: 5px;")  # Add rounded corners and border
                    button.setProperty("selected", True)
                else:
                    button.setStyleSheet(f"background-color: {color}; border: 1px solid black; border-radius: 5px;")
                    button.setProperty("selected", False)
                button.clicked.connect(self.on_click_button_color(button))
                self.list_button_color.append(button)
                self.select_color.addWidget(button)
            formLayout.addRow(self.zoomable.parent.data["label"]["topbar_widget.select_color_label"].get(self.zoomable.parent.language_code), self.select_color)
        # end select color
        vbox.addLayout(formLayout)
        # OK and Cancel buttons
        buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, self)
        # Add QDialogButtonBox to layout
        vbox.addWidget(buttons)
        # Access the "Ok" and "Cancel" buttons and set localized text for them
        ok_button = buttons.button(QDialogButtonBox.Ok)
        cancel_button = buttons.button(QDialogButtonBox.Cancel)
        ok_button.setText(self.zoomable.parent.data["message"]["Yes"].get(self.zoomable.parent.language_code))
        cancel_button.setText(self.zoomable.parent.data["message"]["Cancel"].get(self.zoomable.parent.language_code))
        ok_button.clicked.connect(self.accept)
        cancel_button.clicked.connect(self.reject)
        self.setLayout(vbox)
    def getInputs(self):
        """Return (text, font size, vertical scale, horizontal scale, rotation).

        NOTE(review): the selected color is exposed via selected_text_color,
        not returned here — confirm callers read it from the attribute.
        """
        return self.textLineEdit.toPlainText(), self.font_spinbox.value(), self.scaleVSpinBox.value(), self.scaleHSpinBox.value(), self.rotationSpinBox.value()
    def handle_click_button(self, button):
        """Toggle the clicked color button and deselect every other button.

        The selected color's hex name is stored in self.selected_text_color.
        """
        # NOTE(review): reads the palette background rather than parsing the
        # stylesheet — assumes the two stay in sync; confirm.
        color = button.palette().color(button.backgroundRole())
        print(color)
        print("Property: ",button.property("selected"))
        if not button.property("selected"):
            # Mark as selected: green border highlight.
            button.setStyleSheet(
                f"background-color: {color.name()}; border: 4px solid green; border-radius: 5px;")  # Add rounded corners and border
            button.setProperty("selected", True)
        else:
            # Clicked while already selected: revert to the unselected look.
            button.setStyleSheet(
                f"background-color: {color.name()}; border: 1px solid black; border-radius: 5px;")  # Add rounded corners
            button.setProperty("selected", False)
        # Deselect all other buttons so at most one stays highlighted.
        for other_button in self.list_button_color:
            if other_button != button and other_button.property("selected"):
                other_button.setStyleSheet(
                    f"background-color: {other_button.palette().color(other_button.backgroundRole()).name()}; border: 1px solid black; border-radius: 5px;")  # Add rounded corners
                other_button.setProperty("selected", False)
        # NOTE(review): this runs even when the button was just deselected, so
        # selected_text_color still holds that color — confirm whether
        # deselection should clear it instead.
        self.selected_text_color = color.name()
    def on_click_button_color(self, button):
        """Return a zero-arg slot forwarding *button* to handle_click_button.

        Binding the button here avoids the late-binding closure pitfall when
        connecting many buttons in a loop.
        """
        return lambda: self.handle_click_button(button)
class InputTextDialog(QDialog):
    """Dialog prompting for text and a font size when adding new text.

    on_accept clamps the font size so the rendered text fits inside the grid,
    given the top-left position where it will be placed.
    """
    def __init__(self, parent=None, default_text="", font_size=MIN_FONT_SIZE, topleft_pos = (0,0)):
        super(InputTextDialog, self).__init__(parent)
        # parent is the ZoomableGraphicsView; its .parent is the main window
        # holding localized strings and grid dimensions.
        self.zoomable = parent
        self.topleft_pos = topleft_pos
        self.setWindowTitle(self.zoomable.parent.data["input"]["Enter_text"].get(self.zoomable.parent.language_code))
        self.initUI(default_text, font_size)
    def initUI(self, default_text, font_size):
        """Build the form: text box, font-size spinbox, OK/Cancel buttons."""
        # Set up layout
        vbox = QVBoxLayout()
        # Create form layout to add fields
        formLayout = QFormLayout()
        # Text input field
        self.textLineEdit = QTextEdit()
        self.textLineEdit.setPlainText(default_text)
        formLayout.addRow(self.zoomable.parent.data["input"]["Text"].get(self.zoomable.parent.language_code), self.textLineEdit)
        # Scale input field
        self.font_spinbox = QDoubleSpinBox()
        self.font_spinbox.setRange(0, self.zoomable.parent.num_row)
        self.font_spinbox.setValue(font_size)
        formLayout.addRow(self.zoomable.parent.data["input"]["Font_size"].get(self.zoomable.parent.language_code), self.font_spinbox)
        vbox.addLayout(formLayout)
        # OK and Cancel buttons
        buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, self)
        # Connect the QDialogButtonBox's rejected event to the QDialog's reject method
        buttons.rejected.connect(self.reject)
        # Add QDialogButtonBox to the layout
        vbox.addWidget(buttons)
        # Access the "Ok" and "Cancel" buttons and set localized text for them
        ok_button = buttons.button(QDialogButtonBox.Ok)
        cancel_button = buttons.button(QDialogButtonBox.Cancel)
        ok_button.setText(self.zoomable.parent.data["message"]["Yes"].get(self.zoomable.parent.language_code))
        cancel_button.setText(self.zoomable.parent.data["message"]["Cancel"].get(self.zoomable.parent.language_code))
        ok_button.clicked.connect(self.on_accept)
        cancel_button.clicked.connect(self.reject)
        self.setLayout(vbox)
    def on_accept(self):
        """Shrink the font size if the text would overflow the grid, then accept."""
        text = self.textLineEdit.toPlainText()
        dot_size = self.zoomable.parent.dot_size
        x_first, y_first = self.topleft_pos
        # Convert the pixel position of the insertion point into grid coordinates.
        x_coord, y_coord = math.ceil(x_first)/dot_size, math.ceil(y_first)/dot_size
        # check height width oversize
        width, height = get_width_height_from_text(text, self.font_spinbox.value(), self.zoomable.font_family)
        max_height = self.zoomable.parent.num_row
        max_width = self.zoomable.parent.num_col
        if y_coord + height > max_height or x_coord + width > max_width:
            # Overflow: fall back to the largest font size that still fits.
            font_size = get_max_font_size(text, max_width - x_coord, max_height - y_coord, self.zoomable.font_family)
            self.font_spinbox.setValue(font_size)
        self.accept()
    def getInputs(self):
        """Return the entered text and the (possibly clamped) font size."""
        print("New font:", self.font_spinbox.value())
        return self.textLineEdit.toPlainText(), self.font_spinbox.value()
class CustomRectItem(QGraphicsRectItem):
    """QGraphicsRectItem that notifies its owning view when a drag ends."""
    def __init__(self, rect, graphics_view, *args, **kwargs):
        super().__init__(rect, *args, **kwargs) # rect is the rectangle dimensions
        # Back-reference to the view so release events can be forwarded.
        self.graphics_view = graphics_view
    def mouseReleaseEvent(self, event):
        super().mouseReleaseEvent(event)
        print(f"Mouse released after move customRect: {event}")
        # Let the view react to the item's final position after a move.
        self.graphics_view.handle_custom_rect_item_released()
class ZoomableGraphicsView(QGraphicsView):
def __init__(self, scene, parent=None):
super().__init__(scene)
self.parent = parent
self.setRenderHint(QPainter.Antialiasing)
self.setDragMode(QGraphicsView.NoDrag)
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(QGraphicsView.AnchorUnderMouse)
self.zoom_factor_base = 1.25 # or any desired zoom factor
self.dot_size = 2
self.last_known_rubberband_rect = 0
self.layer_color = QColor.fromRgbF(0.313726, 0.313726, 0.313726, 1.000000)
self.transparent_color = QColor(0, 0, 0, 0)
self.num_col = self.parent.num_col
self.num_row = self.parent.num_row
self.text_color = None
self.is_selected = False
self.zoom_factor = 1 | self.font_family = get_font_family() | 2 | 2023-12-08 04:05:16+00:00 | 4k |
shinkungoo/SymbolicCDM | SCDM/model.py | [
{
"identifier": "StudentDataSet",
"path": "SCDM/utility.py",
"snippet": "class StudentDataSet(Dataset):\n def __init__(self, loaded_data):\n \"\"\"\n This class is designed for transforming loaded_data from np.ndarray to Dataset.\n \"\"\"\n self.data = loaded_data\n\n d... | import warnings
import numpy as np
import torch
import pprint
from torch.utils.data import random_split
from .utility import StudentDataSet, print_logs, transform
from .interaction import GeneticInteractionFunc
from .parameter import Parameter
from .eval import degree_of_agreement | 2,308 |
class SymbolicCDM:
def __init__(self,
q_matrix: np.ndarray,
student_number: int,
question_number: int,
knowledge_number: int,
response_logs: np.ndarray,
device="cpu"):
# dataset split
response_logs = StudentDataSet(response_logs)
# organize dataset
train_size = int(len(response_logs) * 0.75)
valid_size = len(response_logs) - train_size
train_set, valid_set = random_split(response_logs, [train_size, valid_size])
train_set = np.array(train_set)
valid_set = np.array(valid_set)
self.train_set = transform(train_set[:, 0], train_set[:, 1], train_set[:, 2], torch.Tensor(q_matrix))
self.train_size = train_size
self.valid_set = transform(valid_set[:, 0], valid_set[:, 1], valid_set[:, 2], torch.Tensor(q_matrix))
self.interaction = GeneticInteractionFunc(self.train_set, train_size)
|
class SymbolicCDM:
def __init__(self,
q_matrix: np.ndarray,
student_number: int,
question_number: int,
knowledge_number: int,
response_logs: np.ndarray,
device="cpu"):
# dataset split
response_logs = StudentDataSet(response_logs)
# organize dataset
train_size = int(len(response_logs) * 0.75)
valid_size = len(response_logs) - train_size
train_set, valid_set = random_split(response_logs, [train_size, valid_size])
train_set = np.array(train_set)
valid_set = np.array(valid_set)
self.train_set = transform(train_set[:, 0], train_set[:, 1], train_set[:, 2], torch.Tensor(q_matrix))
self.train_size = train_size
self.valid_set = transform(valid_set[:, 0], valid_set[:, 1], valid_set[:, 2], torch.Tensor(q_matrix))
self.interaction = GeneticInteractionFunc(self.train_set, train_size) | self.parameter = Parameter(student_number, | 4 | 2023-12-09 13:37:15+00:00 | 4k |
pan-x-c/EE-LLM | megatron/global_vars.py | [
{
"identifier": "dist_signal_handler",
"path": "megatron/dist_signal_handler.py",
"snippet": "def get_world_size():\ndef get_device(local_rank=None):\ndef all_gather_item(item, dtype, group=None, async_op=False, local_rank=None):\n def __init__(self, sig=signal.SIGTERM):\n def signals_received(sel... | import os
import sys
import torch
import wandb
from megatron import dist_signal_handler
from megatron.tokenizer import build_tokenizer
from .microbatches import build_num_microbatches_calculator
from .timers import Timers
from torch.utils.tensorboard import SummaryWriter
from userlib.auto_resume import AutoResume | 3,499 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Megatron global variables."""
_GLOBAL_ARGS = None
_GLOBAL_RETRO_ARGS = None
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None
_GLOBAL_TOKENIZER = None
_GLOBAL_TENSORBOARD_WRITER = None
_GLOBAL_WANDB_WRITER = None
_GLOBAL_ADLR_AUTORESUME = None
_GLOBAL_TIMERS = None
_GLOBAL_SIGNAL_HANDLER = None
def get_args():
"""Return arguments."""
_ensure_var_is_initialized(_GLOBAL_ARGS, 'args')
return _GLOBAL_ARGS
def get_retro_args():
"""Return retro arguments."""
return _GLOBAL_RETRO_ARGS
def get_num_microbatches():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get()
def get_current_global_batch_size():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get_current_global_batch_size()
def update_num_microbatches(consumed_samples, consistency_check=True):
_GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples,
consistency_check)
def get_tokenizer():
"""Return tokenizer."""
_ensure_var_is_initialized(_GLOBAL_TOKENIZER, 'tokenizer')
return _GLOBAL_TOKENIZER
def get_tensorboard_writer():
"""Return tensorboard writer. It can be None so no need
to check if it is initialized."""
return _GLOBAL_TENSORBOARD_WRITER
def get_wandb_writer():
"""Return tensorboard writer. It can be None so no need
to check if it is initialized."""
return _GLOBAL_WANDB_WRITER
def get_adlr_autoresume():
"""ADLR autoresume object. It can be None so no need
to check if it is initialized."""
return _GLOBAL_ADLR_AUTORESUME
def get_timers():
"""Return timers."""
_ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers')
return _GLOBAL_TIMERS
def get_signal_handler():
_ensure_var_is_initialized(_GLOBAL_SIGNAL_HANDLER, 'signal handler')
return _GLOBAL_SIGNAL_HANDLER
def _set_signal_handler():
global _GLOBAL_SIGNAL_HANDLER
_ensure_var_is_not_initialized(_GLOBAL_SIGNAL_HANDLER, 'signal handler')
| # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Megatron global variables."""
_GLOBAL_ARGS = None
_GLOBAL_RETRO_ARGS = None
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None
_GLOBAL_TOKENIZER = None
_GLOBAL_TENSORBOARD_WRITER = None
_GLOBAL_WANDB_WRITER = None
_GLOBAL_ADLR_AUTORESUME = None
_GLOBAL_TIMERS = None
_GLOBAL_SIGNAL_HANDLER = None
def get_args():
"""Return arguments."""
_ensure_var_is_initialized(_GLOBAL_ARGS, 'args')
return _GLOBAL_ARGS
def get_retro_args():
"""Return retro arguments."""
return _GLOBAL_RETRO_ARGS
def get_num_microbatches():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get()
def get_current_global_batch_size():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get_current_global_batch_size()
def update_num_microbatches(consumed_samples, consistency_check=True):
_GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples,
consistency_check)
def get_tokenizer():
"""Return tokenizer."""
_ensure_var_is_initialized(_GLOBAL_TOKENIZER, 'tokenizer')
return _GLOBAL_TOKENIZER
def get_tensorboard_writer():
"""Return tensorboard writer. It can be None so no need
to check if it is initialized."""
return _GLOBAL_TENSORBOARD_WRITER
def get_wandb_writer():
"""Return tensorboard writer. It can be None so no need
to check if it is initialized."""
return _GLOBAL_WANDB_WRITER
def get_adlr_autoresume():
"""ADLR autoresume object. It can be None so no need
to check if it is initialized."""
return _GLOBAL_ADLR_AUTORESUME
def get_timers():
"""Return timers."""
_ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers')
return _GLOBAL_TIMERS
def get_signal_handler():
_ensure_var_is_initialized(_GLOBAL_SIGNAL_HANDLER, 'signal handler')
return _GLOBAL_SIGNAL_HANDLER
def _set_signal_handler():
global _GLOBAL_SIGNAL_HANDLER
_ensure_var_is_not_initialized(_GLOBAL_SIGNAL_HANDLER, 'signal handler') | _GLOBAL_SIGNAL_HANDLER = dist_signal_handler.DistributedSignalHandler().__enter__() | 0 | 2023-12-07 08:29:38+00:00 | 4k |
mitrefireline/simharness | simharness2/rewards/base_reward.py | [
{
"identifier": "ReactiveAgent",
"path": "simharness2/agents/agent.py",
"snippet": "class ReactiveAgent:\n \"\"\"A simple agent that reacts to its environment.\n\n FIXME: update docstring style, using llama2 suggestion for now.\n Parameters\n ----------\n agent_id : int\n The uniqu... | import logging
from abc import ABC, abstractmethod
from typing import Any, Dict
from simharness2.agents.agent import ReactiveAgent
from simharness2.analytics.harness_analytics import ReactiveHarnessAnalytics | 3,086 | """Base Reward Class for representing the modular reward function.
Reward Classes to be called in the main environment that derive rewards from the
ReactiveHarnessAnalytics object.
"""
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
)
logger.addHandler(handler)
logger.propagate = False
class BaseReward(ABC):
"""Abstract Class for Reward_Class template with the update functions implemented."""
| """Base Reward Class for representing the modular reward function.
Reward Classes to be called in the main environment that derive rewards from the
ReactiveHarnessAnalytics object.
"""
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
)
logger.addHandler(handler)
logger.propagate = False
class BaseReward(ABC):
"""Abstract Class for Reward_Class template with the update functions implemented."""
| def __init__(self, harness_analytics: ReactiveHarnessAnalytics): | 1 | 2023-12-08 19:13:31+00:00 | 4k |
racinette/querky | querky/backends/postgresql/name_type_mapper.py | [
{
"identifier": "TypeKnowledge",
"path": "querky/base_types.py",
"snippet": "class TypeKnowledge(GetImportsMixin):\n metadata: TypeMetaData\n is_array: bool\n is_optional: bool | None\n elem_is_optional: bool | None = None\n typehint: str | None = None\n userhint: typing.Any | None = N... | from querky.base_types import TypeKnowledge, TypeMetaData
from querky.contract import Contract
from querky.backends.postgresql.type_mapper import PostgresqlTypeMapper | 1,747 |
GET_PG_TYPE_SQL_QUERY = """
SELECT
oid::regtype::TEXT AS type_string,
typnamespace::regnamespace::TEXT AS namespace_string
FROM
pg_type
WHERE
oid = $1
"""
class PostgresqlNameTypeMapper(PostgresqlTypeMapper):
def __init__(self, typemap: dict[str, dict[str, TypeMetaData]]):
self.type_cache = dict()
# копируем
self.typemap = {
schema_name: {
type_name: type_metadata
for type_name, type_metadata in schema_map.items()
}
for schema_name, schema_map in typemap.items()
}
def set_mapping(self, schema: str, type_name: str, metadata: TypeMetaData) -> None:
if schema not in self.typemap:
self.typemap[schema] = dict()
s = self.typemap[schema]
s[type_name] = metadata
async def get_pg_type(self, contract: Contract, conn, oid: int):
if (pg_type := self.type_cache.get(oid, None)) is None:
pg_type = await contract.raw_fetchone(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))
self.type_cache[pg_type] = pg_type
return pg_type
def get_pg_type_sync(self, contract: Contract, conn, oid: int):
if (pg_type := self.type_cache.get(oid, None)) is None:
pg_type = contract.raw_fetchone_sync(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))
self.type_cache[pg_type] = pg_type
return pg_type
|
GET_PG_TYPE_SQL_QUERY = """
SELECT
oid::regtype::TEXT AS type_string,
typnamespace::regnamespace::TEXT AS namespace_string
FROM
pg_type
WHERE
oid = $1
"""
class PostgresqlNameTypeMapper(PostgresqlTypeMapper):
def __init__(self, typemap: dict[str, dict[str, TypeMetaData]]):
self.type_cache = dict()
# копируем
self.typemap = {
schema_name: {
type_name: type_metadata
for type_name, type_metadata in schema_map.items()
}
for schema_name, schema_map in typemap.items()
}
def set_mapping(self, schema: str, type_name: str, metadata: TypeMetaData) -> None:
if schema not in self.typemap:
self.typemap[schema] = dict()
s = self.typemap[schema]
s[type_name] = metadata
async def get_pg_type(self, contract: Contract, conn, oid: int):
if (pg_type := self.type_cache.get(oid, None)) is None:
pg_type = await contract.raw_fetchone(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))
self.type_cache[pg_type] = pg_type
return pg_type
def get_pg_type_sync(self, contract: Contract, conn, oid: int):
if (pg_type := self.type_cache.get(oid, None)) is None:
pg_type = contract.raw_fetchone_sync(conn, GET_PG_TYPE_SQL_QUERY, (oid, ))
self.type_cache[pg_type] = pg_type
return pg_type
| def get_type_knowledge_impl(self, pg_type) -> TypeKnowledge: | 0 | 2023-12-13 15:16:34+00:00 | 4k |
RokasEl/mace-mp-umap | mace_mp_umap/cli.py | [
{
"identifier": "find_closest_training_points",
"path": "mace_mp_umap/analysis.py",
"snippet": "def find_closest_training_points(training_df, test_df):\n structure_groups = test_df.groupby(\"structure_index\")\n training_descriptors = np.vstack(\n training_df[\"descriptor\"]\n ) # num_m... | import pathlib
import typing as t
import typer
import warnings
import torch
from collections import defaultdict
from mace.calculators import mace_mp
from typing_extensions import Annotated
from enum import Enum
from .analysis import find_closest_training_points
from .chemiscope_handling import write_chemiscope_input
from .data_manipulations import get_cleaned_dataframe
from .dim_reduction import (
apply_dimensionality_reduction,
fit_dimensionality_reduction,
)
from .plotting import plot_dimensionality_reduction
from .utils import get_layer_specific_feature_slices | 2,742 |
app = typer.Typer()
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class FilterType(str, Enum):
exclusive = "exclusive"
inclusive = "inclusive"
combinations = "combinations"
none = "none"
@app.command()
def produce_mace_chemiscope_input(
data_path: str = typer.Argument(
default=None,
help="Path to XYZ file containing your system",
),
mp_data_path: str = typer.Argument(default=None, help="Path to MP data"),
filtering: FilterType = typer.Option(
default=FilterType.none,
case_sensitive=False,
help="Whether to filter out structures that contain elements not in the subset or to include them.",
),
element_subset: Annotated[
t.List[str],
typer.Option(
"--add-element", "-e", help="List of elements to include in the subset."
),
] = [],
create_plots: bool = typer.Option(
default=False, help="Whether to create static UMAP and PCA plots."
),
):
if DEVICE != "cuda":
warnings.warn("CUDA not available, using CPU. Might be slow.")
if filtering == FilterType.none:
raise ValueError(
"You must specify filtering type (either `--filtering exclusive` or `--filtering inclusive`).\n"
"Combinations mode means that structures are kept if they're composed only of elements supplied via `-e` flags but don't need to contail all of the supplied elements.\n"
"Exclusive mode means those and only those structures are kept that contail all elements supplied via `-e` flags. This is a subset of `combinations`\n"
"Inclusive mode means that other elements are allowed in addition to those supplied via `-e` flags.\n"
"Most applications should use `--filtering inclusive`. However, for elemental compounds or molecular compounds like water `exclusive` or `combinations` modes are more appropriate."
)
# Load model
calc = mace_mp(
model="medium",
device=DEVICE,
default_dtype="float64",
)
print(
f"Using the MACE cutoff ({calc.r_max} A) for neighbour analysis for all elements."
)
cutoff_dict = defaultdict(lambda: calc.r_max)
# Load MP data
|
app = typer.Typer()
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class FilterType(str, Enum):
exclusive = "exclusive"
inclusive = "inclusive"
combinations = "combinations"
none = "none"
@app.command()
def produce_mace_chemiscope_input(
data_path: str = typer.Argument(
default=None,
help="Path to XYZ file containing your system",
),
mp_data_path: str = typer.Argument(default=None, help="Path to MP data"),
filtering: FilterType = typer.Option(
default=FilterType.none,
case_sensitive=False,
help="Whether to filter out structures that contain elements not in the subset or to include them.",
),
element_subset: Annotated[
t.List[str],
typer.Option(
"--add-element", "-e", help="List of elements to include in the subset."
),
] = [],
create_plots: bool = typer.Option(
default=False, help="Whether to create static UMAP and PCA plots."
),
):
if DEVICE != "cuda":
warnings.warn("CUDA not available, using CPU. Might be slow.")
if filtering == FilterType.none:
raise ValueError(
"You must specify filtering type (either `--filtering exclusive` or `--filtering inclusive`).\n"
"Combinations mode means that structures are kept if they're composed only of elements supplied via `-e` flags but don't need to contail all of the supplied elements.\n"
"Exclusive mode means those and only those structures are kept that contail all elements supplied via `-e` flags. This is a subset of `combinations`\n"
"Inclusive mode means that other elements are allowed in addition to those supplied via `-e` flags.\n"
"Most applications should use `--filtering inclusive`. However, for elemental compounds or molecular compounds like water `exclusive` or `combinations` modes are more appropriate."
)
# Load model
calc = mace_mp(
model="medium",
device=DEVICE,
default_dtype="float64",
)
print(
f"Using the MACE cutoff ({calc.r_max} A) for neighbour analysis for all elements."
)
cutoff_dict = defaultdict(lambda: calc.r_max)
# Load MP data | train_atoms, training_data_df = get_cleaned_dataframe( | 2 | 2023-12-09 10:08:26+00:00 | 4k |
Shahzadnit/EZ-CLIP | clip/clip.py | [
{
"identifier": "build_model",
"path": "clip/model.py",
"snippet": "def build_model(state_dict: dict, config, tsm=False,T=8,dropout=0., joint=False,emb_dropout=0.,pretrain=True):\n vit = \"visual.proj\" in state_dict\n\n if vit:\n vision_width = state_dict[\"visual.conv1.weight\"].shape[0]\... | import hashlib
import os
import urllib
import warnings
import torch
from typing import Union, List
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer | 2,794 |
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
# "ViT-E/16": "https://huggingface.co/QuanSun/EVA-CLIP/resolve/main/EVA02_CLIP_B_psz16_s8B.pt?download=true"
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=Image.BICUBIC),
CenterCrop(n_px),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, config, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True, tsm=False, joint=False,T=8,dropout=0., emb_dropout=0.,pretrain=True):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name])
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
|
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
# "ViT-E/16": "https://huggingface.co/QuanSun/EVA-CLIP/resolve/main/EVA02_CLIP_B_psz16_s8B.pt?download=true"
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=Image.BICUBIC),
CenterCrop(n_px),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, config, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True, tsm=False, joint=False,T=8,dropout=0., emb_dropout=0.,pretrain=True):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name])
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
| model = build_model(state_dict or model.state_dict(),config, joint=joint,tsm=tsm,T=T,dropout=dropout, emb_dropout=emb_dropout,pretrain=pretrain).to(device) | 0 | 2023-12-12 13:11:20+00:00 | 4k |
javrtg/C2P | nonmin_pose/models/base.py | [
{
"identifier": "ConstraintConfig",
"path": "nonmin_pose/constraints/constraint_manager.py",
"snippet": "class ConstraintManager:\n CONSTRAINT_CLASSES = {\n \"manif_def_left\": cnt.ManifDefLeft,\n \"manif_def_right\": cnt.ManifDefRight,\n \"norm_t\": cnt.NormT,\n \"norm_q\... | from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Tuple, Union
from nonmin_pose.constraints.constraint_manager import (
ConstraintConfig,
ConstraintManager,
)
from nonmin_pose.constraints.constraints import Parameter
from nonmin_pose.sdpa import SDPA
from nonmin_pose.utils import compute_data_matrix_C, decompose_essmat
import numpy as np | 2,817 |
class NonMinRelPoseBase(ABC):
"""Non-minimal Essential matrix estimation using SDPA solver."""
DEFAULT_CFG = {
# PARAMETER_STABLE_BUT_SLOW, PARAMETER_DEFAULT, PARAMETER_UNSTABLE_BUT_FAST
"sdpa_param_type": SDPA.PARAMETER_DEFAULT,
"th_rank_optimality": 1e-5,
"th_pure_rot_post": 1 - 1e-8, # for Zhao's and Garcia-Salguero's methods.
"th_pure_rot_sdp": 1e-3, # for C2P
"th_pure_rot_noisefree_sdp": 1e-4, # for C2P
# for computing the constraint coefficients that are determined at runtime.
"use_top_k": None,
}
SDP_COMPUTES_POSE: bool
def __init__(
self,
parameters: Optional[List[Parameter]] = None,
|
class NonMinRelPoseBase(ABC):
"""Non-minimal Essential matrix estimation using SDPA solver."""
DEFAULT_CFG = {
# PARAMETER_STABLE_BUT_SLOW, PARAMETER_DEFAULT, PARAMETER_UNSTABLE_BUT_FAST
"sdpa_param_type": SDPA.PARAMETER_DEFAULT,
"th_rank_optimality": 1e-5,
"th_pure_rot_post": 1 - 1e-8, # for Zhao's and Garcia-Salguero's methods.
"th_pure_rot_sdp": 1e-3, # for C2P
"th_pure_rot_noisefree_sdp": 1e-4, # for C2P
# for computing the constraint coefficients that are determined at runtime.
"use_top_k": None,
}
SDP_COMPUTES_POSE: bool
def __init__(
self,
parameters: Optional[List[Parameter]] = None, | constraints: Optional[ConstraintConfig] = None, | 0 | 2023-12-10 18:25:10+00:00 | 4k |
bluuewhale/nexon-openapi-python | src/nexon_openapi/utils/_transform.py | [
{
"identifier": "is_list",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_list(obj: object) -> TypeGuard[list[object]]:\n return isinstance(obj, list)"
},
{
"identifier": "is_mapping",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_mapping(obj: object... | from typing import Any, Mapping, Optional, TypeVar, Union, cast
from datetime import date, datetime
from typing_extensions import Literal, get_args, override, get_type_hints
from ._utils import (
is_list,
is_mapping,
is_list_type,
is_union_type,
extract_type_arg,
is_required_type,
is_annotated_type,
strip_annotated_type,
)
from .._compat import model_dump, is_typeddict
import pydantic | 1,674 | account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
"""
alias: Optional[str]
format: Optional[PropertyFormat]
format_template: Optional[str]
def __init__(
self,
*,
alias: Optional[str] = None,
format: Optional[PropertyFormat] = None,
format_template: Optional[str] = None,
) -> None:
self.alias = alias
self.format = format
self.format_template = format_template
@override
def __repr__(self) -> str:
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}')"
def maybe_transform(
data: object,
expected_type: object,
) -> Optional[Any]:
"""Wrapper over `transform()` that allows `None` to be passed.
See `transform()` for more details.
"""
if data is None:
return None
return transform(data, expected_type)
# Wrapper over _transform_recursive providing fake types
def transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias='cardID')]]
transformed = transform({'card_id': '<my card ID>'}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = _transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
def _get_annotated_type(type_: type) -> Union[type, None]:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
"""
if is_required_type(type_):
# Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
type_ = get_args(type_)[0]
if is_annotated_type(type_):
return type_
return None
def _maybe_transform_key(key: str, type_: type) -> str:
"""Transform the given `data` based on the annotations provided in `type_`.
Note: this function only looks at `Annotated` types that contain `PropertInfo` metadata.
"""
annotated_type = _get_annotated_type(type_)
if annotated_type is None:
# no `Annotated` definition for this type, no transformation needed
return key
# ignore the first argument as it is the actual type
annotations = get_args(annotated_type)[1:]
for annotation in annotations:
if isinstance(annotation, PropertyInfo) and annotation.alias is not None:
return annotation.alias
return key
def _transform_recursive(
data: object,
*,
annotation: type,
inner_type: Optional[type] = None,
) -> object:
"""Transform the given data against the expected type.
Args:
annotation: The direct type annotation given to the particular piece of data.
This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc
inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
the list can be transformed using the metadata from the container type.
Defaults to the same value as the `annotation` argument.
"""
if inner_type is None:
inner_type = annotation
stripped_type = strip_annotated_type(inner_type)
if is_typeddict(stripped_type) and is_mapping(data):
return _transform_typeddict(data, stripped_type)
| from __future__ import annotations
_T = TypeVar("_T")
PropertyFormat = Literal["iso8601", "custom"]
class PropertyInfo:
"""Metadata class to be used in Annotated types to provide information about a given type.
For example:
class MyParams(TypedDict):
account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
"""
alias: Optional[str]
format: Optional[PropertyFormat]
format_template: Optional[str]
def __init__(
self,
*,
alias: Optional[str] = None,
format: Optional[PropertyFormat] = None,
format_template: Optional[str] = None,
) -> None:
self.alias = alias
self.format = format
self.format_template = format_template
@override
def __repr__(self) -> str:
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}')"
def maybe_transform(
data: object,
expected_type: object,
) -> Optional[Any]:
"""Wrapper over `transform()` that allows `None` to be passed.
See `transform()` for more details.
"""
if data is None:
return None
return transform(data, expected_type)
# Wrapper over _transform_recursive providing fake types
def transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias='cardID')]]
transformed = transform({'card_id': '<my card ID>'}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = _transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
def _get_annotated_type(type_: type) -> Union[type, None]:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
"""
if is_required_type(type_):
# Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
type_ = get_args(type_)[0]
if is_annotated_type(type_):
return type_
return None
def _maybe_transform_key(key: str, type_: type) -> str:
    """Return the alias declared for `key` in `type_`, or `key` unchanged.

    Only `Annotated` types carrying `PropertyInfo` metadata with a non-None
    `alias` cause a rename; any other annotation leaves the key as-is.
    """
    annotated_type = _get_annotated_type(type_)
    if annotated_type is not None:
        # Skip the first arg (the underlying type); the rest is metadata.
        for meta in get_args(annotated_type)[1:]:
            if isinstance(meta, PropertyInfo) and meta.alias is not None:
                return meta.alias
    return key
def _transform_recursive(
data: object,
*,
annotation: type,
inner_type: Optional[type] = None,
) -> object:
"""Transform the given data against the expected type.
Args:
annotation: The direct type annotation given to the particular piece of data.
This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc
inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type
is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in
the list can be transformed using the metadata from the container type.
Defaults to the same value as the `annotation` argument.
"""
if inner_type is None:
inner_type = annotation
stripped_type = strip_annotated_type(inner_type)
if is_typeddict(stripped_type) and is_mapping(data):
return _transform_typeddict(data, stripped_type)
| if is_list_type(stripped_type) and is_list(data): | 2 | 2023-12-14 18:12:17+00:00 | 4k |
Jack24658735/FedLGT | models/CTran.py | [
{
"identifier": "SelfAttnLayer",
"path": "models/transformer_layers.py",
"snippet": "class SelfAttnLayer(nn.Module):\n def __init__(self, d_model, nhead = 4,dropout=0.1):\n super().__init__()\n self.transformer_layer = TransformerEncoderLayer(d_model, nhead, d_model*1, dropout=dropout, ... | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .transformer_layers import SelfAttnLayer
from .backbone import Backbone, BackboneCLIP
from .utils import custom_replace,weights_init
from .position_enc import PositionEmbeddingSine,positionalencoding2d
from .ml_decoder import MLDecoder | 3,382 |
class CTranModel(nn.Module):
def __init__(self,num_labels,use_lmt,pos_emb=False,layers=3,heads=4,dropout=0.1,int_loss=0,no_x_features=False, state_weight=None, label_weight=None):
super(CTranModel, self).__init__()
self.use_lmt = use_lmt
self.no_x_features = no_x_features # (for no image features)
# ResNet backbone
|
class CTranModel(nn.Module):
def __init__(self,num_labels,use_lmt,pos_emb=False,layers=3,heads=4,dropout=0.1,int_loss=0,no_x_features=False, state_weight=None, label_weight=None):
super(CTranModel, self).__init__()
self.use_lmt = use_lmt
self.no_x_features = no_x_features # (for no image features)
# ResNet backbone | self.backbone = Backbone() | 1 | 2023-12-09 09:16:59+00:00 | 4k |
AgriCodeHub/dairy-django-backend | tests/core/tests/conftest.py | [
{
"identifier": "CowAvailabilityChoices",
"path": "core/choices.py",
"snippet": "class CowAvailabilityChoices(models.TextChoices):\n \"\"\"\n Choices for the availability status of a cow.\n\n Choices:\n - `ALIVE`: Cow is alive and active.\n - `SOLD`: Cow has been sold.\n - `DEAD`: Cow ... | from datetime import timedelta
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.choices import (
CowAvailabilityChoices,
CowBreedChoices,
CowCategoryChoices,
CowPregnancyChoices,
CowProductionStatusChoices,
)
from users.choices import SexChoices
from core.utils import todays_date
import pytest | 2,540 | "is_farm_owner": True,
}
farm_owner_login_data = {
"username": "owner@example.com",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_owner_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_owner_login_data)
farm_owner_token = response.data["auth_token"]
# Create farm manager user
farm_manager_data = {
"username": "manager@example.com",
"email": "abc2@gmail.com",
"password": "testpassword",
"first_name": "Farm",
"last_name": "Manager",
"phone_number": "+254755555555",
"sex": SexChoices.MALE,
"is_farm_manager": True,
}
farm_manager_login_data = {
"username": "manager@example.com",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_manager_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_manager_login_data)
farm_manager_token = response.data["auth_token"]
# Create assistant farm manager user
asst_farm_manager_data = {
"username": "assistant@example.com",
"email": "abc3@gmail.com",
"password": "testpassword",
"first_name": "Assistant",
"last_name": "Farm Manager",
"phone_number": "+254744444444",
"sex": SexChoices.FEMALE,
"is_assistant_farm_manager": True,
}
asst_farm_manager_login_data = {
"username": "assistant@example.com",
"password": "testpassword",
}
response = client.post("/auth/users/", asst_farm_manager_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), asst_farm_manager_login_data)
asst_farm_manager_token = response.data["auth_token"]
# Create team leader user
team_leader_data = {
"username": "leader@example.com",
"email": "abc4@gmail.com",
"password": "testpassword",
"first_name": "Team",
"last_name": "Leader",
"phone_number": "+254733333333",
"sex": SexChoices.MALE,
"is_team_leader": True,
}
team_leader_login_data = {
"username": "leader@example.com",
"password": "testpassword",
}
response = client.post("/auth/users/", team_leader_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), team_leader_login_data)
assert response.status_code == status.HTTP_200_OK
team_leader_token = response.data["auth_token"]
# Create farm worker user
farm_worker_data = {
"username": "worker@example.com",
"email": "abc5@gmail.com",
"password": "testpassword",
"first_name": "Farm",
"last_name": "Worker",
"phone_number": "+254722222222",
"sex": SexChoices.FEMALE,
"is_farm_worker": True,
}
farm_worker_login_data = {
"username": "worker@example.com",
"password": "testpassword",
}
response = client.post("/auth/users/", farm_worker_data)
# Retrieve the token after login
response = client.post(reverse("users:login"), farm_worker_login_data)
farm_worker_token = response.data["auth_token"]
return {
"client": client,
"farm_owner_token": farm_owner_token,
"farm_manager_token": farm_manager_token,
"asst_farm_manager_token": asst_farm_manager_token,
"team_leader_token": team_leader_token,
"farm_worker_token": farm_worker_token,
}
@pytest.fixture
def setup_cows():
"""
Fixture to create a sample cows object for testing.
"""
general_cow = {
"name": "General Cow",
"breed": {"name": CowBreedChoices.JERSEY},
"date_of_birth": todays_date - timedelta(days=370),
"gender": SexChoices.FEMALE,
"availability_status": CowAvailabilityChoices.ALIVE,
"current_pregnancy_status": CowPregnancyChoices.OPEN,
|
@pytest.fixture()
@pytest.mark.django_db
def setup_users():
    """Register one user per farm role and return their auth tokens.

    Returns:
        dict with the shared `APIClient` under ``"client"`` plus an auth token
        for each role: farm owner, farm manager, assistant farm manager,
        team leader and farm worker.
    """
    client = APIClient()

    def register_and_login(username, email, first_name, last_name, phone_number, sex, role_flag):
        """Create a user carrying `role_flag`, log them in, return the auth token."""
        user_data = {
            "username": username,
            "email": email,
            "password": "testpassword",
            "first_name": first_name,
            "last_name": last_name,
            "phone_number": phone_number,
            "sex": sex,
            role_flag: True,
        }
        client.post("/auth/users/", user_data)

        login_data = {"username": username, "password": "testpassword"}
        response = client.post(reverse("users:login"), login_data)
        # Fail fast on a broken registration/login; previously only the team
        # leader's login status was asserted, which hid failures for the rest.
        assert response.status_code == status.HTTP_200_OK
        return response.data["auth_token"]

    return {
        "client": client,
        "farm_owner_token": register_and_login(
            "owner@example.com", "abc1@gmail.com", "Farm", "Owner",
            "+254787654321", SexChoices.MALE, "is_farm_owner",
        ),
        "farm_manager_token": register_and_login(
            "manager@example.com", "abc2@gmail.com", "Farm", "Manager",
            "+254755555555", SexChoices.MALE, "is_farm_manager",
        ),
        "asst_farm_manager_token": register_and_login(
            "assistant@example.com", "abc3@gmail.com", "Assistant", "Farm Manager",
            "+254744444444", SexChoices.FEMALE, "is_assistant_farm_manager",
        ),
        "team_leader_token": register_and_login(
            "leader@example.com", "abc4@gmail.com", "Team", "Leader",
            "+254733333333", SexChoices.MALE, "is_team_leader",
        ),
        "farm_worker_token": register_and_login(
            "worker@example.com", "abc5@gmail.com", "Farm", "Worker",
            "+254722222222", SexChoices.FEMALE, "is_farm_worker",
        ),
    }
@pytest.fixture
def setup_cows():
"""
Fixture to create a sample cows object for testing.
"""
general_cow = {
"name": "General Cow",
"breed": {"name": CowBreedChoices.JERSEY},
"date_of_birth": todays_date - timedelta(days=370),
"gender": SexChoices.FEMALE,
"availability_status": CowAvailabilityChoices.ALIVE,
"current_pregnancy_status": CowPregnancyChoices.OPEN, | "category": CowCategoryChoices.HEIFER, | 2 | 2023-12-09 06:56:42+00:00 | 4k |
facebookresearch/chat2map-official | habitat_audio/simulator.py | [
{
"identifier": "load_points_data",
"path": "habitat_audio/utils.py",
"snippet": "def load_points_data(parent_folder, graph_file, transform=True, scene_dataset=\"replica\"):\n \"\"\"\n Main method to load points data from files stored on disk and transform if necessary\n :param parent_folder: p... | from typing import List
from collections import defaultdict
from scipy.io import wavfile
from scipy.signal import fftconvolve
from habitat.core.registry import registry
from habitat.tasks.utils import (
cartesian_to_polar,
quaternion_from_coeff,
quaternion_rotate_vector,
)
from habitat_sim.utils.common import quat_from_angle_axis, quat_from_coeffs, quat_to_angle_axis
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.core.simulator import (Config, AgentState, ShortestPathPoint)
from habitat_audio.utils import load_points_data, _to_tensor
import logging
import pickle
import os
import cv2
import torch
import librosa
import scipy
import numba
import numpy as np
import networkx as nx
import habitat_sim | 3,596 | self.rir_sampling_rate = self.audio_cfg.RIR_SAMPLING_RATE
self._max_valid_impulse_length = self.audio_cfg.MAX_VALID_IMPULSE_LENGTH_AFTER_REMOVING_LEADING_ZEROS
self.hop_length = self.audio_cfg.HOP_LENGTH
self.n_fft = self.audio_cfg.N_FFT
self.win_length = self.audio_cfg.WIN_LENGTH
self._anechoic_audio_slice_length = self.audio_cfg.ANECHOIC_AUDIO_SLICE_LENGTH
self._audio_wav_shape = self.task_cfg.CONTEXT_SELF_AUDIO_SENSOR.FEATURE_SHAPE
print(f"LOADING ANECHOIC AUDIO FOR train")
anechoic_audio_dir = self.audio_cfg.ANECHOIC_DIR
assert os.path.isdir(anechoic_audio_dir)
anechoic_audio_filenames = os.listdir(anechoic_audio_dir)
self._anechoic_filename_2_audioData = {}
for anechoic_audio_filename in anechoic_audio_filenames:
anechoic_audio_filePath = os.path.join(anechoic_audio_dir, anechoic_audio_filename)
assert os.path.isfile(anechoic_audio_filePath)
anechoic_audioSR, anechoic_audioData = wavfile.read(anechoic_audio_filePath)
assert anechoic_audioSR == self.rir_sampling_rate
assert anechoic_audio_filename.split(".")[0] not in self._anechoic_filename_2_audioData
self._anechoic_filename_2_audioData[anechoic_audio_filename.split(".")[0]] = anechoic_audioData
assert "CONTEXT_VIEW_POSE_SENSOR" in self.task_cfg.SENSORS
self._pose_feat_shape = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.FEATURE_SHAPE
self._add_truncated_gaussian_pose_noise = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.ADD_TRUNCATED_GAUSSIAN_NOISE
self._truncated_gaussian_pose_noise_cfg = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.TRUNCATED_GAUSSIAN_NOISE
# self._truncated_gaussian_pose_noise_random_multipliers = None
self._gaussian_pose_noise_multipliers = None
if self._add_truncated_gaussian_pose_noise:
assert os.path.isfile(self._truncated_gaussian_pose_noise_cfg.GAUSSIAN_NOISE_MULTIPLIERS_PATH)
with open(self._truncated_gaussian_pose_noise_cfg.GAUSSIAN_NOISE_MULTIPLIERS_PATH, "rb") as fi:
self._gaussian_pose_noise_multipliers = pickle.load(fi)
self.max_context_length = self.env_cfg.MAX_CONTEXT_LENGTH
self.visual_budget = self.env_cfg.VISUAL_BUDGET
self.max_query_length = self.env_cfg.MAX_QUERY_LENGTH
assert self.max_query_length == (self.config.ALL_AGENTS.NUM * self.max_context_length)
self.render_local_ego_occ_maps_from_depth_images = self.config.RENDER_LOCAL_EGO_OCC_MAPS_FROM_DEPTH_IMAGES
self.local_occMap_cfg = self.config.LOCAL_MAP
self.ego_mapper = None
self.redwood_depth_noise_dist_model = None
self.redwood_depth_noise_multiplier = None
if self.render_local_ego_occ_maps_from_depth_images:
self.ego_mapper = EgoMap(
map_size=self.local_occMap_cfg.SIZE,
map_scale=self.local_occMap_cfg.SCALE,
position=self.local_occMap_cfg.AGENT_POSITION,
depth_sensor_hfov=self.local_occMap_cfg.HFOV_DEPTH_IMG,
height_thresh=self.local_occMap_cfg.HEIGHT_THRESH,
depth_sensor_min_depth=self.local_occMap_cfg.MIN_DEPTH,
depth_sensor_max_depth=self.local_occMap_cfg.MAX_DEPTH,
depth_sensor_width=self.local_occMap_cfg.WIDTH_DEPTH_IMG,
depth_sensor_height=self.local_occMap_cfg.HEIGHT_DEPTH_IMG,
depth_sensor_normalize_depth=self.local_occMap_cfg.NORMALIZE_DEPTH_IMG,
)
if self.config.DEPTH_SENSOR.ADD_REDWOOD_NOISE:
"""src: https://github.com/facebookresearch/habitat-sim/blob/main/src_python/habitat_sim/sensors/noise_models/redwood_depth_noise_model.py"""
assert os.path.isfile(self.config.DEPTH_SENSOR.REDWOOD_DEPTH_NOISE_DIST_MODEL)
self.redwood_depth_noise_dist_model = np.load(self.config.DEPTH_SENSOR.REDWOOD_DEPTH_NOISE_DIST_MODEL)
self.redwood_depth_noise_dist_model = self.redwood_depth_noise_dist_model.reshape(80, 80, 5)
self.redwood_depth_noise_multiplier = self.config.DEPTH_SENSOR.REDWOOD_NOISE_MULTIPLIER
assert os.path.isfile(self.config.DEPTH_SENSOR.REDWOOD_NOISE_RAND_NUMS_PATH)
with open(self.config.DEPTH_SENSOR.REDWOOD_NOISE_RAND_NUMS_PATH, "rb") as fi:
self._redwood_depth_noise_rand_nums = pickle.load(fi)
self.stitch_top_down_maps = self.config.STITCH_TOP_DOWN_MAPS
self.rir_dir = self.audio_cfg.RIR_DIR
assert os.path.isdir(self.rir_dir)
self.num_agents = self.config.ALL_AGENTS.NUM
assert self.num_agents == 2
self.total_context_length = None
self.agent_utterance_allSwitches = None
self.lst_anechoicAudio_filenameNstartSamplingIdx = None
self.used_query_nodsNrots = None
self._current_context_rgb = None
self._current_context_ego_local_map = None
self._current_context_view_pose = None
self._current_context_view_rAz = None
self._previous_context_view_mask = None
self._current_context_selfAudio = None
self._current_context_otherAudio = None
self._current_context_otherAudio_pose = None
self._current_context_audio_mask = None
self._all_context_audio_mask = None
self._current_query_globCanMapEgoCrop_gt = None
self._current_query_globCanMapEgoCrop_gt_exploredPartMask = None
self._current_query_mask = None
self._all_query_mask = None
if self.stitch_top_down_maps:
self._current_stitched_query_globCanMapEgoCrop_gt = None
assert self.config.SCENE_DATASET in ["mp3d"],\
"SCENE_DATASET needs to be in ['mp3d']"
self._previous_receiver_position_indexs = [None] * self.num_agents
self._current_receiver_position_indexs = [None] * self.num_agents
self._previous_rotation_angles = [None] * self.num_agents
self._current_rotation_angles = [None] * self.num_agents
self._frame_cache = defaultdict(dict)
self._episode_count = 0
self._step_count = 0
self._view_count = self.num_agents
self._action = 1
self._is_episode_active = None
self._previous_step_collideds = [None] * self.num_agents
self._nodes_n_azimuths_lists = [None] * self.num_agents
self._position_to_index_mapping = dict()
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
EPS = 1e-8
SCENE_NAME_TO_IDX = {
"mp3d":
{'sT4fr6TAbpF': 0, 'E9uDoFAP3SH': 1, 'VzqfbhrpDEA': 2, 'kEZ7cmS4wCh': 3, '29hnd4uzFmX': 4, 'ac26ZMwG7aT': 5,
's8pcmisQ38h': 6, 'rPc6DW4iMge': 7, 'EDJbREhghzL': 8, 'mJXqzFtmKg4': 9, 'B6ByNegPMKs': 10, 'JeFG25nYj2p': 11,
'82sE5b5pLXE': 12, 'D7N2EKCX4Sj': 13, '7y3sRwLe3Va': 14, '5LpN3gDmAk7': 15, 'gTV8FGcVJC9': 16, 'ur6pFq6Qu1A': 17,
'qoiz87JEwZ2': 18, 'PuKPg4mmafe': 19, 'VLzqgDo317F': 20, 'aayBHfsNo7d': 21, 'JmbYfDe2QKZ': 22, 'XcA2TqTSSAj': 23,
'8WUmhLawc2A': 24, 'sKLMLpTHeUy': 25, 'r47D5H71a5s': 26, 'Uxmj2M2itWa': 27, 'Pm6F8kyY3z2': 28, 'p5wJjkQkbXX': 29,
'759xd9YjKW5': 30, 'JF19kD82Mey': 31, 'V2XKFyX4ASd': 32, '1LXtFkjw3qL': 33, '17DRP5sb8fy': 34, '5q7pvUzZiYa': 35,
'VVfe2KiqLaN': 36, 'Vvot9Ly1tCj': 37, 'ULsKaCPVFJR': 38, 'D7G3Y4RVNrH': 39, 'uNb9QFRL6hY': 40, 'ZMojNkEp431': 41,
'2n8kARJN3HM': 42, 'vyrNrziPKCB': 43, 'e9zR4mvMWw7': 44, 'r1Q1Z4BcV1o': 45, 'PX4nDJXEHrG': 46, 'YmJkqBEsHnH': 47,
'b8cTxDM8gDG': 48, 'GdvgFV5R1Z5': 49, 'pRbA3pwrgk9': 50, 'jh4fc5c5qoQ': 51, '1pXnuDYAj8r': 52, 'S9hNv5qa7GM': 53,
'VFuaQ6m2Qom': 54, 'cV4RVeZvu5T': 55, 'SN83YJsR3w2': 56, '2azQ1b91cZZ': 57, '5ZKStnWn8Zo': 58, '8194nk5LbLH': 59,
'ARNzJeq3xxb': 60, 'EU6Fwq7SyZv': 61, 'QUCTc6BB5sX': 62, 'TbHJrupSAjP': 63, 'UwV83HsGsw3': 64, 'Vt2qJdWjCF2': 65,
'WYY7iVyf5p8': 66, 'X7HyMhZNoso': 67, 'YFuZgdQ5vWj': 68, 'Z6MFQCViBuw': 69, 'fzynW3qQPVF': 70, 'gYvKGZ5eRqb': 71,
'gxdoqLR6rwA': 72, 'jtcxE69GiFV': 73, 'oLBMNvg9in8': 74, 'pLe4wQe7qrG': 75, 'pa4otMbVnkk': 76, 'q9vSo1VnCiC': 77,
'rqfALeAoiTq': 78, 'wc2JMjhGNzB': 79, 'x8F5xyUWy9e': 80, 'yqstnuAEVhm': 81, 'zsNo4HB9uLZ': 82},
}
SCENE_SPLITS = {
"mp3d":
{
"train": ['sT4fr6TAbpF', 'E9uDoFAP3SH', 'VzqfbhrpDEA', 'kEZ7cmS4wCh', '29hnd4uzFmX',
'ac26ZMwG7aT', 's8pcmisQ38h', 'rPc6DW4iMge', 'EDJbREhghzL', 'mJXqzFtmKg4',
'B6ByNegPMKs', 'JeFG25nYj2p', '82sE5b5pLXE', 'D7N2EKCX4Sj', '7y3sRwLe3Va',
'5LpN3gDmAk7', 'gTV8FGcVJC9', 'ur6pFq6Qu1A', 'qoiz87JEwZ2', 'PuKPg4mmafe',
'VLzqgDo317F', 'aayBHfsNo7d', 'JmbYfDe2QKZ', 'XcA2TqTSSAj', '8WUmhLawc2A',
'sKLMLpTHeUy', 'r47D5H71a5s', 'Uxmj2M2itWa', 'Pm6F8kyY3z2', 'p5wJjkQkbXX',
'759xd9YjKW5', 'JF19kD82Mey', 'V2XKFyX4ASd', '1LXtFkjw3qL', '17DRP5sb8fy',
'5q7pvUzZiYa', 'VVfe2KiqLaN', 'Vvot9Ly1tCj', 'ULsKaCPVFJR', 'D7G3Y4RVNrH',
'uNb9QFRL6hY', 'ZMojNkEp431', '2n8kARJN3HM', 'vyrNrziPKCB', 'e9zR4mvMWw7',
'r1Q1Z4BcV1o', 'PX4nDJXEHrG', 'YmJkqBEsHnH', 'b8cTxDM8gDG', 'GdvgFV5R1Z5',
'pRbA3pwrgk9', 'jh4fc5c5qoQ', '1pXnuDYAj8r', 'S9hNv5qa7GM', 'VFuaQ6m2Qom',
'cV4RVeZvu5T', 'SN83YJsR3w2', ],
"val": ['QUCTc6BB5sX', 'EU6Fwq7SyZv', '2azQ1b91cZZ', 'Z6MFQCViBuw', 'pLe4wQe7qrG', 'oLBMNvg9in8',
'X7HyMhZNoso', 'zsNo4HB9uLZ', 'TbHJrupSAjP', '8194nk5LbLH', ],
"test": ['pa4otMbVnkk', 'yqstnuAEVhm', '5ZKStnWn8Zo', 'Vt2qJdWjCF2', 'wc2JMjhGNzB', 'fzynW3qQPVF',
'UwV83HsGsw3', 'q9vSo1VnCiC', 'ARNzJeq3xxb', 'gYvKGZ5eRqb', 'jtcxE69GiFV', 'gxdoqLR6rwA',
'WYY7iVyf5p8', 'YFuZgdQ5vWj', 'rqfALeAoiTq', 'x8F5xyUWy9e',]
},
}
ALL_AZIMUTHS = [0, 90, 180, 270]
def asnumpy(v):
    """Convert a torch tensor to a numpy array; numpy arrays pass through as-is.

    Raises:
        ValueError: if `v` is neither a torch tensor nor a numpy ndarray.
    """
    if isinstance(v, np.ndarray):
        return v
    if torch.is_tensor(v):
        return v.cpu().numpy()
    raise ValueError('Invalid input')
# Read about the noise model here: http://www.alexteichman.com/octo/clams/
# Original source code: http://redwood-data.org/indoor/data/simdepth.py
@numba.jit(nopython=True, fastmath=True)
def undistort_redwood_depth_noise(x, y, z, model):
    """Invert the Redwood distortion table for a single depth sample.

    (x, y) are pixel coordinates in a 640x480 frame, `z` the distorted depth
    and `model` the 80x80x5 calibration table. Returns the undistorted depth,
    or 0.0 when the interpolated distortion factor is (near) zero.
    """
    # The 5 model slices cover a range of depths; pick the two neighbouring
    # slices around `z` and the linear interpolation weight between them.
    hi = int((z + 1) / 2)
    lo = hi - 1
    weight = (z - (lo * 2.0 + 1.0)) / 2.0
    # The table is sampled on an 80x80 grid over the 640x480 image.
    col = x // 8
    row = y // 6
    factor = (1.0 - weight) * model[row, col, min(max(lo, 0), 4)] + weight * model[row, col, min(hi, 4)]

    if factor < 1e-5:
        return 0.0
    return z / factor
@numba.jit(nopython=True, parallel=True, fastmath=True)
def simulate_redwood_depth_noise(gt_depth, model, noise_multiplier, rand_nums):
    """Apply the Redwood depth-sensor noise model to a clean depth map.

    Ported from habitat-sim's redwood_depth_noise_model. `gt_depth` is an
    (H, W) depth image, `model` the 80x80x5 distortion table, and `rand_nums`
    an (H, W, 3) array of pre-drawn standard-normal samples (passed in instead
    of drawn here so results are reproducible). Returns an (H, W) noisy depth
    map with 0.0 wherever the sensor would return no reading.
    """
    noisy_depth = np.empty_like(gt_depth)

    H, W = gt_depth.shape
    ymax, xmax = H - 1.0, W - 1.0

    # rand_nums = np.random.randn(H, W, 3).astype(np.float32)

    # Parallelize just the outer loop. This doesn't change the speed
    # noticeably but reduces CPU usage compared to two parallel loops
    for j in numba.prange(H):
        for i in range(W):
            # Jitter the sampling location by ~0.25 px of Gaussian noise.
            y = int(
                min(max(j + rand_nums[j, i, 0] * 0.25 * noise_multiplier, 0.0), ymax)
                + 0.5
            )
            x = int(
                min(max(i + rand_nums[j, i, 1] * 0.25 * noise_multiplier, 0.0), xmax)
                + 0.5
            )

            # Downsample
            d = gt_depth[y - y % 2, x - x % 2]
            # If the depth is greater than 10, the sensor will just return 0
            if d >= 10.0:
                noisy_depth[j, i] = 0.0
            else:
                # Distort
                # The noise model was originally made for a 640x480 sensor,
                # so re-map our arbitrarily sized sensor to that size!
                undistorted_d = undistort_redwood_depth_noise(
                    int(x / xmax * 639.0 + 0.5), int(y / ymax * 479.0 + 0.5), d, model
                )

                if undistorted_d == 0.0:
                    noisy_depth[j, i] = 0.0
                else:
                    # Quantize the disparity (35.130 / d, in eighth steps) with
                    # additive Gaussian noise, then convert back to depth.
                    denom = round(
                        (
                            35.130 / undistorted_d
                            + rand_nums[j, i, 2] * 0.027778 * noise_multiplier
                        )
                        * 8.0
                    )
                    if denom <= 1e-5:
                        noisy_depth[j, i] = 0.0
                    else:
                        noisy_depth[j, i] = 35.130 * 8.0 / denom
    return noisy_depth
class EgoMap:
    r"""Estimates a two-channel top-down egocentric map from one depth image.

    Channel 0 marks obstacles (points whose height falls inside
    `height_thresh`); channel 1 marks explored space (all points below the
    upper height threshold).

    Args:
        map_size: output map side length, in cells.
        map_scale: meters per cell.
        position: sensor position; only the height (index 1) is used to lift
            the point cloud to world height.
        depth_sensor_hfov: horizontal field of view of the depth sensor (deg).
        height_thresh: (low, high) height band in meters defining obstacles.
        depth_sensor_min_depth: minimum depth in meters (for un-normalizing).
        depth_sensor_max_depth: maximum depth in meters (for un-normalizing).
        depth_sensor_width: depth image width in pixels.
        depth_sensor_height: depth image height in pixels.
        depth_sensor_normalize_depth: whether incoming depth is in [0, 1] and
            must be rescaled to meters before projection.
    """
    def __init__(
        self, map_size=31, map_scale=0.1, position=[0, 1.25, 0], depth_sensor_hfov=90,
        height_thresh=(0.2, 1.5), depth_sensor_min_depth=0, depth_sensor_max_depth=10,
        depth_sensor_width=128, depth_sensor_height=128, depth_sensor_normalize_depth=False,
    ):
        # Depth sensor attributes
        self.depth_sensor_normalize_depth = depth_sensor_normalize_depth

        # Map statistics
        self.map_size = map_size
        self.map_scale = map_scale

        # Agent height for pointcloud transformation
        self.sensor_height = position[1]

        # Compute intrinsic matrix (vfov derived from hfov and aspect ratio)
        hfov = float(depth_sensor_hfov) * np.pi / 180
        vfov = 2 * np.arctan((depth_sensor_height / depth_sensor_width) * np.tan(hfov / 2.0))
        self.intrinsic_matrix = np.array([[1 / np.tan(hfov / 2.), 0., 0., 0.],
                                          [0., 1 / np.tan(vfov / 2.), 0., 0.],
                                          [0., 0., 1, 0],
                                          [0., 0., 0, 1]])
        self.inverse_intrinsic_matrix = np.linalg.inv(self.intrinsic_matrix)

        # Height thresholds for obstacles
        self.height_thresh = height_thresh

        # Depth processing
        self.min_depth = float(depth_sensor_min_depth)
        self.max_depth = float(depth_sensor_max_depth)

        # Pre-compute a grid of normalized image coordinates ([-1, 1]) for
        # depth unprojection; reused for every frame.
        W = depth_sensor_width
        H = depth_sensor_height
        self.proj_xs, self.proj_ys = np.meshgrid(
            np.linspace(-1, 1, W),
            np.linspace(1, -1, H)
        )

    def convert_to_pointcloud(self, depth):
        """
        Inputs:
            depth = (H, W, 1) numpy array, in meters

        Returns:
            xyz_camera = (N, 3) numpy array for (X, Y, Z) in egocentric camera
            coordinates, keeping only valid points within the map's forward range
        """
        depth_float = depth.astype(np.float32)[..., 0]

        # =========== Convert to camera coordinates ============
        W = depth.shape[1]
        xs = np.copy(self.proj_xs).reshape(-1)
        ys = np.copy(self.proj_ys).reshape(-1)
        depth_float = depth_float.reshape(-1)
        # Filter out invalid depths: zero readings and anything beyond the
        # extent the map can represent.
        max_forward_range = self.map_size * self.map_scale
        valid_depths = (depth_float != 0.0) & (depth_float <= max_forward_range)
        xs = xs[valid_depths]
        ys = ys[valid_depths]
        depth_float = depth_float[valid_depths]
        # Unproject
        # negate depth as the camera looks along -Z
        xys = np.vstack((xs * depth_float,
                         ys * depth_float,
                         -depth_float, np.ones(depth_float.shape)))
        inv_K = self.inverse_intrinsic_matrix
        xyz_camera = np.matmul(inv_K, xys).T  # XYZ in the camera coordinate system
        xyz_camera = xyz_camera[:, :3] / xyz_camera[:, 3][:, np.newaxis]

        return xyz_camera

    def safe_assign(self, im_map, x_idx, y_idx, value):
        """Assign `value` at (x_idx, y_idx), silently dropping out-of-bounds indices."""
        try:
            im_map[x_idx, y_idx] = value
        except IndexError:
            # Retry keeping only the indices that fall inside the map.
            valid_idx1 = np.logical_and(x_idx >= 0, x_idx < im_map.shape[0])
            valid_idx2 = np.logical_and(y_idx >= 0, y_idx < im_map.shape[1])
            valid_idx = np.logical_and(valid_idx1, valid_idx2)
            im_map[x_idx[valid_idx], y_idx[valid_idx]] = value

    def _get_depth_projection(self, sim_depth):
        """
        Project pixels visible in depth-map to ground-plane.

        Returns an (map_size, map_size, 2) uint8/bool map with obstacle and
        explored channels.
        """
        if self.depth_sensor_normalize_depth:
            # Un-normalize [0, 1] depth back to meters.
            depth = sim_depth * (self.max_depth - self.min_depth) + self.min_depth
        else:
            depth = sim_depth

        XYZ_ego = self.convert_to_pointcloud(depth)

        # Adding agent's height to the pointcloud
        XYZ_ego[:, 1] += self.sensor_height

        # Convert to grid coordinate system: agent sits at the bottom-center
        # cell of the map, facing "up" (decreasing grid_y).
        V = self.map_size
        Vby2 = V // 2

        points = XYZ_ego

        grid_x = (points[:, 0] / self.map_scale) + Vby2
        grid_y = (points[:, 2] / self.map_scale) + V

        # Filter out invalid points
        valid_idx = (grid_x >= 0) & (grid_x <= V-1) & (grid_y >= 0) & (grid_y <= V-1)
        points = points[valid_idx, :]
        grid_x = grid_x[valid_idx].astype(int)
        grid_y = grid_y[valid_idx].astype(int)

        # Create empty maps for the two channels
        obstacle_mat = np.zeros((self.map_size, self.map_size), np.uint8)
        explore_mat = np.zeros((self.map_size, self.map_size), np.uint8)

        # Compute obstacle locations: points inside the (low, high) height band.
        high_filter_idx = points[:, 1] < self.height_thresh[1]
        low_filter_idx = points[:, 1] > self.height_thresh[0]
        obstacle_idx = np.logical_and(low_filter_idx, high_filter_idx)

        self.safe_assign(obstacle_mat, grid_y[obstacle_idx], grid_x[obstacle_idx], 1)
        kernel = np.ones((3, 3), np.uint8)
        obstacle_mat = cv2.dilate(obstacle_mat, kernel, iterations=1)

        # Compute explored locations: everything below the upper threshold.
        explored_idx = high_filter_idx
        self.safe_assign(explore_mat, grid_y[explored_idx], grid_x[explored_idx], 1)
        kernel = np.ones((3, 3), np.uint8)
        explore_mat = cv2.dilate(explore_mat, kernel, iterations=1)

        # Smoothen the maps (close small holes)
        kernel = np.ones((3, 3), np.uint8)
        obstacle_mat = cv2.morphologyEx(obstacle_mat, cv2.MORPH_CLOSE, kernel)
        explore_mat = cv2.morphologyEx(explore_mat, cv2.MORPH_CLOSE, kernel)

        # Ensure all expanded regions in obstacle_mat are accounted for in explored_mat
        explore_mat = np.logical_or(explore_mat, obstacle_mat)

        return np.stack([obstacle_mat, explore_mat], axis=2)

    def get_observation(
        self, depth_img,
    ) -> object:
        """Return the egocentric occupancy map for `depth_img` (tensor or ndarray)."""
        # convert to a numpy array with a trailing channel dimension
        sim_depth = np.expand_dims(asnumpy(depth_img), axis=-1)
        ego_map_gt = self._get_depth_projection(sim_depth)

        return ego_map_gt
class DummySimulatorMultiAgent:
    """Stand-in for the habitat simulator that only tracks agent poses.

    Mirrors the small subset of the simulator API used by the task code
    (pose get/set, sensor-observation caching, seed/close no-ops) without
    launching a real backend.
    """

    def __init__(self, num_agents=2):
        self.num_agents = num_agents
        # Per-agent poses; agent 0's pose is mirrored in `position`/`rotation`.
        self.positions = [None] * num_agents
        self.rotations = [None] * num_agents
        self._sim_obs = None
        self.position = None
        self.rotation = None

    def seed(self, seed):
        """No-op: the dummy simulator has no randomness to seed."""
        pass

    # Bug fix: defaults changed from mutable `[]` to immutable `()` — mutable
    # default arguments are shared across calls and a classic Python pitfall.
    def set_agent_state(self, positions=(), rotations=()):
        """Store one (position, rotation) pair per agent.

        `positions`/`rotations` must hold at least one entry each; agent 0's
        pose is additionally exposed via `self.position` / `self.rotation`.
        """
        for i, pos in enumerate(positions):
            self.positions[i] = np.array(pos, dtype=np.float32)
            self.rotations[i] = rotations[i]
        self.position = np.array(positions[0], dtype=np.float32)
        self.rotation = rotations[0]

    def get_agent_state(self):
        """Return a snapshot object exposing `positions`, `rotations` and agent-0 pose."""
        class State:
            def __init__(self, positions=(), rotations=()):
                self.positions = list(positions)
                self.rotations = list(rotations)
                self.position = positions[0]
                self.rotation = rotations[0]

        return State(self.positions, self.rotations)

    def set_sensor_observations(self, sim_obs):
        """Cache raw sensor observations for a later `get_sensor_observations`."""
        self._sim_obs = sim_obs

    def get_sensor_observations(self):
        return self._sim_obs

    def close(self):
        """No-op: nothing to release."""
        pass
@registry.register_simulator()
class HabitatSimAudioEnabledMultiAgentActiveMapping(HabitatSim):
def action_space_shortest_path(self, source: AgentState, targets: List[AgentState], agent_id: int = 0) -> List[
ShortestPathPoint]:
pass
def __init__(self, config: Config) -> None:
"""Changes made to simulator wrapper over habitat-sim
This simulator allows two agents to have a conversation episode between them as per the Chat2Map task
Args:
config: configuration for initializing the simulator.
"""
super().__init__(config)
self.env_cfg = self.config.SIM_ENV
self.task_cfg = self.config.SIM_TASK
self.audio_cfg = self.config.AUDIO
self.passive_mapping_cfg = self.config.SIM_TRAINER
self.scene_dataset = self.config.SCENE_DATASET
self.rir_sampling_rate = self.audio_cfg.RIR_SAMPLING_RATE
self._max_valid_impulse_length = self.audio_cfg.MAX_VALID_IMPULSE_LENGTH_AFTER_REMOVING_LEADING_ZEROS
self.hop_length = self.audio_cfg.HOP_LENGTH
self.n_fft = self.audio_cfg.N_FFT
self.win_length = self.audio_cfg.WIN_LENGTH
self._anechoic_audio_slice_length = self.audio_cfg.ANECHOIC_AUDIO_SLICE_LENGTH
self._audio_wav_shape = self.task_cfg.CONTEXT_SELF_AUDIO_SENSOR.FEATURE_SHAPE
print(f"LOADING ANECHOIC AUDIO FOR train")
anechoic_audio_dir = self.audio_cfg.ANECHOIC_DIR
assert os.path.isdir(anechoic_audio_dir)
anechoic_audio_filenames = os.listdir(anechoic_audio_dir)
self._anechoic_filename_2_audioData = {}
for anechoic_audio_filename in anechoic_audio_filenames:
anechoic_audio_filePath = os.path.join(anechoic_audio_dir, anechoic_audio_filename)
assert os.path.isfile(anechoic_audio_filePath)
anechoic_audioSR, anechoic_audioData = wavfile.read(anechoic_audio_filePath)
assert anechoic_audioSR == self.rir_sampling_rate
assert anechoic_audio_filename.split(".")[0] not in self._anechoic_filename_2_audioData
self._anechoic_filename_2_audioData[anechoic_audio_filename.split(".")[0]] = anechoic_audioData
assert "CONTEXT_VIEW_POSE_SENSOR" in self.task_cfg.SENSORS
self._pose_feat_shape = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.FEATURE_SHAPE
self._add_truncated_gaussian_pose_noise = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.ADD_TRUNCATED_GAUSSIAN_NOISE
self._truncated_gaussian_pose_noise_cfg = self.task_cfg.CONTEXT_VIEW_POSE_SENSOR.TRUNCATED_GAUSSIAN_NOISE
# self._truncated_gaussian_pose_noise_random_multipliers = None
self._gaussian_pose_noise_multipliers = None
if self._add_truncated_gaussian_pose_noise:
assert os.path.isfile(self._truncated_gaussian_pose_noise_cfg.GAUSSIAN_NOISE_MULTIPLIERS_PATH)
with open(self._truncated_gaussian_pose_noise_cfg.GAUSSIAN_NOISE_MULTIPLIERS_PATH, "rb") as fi:
self._gaussian_pose_noise_multipliers = pickle.load(fi)
self.max_context_length = self.env_cfg.MAX_CONTEXT_LENGTH
self.visual_budget = self.env_cfg.VISUAL_BUDGET
self.max_query_length = self.env_cfg.MAX_QUERY_LENGTH
assert self.max_query_length == (self.config.ALL_AGENTS.NUM * self.max_context_length)
self.render_local_ego_occ_maps_from_depth_images = self.config.RENDER_LOCAL_EGO_OCC_MAPS_FROM_DEPTH_IMAGES
self.local_occMap_cfg = self.config.LOCAL_MAP
self.ego_mapper = None
self.redwood_depth_noise_dist_model = None
self.redwood_depth_noise_multiplier = None
if self.render_local_ego_occ_maps_from_depth_images:
self.ego_mapper = EgoMap(
map_size=self.local_occMap_cfg.SIZE,
map_scale=self.local_occMap_cfg.SCALE,
position=self.local_occMap_cfg.AGENT_POSITION,
depth_sensor_hfov=self.local_occMap_cfg.HFOV_DEPTH_IMG,
height_thresh=self.local_occMap_cfg.HEIGHT_THRESH,
depth_sensor_min_depth=self.local_occMap_cfg.MIN_DEPTH,
depth_sensor_max_depth=self.local_occMap_cfg.MAX_DEPTH,
depth_sensor_width=self.local_occMap_cfg.WIDTH_DEPTH_IMG,
depth_sensor_height=self.local_occMap_cfg.HEIGHT_DEPTH_IMG,
depth_sensor_normalize_depth=self.local_occMap_cfg.NORMALIZE_DEPTH_IMG,
)
if self.config.DEPTH_SENSOR.ADD_REDWOOD_NOISE:
"""src: https://github.com/facebookresearch/habitat-sim/blob/main/src_python/habitat_sim/sensors/noise_models/redwood_depth_noise_model.py"""
assert os.path.isfile(self.config.DEPTH_SENSOR.REDWOOD_DEPTH_NOISE_DIST_MODEL)
self.redwood_depth_noise_dist_model = np.load(self.config.DEPTH_SENSOR.REDWOOD_DEPTH_NOISE_DIST_MODEL)
self.redwood_depth_noise_dist_model = self.redwood_depth_noise_dist_model.reshape(80, 80, 5)
self.redwood_depth_noise_multiplier = self.config.DEPTH_SENSOR.REDWOOD_NOISE_MULTIPLIER
assert os.path.isfile(self.config.DEPTH_SENSOR.REDWOOD_NOISE_RAND_NUMS_PATH)
with open(self.config.DEPTH_SENSOR.REDWOOD_NOISE_RAND_NUMS_PATH, "rb") as fi:
self._redwood_depth_noise_rand_nums = pickle.load(fi)
self.stitch_top_down_maps = self.config.STITCH_TOP_DOWN_MAPS
self.rir_dir = self.audio_cfg.RIR_DIR
assert os.path.isdir(self.rir_dir)
self.num_agents = self.config.ALL_AGENTS.NUM
assert self.num_agents == 2
self.total_context_length = None
self.agent_utterance_allSwitches = None
self.lst_anechoicAudio_filenameNstartSamplingIdx = None
self.used_query_nodsNrots = None
self._current_context_rgb = None
self._current_context_ego_local_map = None
self._current_context_view_pose = None
self._current_context_view_rAz = None
self._previous_context_view_mask = None
self._current_context_selfAudio = None
self._current_context_otherAudio = None
self._current_context_otherAudio_pose = None
self._current_context_audio_mask = None
self._all_context_audio_mask = None
self._current_query_globCanMapEgoCrop_gt = None
self._current_query_globCanMapEgoCrop_gt_exploredPartMask = None
self._current_query_mask = None
self._all_query_mask = None
if self.stitch_top_down_maps:
self._current_stitched_query_globCanMapEgoCrop_gt = None
assert self.config.SCENE_DATASET in ["mp3d"],\
"SCENE_DATASET needs to be in ['mp3d']"
self._previous_receiver_position_indexs = [None] * self.num_agents
self._current_receiver_position_indexs = [None] * self.num_agents
self._previous_rotation_angles = [None] * self.num_agents
self._current_rotation_angles = [None] * self.num_agents
self._frame_cache = defaultdict(dict)
self._episode_count = 0
self._step_count = 0
self._view_count = self.num_agents
self._action = 1
self._is_episode_active = None
self._previous_step_collideds = [None] * self.num_agents
self._nodes_n_azimuths_lists = [None] * self.num_agents
self._position_to_index_mapping = dict() | self.points, self.graph = load_points_data(self.meta_dir, self.config.AUDIO.GRAPH_FILE, | 0 | 2023-12-06 01:20:37+00:00 | 4k |
noirbizarre/pdm-dockerize | src/pdm_dockerize/commands.py | [
{
"identifier": "ProjectEntrypoint",
"path": "src/pdm_dockerize/entrypoint.py",
"snippet": "class ProjectEntrypoint:\n project: Project\n hooks: HookManager\n\n @cached_property\n def settings(self) -> DockerizeSettings:\n return self.project.pyproject.settings.get(\"dockerize\", {})\... | import argparse
import os
from pathlib import Path
from pdm.cli import actions
from pdm.cli.commands.base import BaseCommand
from pdm.cli.filters import GroupSelection
from pdm.cli.hooks import HookManager
from pdm.cli.options import Option, dry_run_option, groups_group, lockfile_option
from pdm.cli.utils import check_project_file
from pdm.environments import PythonLocalEnvironment
from pdm.project import Project
from .entrypoint import ProjectEntrypoint
from .installer import DockerizeSynchronizer | 2,629 | from __future__ import annotations
class DockerizeEnvironment(PythonLocalEnvironment):
"""An environment installaing into the dist/docker directory"""
def __init__(
self, project: Project, *, target: str | None = None, python: str | None = None
) -> None:
super().__init__(project, python=python)
self.target = Path(target) if target else None
@property
def packages_path(self) -> Path:
return self.target or self.project.root / "dist/docker"
class DockerizeCommand(BaseCommand):
"""Generate content for a Docker image"""
arguments = (
Option(
"target",
nargs="?",
help="The target into which the docker assets will be generated (default: dist/docker)",
),
*BaseCommand.arguments,
groups_group,
dry_run_option,
lockfile_option,
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
check_project_file(project)
actions.check_lockfile(project)
selection = GroupSelection.from_options(project, options)
hooks = HookManager(project)
env = DockerizeEnvironment(project, target=options.target)
requirements = []
selection.validate()
for group in selection:
requirements.extend(project.get_dependencies(group).values())
candidates = actions.resolve_candidates_from_lockfile(project, requirements)
synchronizer = DockerizeSynchronizer(
candidates,
env,
dry_run=options.dry_run,
clean=False,
no_editable=True,
reinstall=False,
only_keep=False,
install_self=False,
fail_fast=True,
use_install_cache=False,
)
synchronizer.synchronize()
entrypoint = env.packages_path / "entrypoint"
| from __future__ import annotations
class DockerizeEnvironment(PythonLocalEnvironment):
"""An environment installaing into the dist/docker directory"""
def __init__(
self, project: Project, *, target: str | None = None, python: str | None = None
) -> None:
super().__init__(project, python=python)
self.target = Path(target) if target else None
@property
def packages_path(self) -> Path:
return self.target or self.project.root / "dist/docker"
class DockerizeCommand(BaseCommand):
"""Generate content for a Docker image"""
arguments = (
Option(
"target",
nargs="?",
help="The target into which the docker assets will be generated (default: dist/docker)",
),
*BaseCommand.arguments,
groups_group,
dry_run_option,
lockfile_option,
)
def handle(self, project: Project, options: argparse.Namespace) -> None:
check_project_file(project)
actions.check_lockfile(project)
selection = GroupSelection.from_options(project, options)
hooks = HookManager(project)
env = DockerizeEnvironment(project, target=options.target)
requirements = []
selection.validate()
for group in selection:
requirements.extend(project.get_dependencies(group).values())
candidates = actions.resolve_candidates_from_lockfile(project, requirements)
synchronizer = DockerizeSynchronizer(
candidates,
env,
dry_run=options.dry_run,
clean=False,
no_editable=True,
reinstall=False,
only_keep=False,
install_self=False,
fail_fast=True,
use_install_cache=False,
)
synchronizer.synchronize()
entrypoint = env.packages_path / "entrypoint" | entrypoint.write_text(ProjectEntrypoint(project, hooks).as_script()) | 0 | 2023-12-13 23:35:23+00:00 | 4k |
wrongbad/badcad | badcad/badcad.py | [
{
"identifier": "display",
"path": "badcad/utils.py",
"snippet": "def display(thing, \n vscode_fix=True, \n wireframe=False, \n color='#aaaa22', \n smoothing_threshold=-1,\n width=640,\n height=640,\n ):\n if vscode_fix:\n fix_vscode_style()\n \n... | import manifold3d
import numpy as np
from manifold3d import Manifold, CrossSection
from .utils import (
display,
triangle_normals,
polygon_nearest_alignment,
svg2polygons,
text2svg,
PolyPath
) | 1,921 |
# wrapper for Manifold
# adds jupyter preview & tweaks API
class Solid:
def __init__(self, manifold = Manifold()):
self.manifold = manifold
# TODO add visual properties (e.g. color, texture)
def _repr_mimebundle_(self, **kwargs):
if self.is_empty():
return None
raw_mesh = self.to_mesh()
verts = raw_mesh.vert_properties.astype(np.float32)
tris = raw_mesh.tri_verts.astype(np.uint32)
|
# wrapper for Manifold
# adds jupyter preview & tweaks API
class Solid:
def __init__(self, manifold = Manifold()):
self.manifold = manifold
# TODO add visual properties (e.g. color, texture)
def _repr_mimebundle_(self, **kwargs):
if self.is_empty():
return None
raw_mesh = self.to_mesh()
verts = raw_mesh.vert_properties.astype(np.float32)
tris = raw_mesh.tri_verts.astype(np.uint32) | renderer = display((verts, tris)) | 0 | 2023-12-11 01:48:22+00:00 | 4k |
Kokonico/ObjLog | tests/test_tests.py | [
{
"identifier": "LogMessage",
"path": "objlog/Base/LogMessage.py",
"snippet": "class LogMessage:\n \"\"\"a base message to be logged\n Attributes:\n color\n level (name)\n\n WARNING: this class should not be used directly, use a subclass instead\n it is designed to be used as a... | import unittest
import random
import os
from objlog import LogNode, LogMessage
from objlog.LogMessages import Debug, Info, Warn, Error, Fatal | 2,650 | """test the functionality of the logger"""
def gen_random_messages(amount: int, extra_classes: list | None = None):
"""generate random messages"""
messages = []
if extra_classes is None:
extra_classes = []
for i in range(amount):
| """test the functionality of the logger"""
def gen_random_messages(amount: int, extra_classes: list | None = None):
"""generate random messages"""
messages = []
if extra_classes is None:
extra_classes = []
for i in range(amount): | messages.append(random.choice([Debug, Info, Warn, Error, Fatal] + extra_classes)("This is a random message")) | 3 | 2023-12-08 20:41:18+00:00 | 4k |
anyquest/pyaq | aq/activities/generate.py | [
{
"identifier": "BaseActivity",
"path": "aq/activities/activity.py",
"snippet": "class BaseActivity:\n MAX_ITERATIONS = 42\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n pass\n\n @staticmethod\n def merge_inputs(inputs: Dict[str, Any]) -> str... | import json
import logging
import time
from typing import Dict, Any, List
from .activity import BaseActivity, ActivityError
from ..providers import ProviderManager
from ..providers.types import ChatCompletionMessage, ChatCompletionRequest, Choice, Tool, ResponseFormat, ToolCall
from ..tools import ToolManager
from ..types import ActivityJob, JobState, Activity, App | 2,405 |
class GenerateActivity(BaseActivity):
TOOL_NAME_DELIMITER = "__"
def __init__(self, provider_manager: ProviderManager, tool_manager: ToolManager):
self._logger = logging.getLogger(self.__class__.__name__)
self._provider_manager = provider_manager
self._tool_manager = tool_manager
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
app = activity_job.app_job.app
activity = app.activities[activity_job.activity_name]
if len(activity.models) < 1:
raise ActivityError(f"A model is required")
model = app.models[activity.models[0]]
temperature = float(activity.parameters.get("temperature", model.parameters.get("temperature", 0.5)))
max_tokens = int(activity.parameters.get("max_words", model.parameters.get("max_words", 500))*4/3)
messages = []
profile = app.info.profile
if profile:
messages.append(ChatCompletionMessage(role="system", content=profile))
json_format = activity.parameters.get("format", None) == "json"
if json_format:
messages.append(ChatCompletionMessage(
role="system",
content="Provide your response as a JSON object."))
else:
messages.append(ChatCompletionMessage(
role="system",
content="Use the tab length of two spaces when formatting nested lists in markdown."))
tools = await self.get_tools(app, activity)
if tools:
messages.append(ChatCompletionMessage(
role="system",
content="Think step-by-step. Perform as many iterations as necessary "
"to accomplish your goal using the tools provided."))
prompt_template = activity.parameters["prompt"]
prompt = self.render_prompt(prompt_template, inputs)
messages.append(ChatCompletionMessage(role="user", content=prompt))
parts = []
start_time = time.perf_counter()
provider = self._provider_manager.get_provider(model.provider)
for x in range(self.MAX_ITERATIONS):
request = ChatCompletionRequest(
model=model.model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
tools=tools if tools else None,
tool_choice="auto" if tools else None,
response_format=ResponseFormat(type="json_object") if json_format else None
)
response = await provider.create_completion(request)
choice: Choice = response.choices[0]
message: ChatCompletionMessage = choice.message
messages.append(message)
if choice.finish_reason == "tool_calls":
for tool_call in message.tool_calls:
tool_result = await self.process_tool_call(tool_call, app)
messages.append(tool_result)
else:
if message.content:
parts.append(message.content)
if choice.finish_reason:
self._logger.debug(f"Finished with reason {choice.finish_reason} "
f"in {int(time.perf_counter()-start_time)} sec.")
break
activity_job.state = JobState.SUCCESS
activity_job.output = "\n\n".join(parts)
activity_job.output_type = "text/markdown"
except Exception as e:
self._logger.error(e)
activity_job.state = JobState.ERROR
activity_job.output = str(e)
|
class GenerateActivity(BaseActivity):
TOOL_NAME_DELIMITER = "__"
def __init__(self, provider_manager: ProviderManager, tool_manager: ToolManager):
self._logger = logging.getLogger(self.__class__.__name__)
self._provider_manager = provider_manager
self._tool_manager = tool_manager
async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:
try:
app = activity_job.app_job.app
activity = app.activities[activity_job.activity_name]
if len(activity.models) < 1:
raise ActivityError(f"A model is required")
model = app.models[activity.models[0]]
temperature = float(activity.parameters.get("temperature", model.parameters.get("temperature", 0.5)))
max_tokens = int(activity.parameters.get("max_words", model.parameters.get("max_words", 500))*4/3)
messages = []
profile = app.info.profile
if profile:
messages.append(ChatCompletionMessage(role="system", content=profile))
json_format = activity.parameters.get("format", None) == "json"
if json_format:
messages.append(ChatCompletionMessage(
role="system",
content="Provide your response as a JSON object."))
else:
messages.append(ChatCompletionMessage(
role="system",
content="Use the tab length of two spaces when formatting nested lists in markdown."))
tools = await self.get_tools(app, activity)
if tools:
messages.append(ChatCompletionMessage(
role="system",
content="Think step-by-step. Perform as many iterations as necessary "
"to accomplish your goal using the tools provided."))
prompt_template = activity.parameters["prompt"]
prompt = self.render_prompt(prompt_template, inputs)
messages.append(ChatCompletionMessage(role="user", content=prompt))
parts = []
start_time = time.perf_counter()
provider = self._provider_manager.get_provider(model.provider)
for x in range(self.MAX_ITERATIONS):
request = ChatCompletionRequest(
model=model.model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
tools=tools if tools else None,
tool_choice="auto" if tools else None,
response_format=ResponseFormat(type="json_object") if json_format else None
)
response = await provider.create_completion(request)
choice: Choice = response.choices[0]
message: ChatCompletionMessage = choice.message
messages.append(message)
if choice.finish_reason == "tool_calls":
for tool_call in message.tool_calls:
tool_result = await self.process_tool_call(tool_call, app)
messages.append(tool_result)
else:
if message.content:
parts.append(message.content)
if choice.finish_reason:
self._logger.debug(f"Finished with reason {choice.finish_reason} "
f"in {int(time.perf_counter()-start_time)} sec.")
break
activity_job.state = JobState.SUCCESS
activity_job.output = "\n\n".join(parts)
activity_job.output_type = "text/markdown"
except Exception as e:
self._logger.error(e)
activity_job.state = JobState.ERROR
activity_job.output = str(e)
| async def get_tools(self, app: App, activity: Activity) -> List[Tool]: | 6 | 2023-12-14 13:25:52+00:00 | 4k |
multimodallearning/DG-TTA | dg_tta/tta/augmentation_utils.py | [
{
"identifier": "MIND3D",
"path": "dg_tta/mind.py",
"snippet": "class MIND3D(torch.nn.Module):\n def __init__(self, delta=1, sigma=1, randn_weighting=0.05) -> None:\n super().__init__()\n self.delta = delta\n self.sigma = sigma\n self.out_channels = 12\n # define st... | import torch
import torch.nn.functional as F
from dg_tta.mind import MIND3D
from dg_tta.gin import gin_aug | 2,382 | # https://github.com/cwmok/LapIRN/blob/d8f96770a704b1f190955cc26297c7b01a270b0a/Code/miccai2020_model_stage.py#L761
# Vincent Arsigny, Olivier Commowick, Xavier Pennec, Nicholas Ayache: A Log-Euclidean Framework for Statistics on Diffeomorphisms
B, C, D, H, W = disp_field.size()
dimension_correction = torch.tensor([D, H, W], device=disp_field.device).view(
1, 3, 1, 1, 1
)
dt = 1.0 / time_steps
with torch.no_grad():
identity = (
F.affine_grid(
torch.eye(3, 4).unsqueeze(0), (1, 1, D, H, W), align_corners=True
)
.permute(0, 4, 1, 2, 3)
.to(disp_field)
)
if ensure_inverse_consistency:
out_disp_field = (
disp_field / dimension_correction / (2**time_steps) * dt
).clone()
out_inverse_disp_field = (
inverse_disp_field / dimension_correction / (2**time_steps) * dt
).clone()
for _ in range(
time_steps if not iter_steps_override else iter_steps_override
):
ds = out_disp_field.clone()
inverse_ds = out_inverse_disp_field.clone()
out_disp_field = +0.5 * ds - 0.5 * F.grid_sample(
inverse_ds,
(identity + ds).permute(0, 2, 3, 4, 1),
padding_mode="border",
align_corners=True,
)
out_inverse_disp_field = +0.5 * inverse_ds - 0.5 * F.grid_sample(
ds,
(identity + inverse_ds).permute(0, 2, 3, 4, 1),
padding_mode="border",
align_corners=True,
)
out_disp_field = out_disp_field * 2**time_steps * dimension_correction
out_inverse_disp_field = (
out_inverse_disp_field * 2**time_steps * dimension_correction
)
else:
# https://github.com/cwmok/LapIRN/blob/d8f96770a704b1f190955cc26297c7b01a270b0a/Code/miccai2020_model_stage.py#L761
ds_dt = (
disp_field / dimension_correction / (2**time_steps)
) # velocity = ds/dt
inverse_ds_dt = (
inverse_disp_field / dimension_correction / (2**time_steps)
)
ds = ds_dt * dt
inverse_ds = inverse_ds_dt * dt
for _ in range(
time_steps if not iter_steps_override else iter_steps_override
):
ds = ds + F.grid_sample(
ds,
(identity + ds).permute(0, 2, 3, 4, 1),
mode="bilinear",
padding_mode="zeros",
align_corners=True,
)
inverse_ds = inverse_ds + F.grid_sample(
inverse_ds,
(identity + inverse_ds).permute(0, 2, 3, 4, 1),
mode="bilinear",
padding_mode="zeros",
align_corners=True,
)
out_disp_field = ds * dimension_correction
out_inverse_disp_field = inverse_ds * dimension_correction
return out_disp_field, out_inverse_disp_field
def get_disp_field(
batch_num, size_3d, factor=0.1, interpolation_factor=5, device="cpu"
):
field = get_rf_field(
batch_num,
size_3d,
alternating_fields=False,
num_fields=3,
interpolation_factor=interpolation_factor,
device=device,
)
STEPS = 5
disp_field, inverse_disp_field = calc_consistent_diffeomorphic_field(
field * factor, torch.zeros_like(field), STEPS, ensure_inverse_consistency=True
)
return disp_field.permute(0, 2, 3, 4, 1), inverse_disp_field.permute(0, 2, 3, 4, 1)
def get_rand_affine(batch_size, strength=0.05, flip=False):
affine = torch.cat(
(
torch.randn(batch_size, 3, 4) * strength + torch.eye(3, 4).unsqueeze(0),
torch.tensor([0, 0, 0, 1]).view(1, 1, 4).repeat(batch_size, 1, 1),
),
1,
)
if flip:
flip_affine = torch.diag(
torch.cat([(2 * (torch.rand(3) > 0.5).float() - 1), torch.tensor([1.0])])
)
affine = affine @ flip_affine
return affine[:, :3], affine.inverse()[:, :3]
def gin_mind_aug(input):
|
def get_rf_field(
num_batch, size_3d, interpolation_factor=4, num_fields=4, device="cpu"
):
rf_field = F.interpolate(
F.avg_pool3d(
F.avg_pool3d(
F.avg_pool3d(
torch.randn(
num_batch,
num_fields,
size_3d[0] // interpolation_factor,
size_3d[1] // interpolation_factor,
size_3d[2] // interpolation_factor,
device=device,
),
interpolation_factor,
stride=1,
padding=interpolation_factor // 2,
),
interpolation_factor,
stride=1,
padding=interpolation_factor // 2,
),
interpolation_factor,
stride=1,
padding=interpolation_factor // 2,
),
size=size_3d,
mode="trilinear",
)
rf_field -= rf_field.mean((-3, -2, -1), keepdim=True)
rf_field /= 1e-3 + rf_field.view(num_batch * num_fields, -1).std(1).view(
num_batch, num_fields, 1, 1, 1
)
return rf_field
def calc_consistent_diffeomorphic_field(
disp_field,
inverse_disp_field,
time_steps=1,
ensure_inverse_consistency=True,
iter_steps_override=None,
):
# https://github.com/multimodallearning/convexAdam/blob/76a595914eb21ea17795e6cd19503ab447f0ea6b/l2r_2021_convexAdam_task1_docker.py#L166
# https://github.com/cwmok/LapIRN/blob/d8f96770a704b1f190955cc26297c7b01a270b0a/Code/miccai2020_model_stage.py#L761
# Vincent Arsigny, Olivier Commowick, Xavier Pennec, Nicholas Ayache: A Log-Euclidean Framework for Statistics on Diffeomorphisms
B, C, D, H, W = disp_field.size()
dimension_correction = torch.tensor([D, H, W], device=disp_field.device).view(
1, 3, 1, 1, 1
)
dt = 1.0 / time_steps
with torch.no_grad():
identity = (
F.affine_grid(
torch.eye(3, 4).unsqueeze(0), (1, 1, D, H, W), align_corners=True
)
.permute(0, 4, 1, 2, 3)
.to(disp_field)
)
if ensure_inverse_consistency:
out_disp_field = (
disp_field / dimension_correction / (2**time_steps) * dt
).clone()
out_inverse_disp_field = (
inverse_disp_field / dimension_correction / (2**time_steps) * dt
).clone()
for _ in range(
time_steps if not iter_steps_override else iter_steps_override
):
ds = out_disp_field.clone()
inverse_ds = out_inverse_disp_field.clone()
out_disp_field = +0.5 * ds - 0.5 * F.grid_sample(
inverse_ds,
(identity + ds).permute(0, 2, 3, 4, 1),
padding_mode="border",
align_corners=True,
)
out_inverse_disp_field = +0.5 * inverse_ds - 0.5 * F.grid_sample(
ds,
(identity + inverse_ds).permute(0, 2, 3, 4, 1),
padding_mode="border",
align_corners=True,
)
out_disp_field = out_disp_field * 2**time_steps * dimension_correction
out_inverse_disp_field = (
out_inverse_disp_field * 2**time_steps * dimension_correction
)
else:
# https://github.com/cwmok/LapIRN/blob/d8f96770a704b1f190955cc26297c7b01a270b0a/Code/miccai2020_model_stage.py#L761
ds_dt = (
disp_field / dimension_correction / (2**time_steps)
) # velocity = ds/dt
inverse_ds_dt = (
inverse_disp_field / dimension_correction / (2**time_steps)
)
ds = ds_dt * dt
inverse_ds = inverse_ds_dt * dt
for _ in range(
time_steps if not iter_steps_override else iter_steps_override
):
ds = ds + F.grid_sample(
ds,
(identity + ds).permute(0, 2, 3, 4, 1),
mode="bilinear",
padding_mode="zeros",
align_corners=True,
)
inverse_ds = inverse_ds + F.grid_sample(
inverse_ds,
(identity + inverse_ds).permute(0, 2, 3, 4, 1),
mode="bilinear",
padding_mode="zeros",
align_corners=True,
)
out_disp_field = ds * dimension_correction
out_inverse_disp_field = inverse_ds * dimension_correction
return out_disp_field, out_inverse_disp_field
def get_disp_field(
batch_num, size_3d, factor=0.1, interpolation_factor=5, device="cpu"
):
field = get_rf_field(
batch_num,
size_3d,
alternating_fields=False,
num_fields=3,
interpolation_factor=interpolation_factor,
device=device,
)
STEPS = 5
disp_field, inverse_disp_field = calc_consistent_diffeomorphic_field(
field * factor, torch.zeros_like(field), STEPS, ensure_inverse_consistency=True
)
return disp_field.permute(0, 2, 3, 4, 1), inverse_disp_field.permute(0, 2, 3, 4, 1)
def get_rand_affine(batch_size, strength=0.05, flip=False):
affine = torch.cat(
(
torch.randn(batch_size, 3, 4) * strength + torch.eye(3, 4).unsqueeze(0),
torch.tensor([0, 0, 0, 1]).view(1, 1, 4).repeat(batch_size, 1, 1),
),
1,
)
if flip:
flip_affine = torch.diag(
torch.cat([(2 * (torch.rand(3) > 0.5).float() - 1), torch.tensor([1.0])])
)
affine = affine @ flip_affine
return affine[:, :3], affine.inverse()[:, :3]
def gin_mind_aug(input): | return MIND3D()(gin_aug(input)) | 1 | 2023-12-08 08:43:11+00:00 | 4k |
chengkaiAcademyCity/EnvAwareAfford | code/models/model_env_aware_LineDisF.py | [
{
"identifier": "PointNetEncoder",
"path": "code/models/pointnet_utils.py",
"snippet": "class PointNetEncoder(nn.Module):\n def __init__(self, global_feat=True, feature_transform=False, channel=3):\n super(PointNetEncoder, self).__init__()\n self.stn = STN3d(channel)\n self.conv1... | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader
from pointnet2_ops import pointnet2_utils
from pointnet2_ops.pointnet2_modules import PointnetFPModule, PointnetSAModule
from pointnet2.models.pointnet2_ssg_cls import PointNet2ClassificationSSG
from .pointnet_utils import PointNetEncoder, feature_transform_reguliarzer | 2,463 | be formated as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
return self.fc_layer(l_features[0])
class PointNet2SemSegSSGShape(PointNet2ClassificationSSG):
def _build_model(self):
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModule(
npoint=512,
radius=0.2,
nsample=64,
mlp=[3, 64, 64, 128],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=128,
radius=0.4,
nsample=64,
mlp=[128, 128, 128, 256],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
mlp=[256, 256, 256, 256],
use_xyz=True,
)
)
self.FP_modules = nn.ModuleList()
self.FP_modules.append(PointnetFPModule(mlp=[128 + 3, 128, 128, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 256, 256, 256]))
self.fc_layer = nn.Sequential(
nn.Conv1d(128, self.hparams['feat_dim'], kernel_size=1, bias=False),
nn.BatchNorm1d(self.hparams['feat_dim']),
nn.ReLU(True),
)
self.fc_layer2 = nn.Sequential(
nn.Linear(256, self.hparams['feat_dim']),
nn.BatchNorm1d(self.hparams['feat_dim']),
nn.ReLU(True),
)
def forward(self, pointcloud):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predicts on
Each point in the point-cloud MUST
be formated as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
bottleneck_feats = l_features[-1].squeeze(-1)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
return self.fc_layer(l_features[0]), self.fc_layer2(bottleneck_feats)
# class PointNet(nn.Module):
# def __init__(self, feat_dim):
# super(PointNet, self).__init__()
#
# self.conv1 = nn.Conv1d(feat_dim*2, feat_dim, 1)
# self.conv2 = nn.Conv1d(feat_dim, feat_dim, 1)
# self.conv3 = nn.Conv1d(feat_dim, feat_dim, 1)
#
# self.bn1 = nn.BatchNorm1d(feat_dim)
# self.bn2 = nn.BatchNorm1d(feat_dim)
# self.bn3 = nn.BatchNorm1d(feat_dim)
#
# # B x 2F x N
# # output: B x F
# def forward(self, x):
# x = torch.relu(self.bn1(self.conv1(x)))
# x = torch.relu(self.bn2(self.conv2(x)))
# x = torch.relu(self.bn3(self.conv3(x)))
# x = x.max(dim=-1)[0]
# return x
class PointNet(nn.Module):
def __init__(self, feat_dim, normal_channel=False):
super(PointNet, self).__init__()
if normal_channel:
channel = 6
else:
channel = 3
| """
This file borrows PointNet2 implementation: https://github.com/erikwijmans/Pointnet2_PyTorch
"""
class MyFPModule(nn.Module):
def __init__(self):
super(MyFPModule, self).__init__()
# B x N x 3, B x M X 3, B x F x M
# output: B x F x N
def forward(self, unknown, known, known_feats):
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(
known_feats, idx, weight
)
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
return new_features.squeeze(-1)
class PointNet2SemSegSSG(PointNet2ClassificationSSG):
def _build_model(self):
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModule(
npoint=1024,
radius=0.1,
nsample=32,
mlp=[3, 32, 32, 64],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=256,
radius=0.2,
nsample=32,
mlp=[64, 64, 64, 128],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=64,
radius=0.4,
nsample=32,
mlp=[128, 128, 128, 256],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=16,
radius=0.8,
nsample=32,
mlp=[256, 256, 256, 512],
use_xyz=True,
)
)
self.FP_modules = nn.ModuleList()
self.FP_modules.append(PointnetFPModule(mlp=[128 + 3, 128, 128, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 64, 256, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 256]))
self.FP_modules.append(PointnetFPModule(mlp=[512 + 256, 256, 256]))
self.fc_layer = nn.Sequential(
nn.Conv1d(128, self.hparams['feat_dim'], kernel_size=1, bias=False),
nn.BatchNorm1d(self.hparams['feat_dim']),
nn.ReLU(True),
)
def forward(self, pointcloud):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predicts on
Each point in the point-cloud MUST
be formated as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
return self.fc_layer(l_features[0])
class PointNet2SemSegSSGShape(PointNet2ClassificationSSG):
def _build_model(self):
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModule(
npoint=512,
radius=0.2,
nsample=64,
mlp=[3, 64, 64, 128],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
npoint=128,
radius=0.4,
nsample=64,
mlp=[128, 128, 128, 256],
use_xyz=True,
)
)
self.SA_modules.append(
PointnetSAModule(
mlp=[256, 256, 256, 256],
use_xyz=True,
)
)
self.FP_modules = nn.ModuleList()
self.FP_modules.append(PointnetFPModule(mlp=[128 + 3, 128, 128, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 128]))
self.FP_modules.append(PointnetFPModule(mlp=[256 + 256, 256, 256]))
self.fc_layer = nn.Sequential(
nn.Conv1d(128, self.hparams['feat_dim'], kernel_size=1, bias=False),
nn.BatchNorm1d(self.hparams['feat_dim']),
nn.ReLU(True),
)
self.fc_layer2 = nn.Sequential(
nn.Linear(256, self.hparams['feat_dim']),
nn.BatchNorm1d(self.hparams['feat_dim']),
nn.ReLU(True),
)
def forward(self, pointcloud):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predicts on
Each point in the point-cloud MUST
be formated as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
l_xyz, l_features = [xyz], [features]
for i in range(len(self.SA_modules)):
li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
l_xyz.append(li_xyz)
l_features.append(li_features)
bottleneck_feats = l_features[-1].squeeze(-1)
for i in range(-1, -(len(self.FP_modules) + 1), -1):
l_features[i - 1] = self.FP_modules[i](
l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
)
return self.fc_layer(l_features[0]), self.fc_layer2(bottleneck_feats)
# class PointNet(nn.Module):
# def __init__(self, feat_dim):
# super(PointNet, self).__init__()
#
# self.conv1 = nn.Conv1d(feat_dim*2, feat_dim, 1)
# self.conv2 = nn.Conv1d(feat_dim, feat_dim, 1)
# self.conv3 = nn.Conv1d(feat_dim, feat_dim, 1)
#
# self.bn1 = nn.BatchNorm1d(feat_dim)
# self.bn2 = nn.BatchNorm1d(feat_dim)
# self.bn3 = nn.BatchNorm1d(feat_dim)
#
# # B x 2F x N
# # output: B x F
# def forward(self, x):
# x = torch.relu(self.bn1(self.conv1(x)))
# x = torch.relu(self.bn2(self.conv2(x)))
# x = torch.relu(self.bn3(self.conv3(x)))
# x = x.max(dim=-1)[0]
# return x
class PointNet(nn.Module):
def __init__(self, feat_dim, normal_channel=False):
super(PointNet, self).__init__()
if normal_channel:
channel = 6
else:
channel = 3 | self.feat = PointNetEncoder(global_feat=True, feature_transform=True, channel=channel) | 0 | 2023-12-08 09:29:36+00:00 | 4k |
tommy-xq/SA2VP | vpt_main/src/engine/trainer.py | [
{
"identifier": "Evaluator",
"path": "vpt_main/src/engine/evaluator.py",
"snippet": "class Evaluator():\n \"\"\"\n An evaluator with below logics:\n\n 1. find which eval module to use.\n 2. store the eval results, pretty print it in log file as well.\n \"\"\"\n\n def __init__(\n ... | import datetime
import time
import torch
import torch.nn as nn
import os
from fvcore.common.config import CfgNode
from fvcore.common.checkpoint import Checkpointer
from ..engine.evaluator import Evaluator
from ..solver.lr_scheduler import make_scheduler
from ..solver.optimizer import make_optimizer
from ..solver.losses import build_loss
from ..utils import logging
from ..utils.train_utils import AverageMeter, gpu_mem_usage | 2,958 | #!/usr/bin/env python3
"""
a trainer class
"""
logger = logging.get_logger("visual_prompt")
class Trainer():
"""
a trainer with below logics:
1. Build optimizer, scheduler
2. Load checkpoints if provided
3. Train and eval at each epoch
"""
def __init__(
self,
cfg: CfgNode,
model: nn.Module,
evaluator: Evaluator,
device: torch.device,
) -> None:
self.cfg = cfg
self.model = model
self.device = device
# solver related
logger.info("\tSetting up the optimizer...")
| #!/usr/bin/env python3
"""
a trainer class
"""
logger = logging.get_logger("visual_prompt")
class Trainer():
"""
a trainer with below logics:
1. Build optimizer, scheduler
2. Load checkpoints if provided
3. Train and eval at each epoch
"""
def __init__(
self,
cfg: CfgNode,
model: nn.Module,
evaluator: Evaluator,
device: torch.device,
) -> None:
self.cfg = cfg
self.model = model
self.device = device
# solver related
logger.info("\tSetting up the optimizer...") | self.optimizer = make_optimizer([self.model], cfg.SOLVER) | 2 | 2023-12-12 13:19:17+00:00 | 4k |
ChatClue/ChatClue | background/memory/tasks.py | [
{
"identifier": "ConversationMemoryManager",
"path": "database/conversations.py",
"snippet": "class ConversationMemoryManager:\n \"\"\"\n Manages database operations for the Conversation table,\n including insertions, updates, deletions, and queries.\n \"\"\"\n\n def __init__(self):\n ... | from celery import shared_task
from database.conversations import ConversationMemoryManager
from database.system_state import SystemStateManager
from integrations.openai.openai import OpenAIClient
from datetime import datetime | 3,125 |
@shared_task
def store_conversation_task(speaker_type, response):
"""
A Celery task for storing conversation parts in the database.
This asynchronous task takes a speaker type and a response, and stores them in the database
using the ConversationMemoryManager. It is designed to offload the database writing process
from the main execution thread, improving performance and responsiveness.
Args:
speaker_type (str): The type of speaker (e.g., 'user' or 'assistant'), indicating who is speaking.
response (str): The text of the response or conversation part to be stored.
"""
|
@shared_task
def store_conversation_task(speaker_type, response):
"""
A Celery task for storing conversation parts in the database.
This asynchronous task takes a speaker type and a response, and stores them in the database
using the ConversationMemoryManager. It is designed to offload the database writing process
from the main execution thread, improving performance and responsiveness.
Args:
speaker_type (str): The type of speaker (e.g., 'user' or 'assistant'), indicating who is speaking.
response (str): The text of the response or conversation part to be stored.
""" | openai_client = OpenAIClient() | 2 | 2023-12-06 09:10:06+00:00 | 4k |
GXNU-ZhongLab/ODTrack | lib/train/actors/odtrack.py | [
{
"identifier": "BaseActor",
"path": "lib/train/actors/base_actor.py",
"snippet": "class BaseActor:\n \"\"\" Base class for actor. The actor class handles the passing of the data through the network\n and calculation the loss\"\"\"\n def __init__(self, net, objective):\n \"\"\"\n ... | from . import BaseActor
from lib.utils.misc import NestedTensor, interpolate
from lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy
from lib.utils.merge import merge_template_search
from ...utils.heapmap_utils import generate_heatmap
from ...utils.ce_utils import generate_mask_cond, adjust_keep_rate
import torch | 3,251 |
class ODTrackActor(BaseActor):
""" Actor for training ODTrack models """
def __init__(self, net, objective, loss_weight, settings, cfg=None):
super().__init__(net, objective)
self.loss_weight = loss_weight
self.settings = settings
self.bs = self.settings.batchsize # batch size
self.cfg = cfg
def __call__(self, data):
"""
args:
data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.
template_images: (N_t, batch, 3, H, W)
search_images: (N_s, batch, 3, H, W)
returns:
loss - the training loss
status - dict containing detailed losses
"""
# forward pass
out_dict = self.forward_pass(data)
# compute losses
loss, status = self.compute_losses(out_dict, data)
return loss, status
def forward_pass(self, data):
template_list = []
search_list = []
for i in range(self.settings.num_template):
template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:]) # (batch, 3, 128, 128)
# template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:]) # (batch, 128, 128)
template_list.append(template_img_i)
for i in range(self.settings.num_search):
search_img_i = data['search_images'][i].view(-1, *data['search_images'].shape[2:]) # (batch, 3, 320, 320)
# search_att = data['search_att'][0].view(-1, *data['search_att'].shape[2:]) # (batch, 320, 320)
search_list.append(search_img_i)
box_mask_z = []
ce_keep_rate = None
if self.cfg.MODEL.BACKBONE.CE_LOC:
for i in range(self.settings.num_template):
box_mask_z.append(generate_mask_cond(self.cfg, template_list[i].shape[0], template_list[i].device,
data['template_anno'][i]))
box_mask_z = torch.cat(box_mask_z, dim=1)
ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH
ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH
ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch,
total_epochs=ce_start_epoch + ce_warm_epoch,
ITERS_PER_EPOCH=1,
base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0])
# if len(template_list) == 1:
# template_list = template_list[0]
out_dict = self.net(template=template_list,
search=search_list,
ce_template_mask=box_mask_z,
ce_keep_rate=ce_keep_rate,
return_last_attn=False)
return out_dict
def compute_losses(self, pred_dict, gt_dict, return_status=True):
# currently only support the type of pred_dict is list
assert isinstance(pred_dict, list)
loss_dict = {}
total_status = {}
total_loss = torch.tensor(0., dtype=torch.float).cuda() # 定义 0 tensor,并指定GPU设备
# generate gt gaussian map
gt_gaussian_maps_list = generate_heatmap(gt_dict['search_anno'], self.cfg.DATA.SEARCH.SIZE, self.cfg.MODEL.BACKBONE.STRIDE)
for i in range(len(pred_dict)):
# get GT
gt_bbox = gt_dict['search_anno'][i] # (Ns, batch, 4) (x1,y1,w,h) -> (batch, 4)
gt_gaussian_maps = gt_gaussian_maps_list[i].unsqueeze(1)
# Get boxes
pred_boxes = pred_dict[i]['pred_boxes']
if torch.isnan(pred_boxes).any():
raise ValueError("Network outputs is NAN! Stop Training")
num_queries = pred_boxes.size(1)
|
class ODTrackActor(BaseActor):
""" Actor for training ODTrack models """
def __init__(self, net, objective, loss_weight, settings, cfg=None):
super().__init__(net, objective)
self.loss_weight = loss_weight
self.settings = settings
self.bs = self.settings.batchsize # batch size
self.cfg = cfg
def __call__(self, data):
"""
args:
data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.
template_images: (N_t, batch, 3, H, W)
search_images: (N_s, batch, 3, H, W)
returns:
loss - the training loss
status - dict containing detailed losses
"""
# forward pass
out_dict = self.forward_pass(data)
# compute losses
loss, status = self.compute_losses(out_dict, data)
return loss, status
def forward_pass(self, data):
template_list = []
search_list = []
for i in range(self.settings.num_template):
template_img_i = data['template_images'][i].view(-1, *data['template_images'].shape[2:]) # (batch, 3, 128, 128)
# template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:]) # (batch, 128, 128)
template_list.append(template_img_i)
for i in range(self.settings.num_search):
search_img_i = data['search_images'][i].view(-1, *data['search_images'].shape[2:]) # (batch, 3, 320, 320)
# search_att = data['search_att'][0].view(-1, *data['search_att'].shape[2:]) # (batch, 320, 320)
search_list.append(search_img_i)
box_mask_z = []
ce_keep_rate = None
if self.cfg.MODEL.BACKBONE.CE_LOC:
for i in range(self.settings.num_template):
box_mask_z.append(generate_mask_cond(self.cfg, template_list[i].shape[0], template_list[i].device,
data['template_anno'][i]))
box_mask_z = torch.cat(box_mask_z, dim=1)
ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH
ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH
ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch,
total_epochs=ce_start_epoch + ce_warm_epoch,
ITERS_PER_EPOCH=1,
base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0])
# if len(template_list) == 1:
# template_list = template_list[0]
out_dict = self.net(template=template_list,
search=search_list,
ce_template_mask=box_mask_z,
ce_keep_rate=ce_keep_rate,
return_last_attn=False)
return out_dict
def compute_losses(self, pred_dict, gt_dict, return_status=True):
# currently only support the type of pred_dict is list
assert isinstance(pred_dict, list)
loss_dict = {}
total_status = {}
total_loss = torch.tensor(0., dtype=torch.float).cuda() # 定义 0 tensor,并指定GPU设备
# generate gt gaussian map
gt_gaussian_maps_list = generate_heatmap(gt_dict['search_anno'], self.cfg.DATA.SEARCH.SIZE, self.cfg.MODEL.BACKBONE.STRIDE)
for i in range(len(pred_dict)):
# get GT
gt_bbox = gt_dict['search_anno'][i] # (Ns, batch, 4) (x1,y1,w,h) -> (batch, 4)
gt_gaussian_maps = gt_gaussian_maps_list[i].unsqueeze(1)
# Get boxes
pred_boxes = pred_dict[i]['pred_boxes']
if torch.isnan(pred_boxes).any():
raise ValueError("Network outputs is NAN! Stop Training")
num_queries = pred_boxes.size(1) | pred_boxes_vec = box_cxcywh_to_xyxy(pred_boxes).view(-1, 4) # (B,N,4) --> (BN,4) (x1,y1,x2,y2) | 3 | 2023-12-10 03:57:19+00:00 | 4k |
lumina-test/lumina | lumina/analyzer/main.py | [
{
"identifier": "SwitchCounter",
"path": "lumina/analyzer/counter/switch_counter.py",
"snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress'... | import argparse, sys, yaml, os, math, logging
import lumina.analyzer.checker.integrity_check as integrity_check
import lumina.analyzer.checker.host_check as host_check
import lumina.analyzer.checker.gbn_check as gbn_check
import lumina.analyzer.checker.read_gbn_check as read_gbn_check
import lumina.analyzer.checker.cnp_check as cnp_check
import lumina.orchestrator.host as host
import lumina.orchestrator.switch as switch
from lumina.analyzer.counter.switch_counter import SwitchCounter
from lumina.analyzer.counter.host_counter import MLNXHostCounter, IntelHostCounter
from lumina.analyzer.pcap_processor.pcap_process import get_packet_list
from lumina.utils.config_loggers import config_stream_handler, config_file_handler | 3,271 | """
This is the main entry point for the offline analyzer. It takes a config file as input and
performs the following tasks:
1. Check the integrity of the trace according to pcap files, and timestamps
2. Check the host counters
3. Check the traces and counters according to Go-Back-N (GBN) and Congestion Notification Packet (CNP) checkers
"""
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "analysis.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.out"
def get_qp_info_list(switch_msg_snapshot):
""" Get the list of QP info from the switch message snapshot
Args:
switch_msg_snapshot (str): The path to the switch message snapshot
Returns:
list of dict: The list of queue pair (QP) information if successful or None otherwise.
The list of QP information is in the following format:
[{'psn_rcv': initial packet sequence number from the receiver qp,
'psn_snd': initial packet sequence number from the sender qp,
'qpn_rcv': receiver qp number,
'qpn_snd': sender qp number,
'ip_rcv' : receiver IP
'ip_snd' : sender IP}]
"""
try:
with open(switch_msg_snapshot, 'r') as stream:
qp_info_list = yaml.safe_load(stream)
except:
logging.error("Read switch message snapshot %s error." % switch_msg_snapshot)
return None
logging.info("Read switch message snapshot %s." % switch_msg_snapshot)
return qp_info_list
def main(args):
""" Main function of the offline analyzer
Args:
args (argparser.Namespace): The parsed arguments
Returns:
N/A
"""
with open(args.config_file, "r") as stream:
conf = yaml.safe_load(stream)
try:
result_dir = conf['result-path']
num_repeats = conf['num-repeats']
mtu = conf['traffic']['mtu']
msg_size = conf['traffic']['message-size']
num_msgs_per_qp = conf['traffic']['num-msgs-per-qp']
port_map = {'requester': conf['requester']['nic']['switch-port'],
'responder': conf['responder']['nic']['switch-port'],
'requester-mirror': conf['requester-mirror']['nic']['switch-port'],
'responder-mirror': conf['responder-mirror']['nic']['switch-port']}
requester_nic_type = conf['requester']['nic']['type']
responder_nic_type = conf['responder']['nic']['type']
requester_nic_vendor = host.NIC_TYPE2VENDOR_MAP[requester_nic_type] \
if requester_nic_type in host.NIC_TYPE2VENDOR_MAP.keys() \
else host.NICVendor.Unkown
responder_nic_vendor = host.NIC_TYPE2VENDOR_MAP[responder_nic_type] \
if responder_nic_type in host.NIC_TYPE2VENDOR_MAP.keys() \
else host.NICVendor.Unkown
nic_vendor_map = {'requester': requester_nic_vendor, 'responder': responder_nic_vendor}
except KeyError as e:
print("Config file %s has a bad yaml format (key error: %s)" % (args.config_file, e))
sys.exit(1)
root_logger = logging.getLogger()
root_logger.handlers.clear()
| """
This is the main entry point for the offline analyzer. It takes a config file as input and
performs the following tasks:
1. Check the integrity of the trace according to pcap files, and timestamps
2. Check the host counters
3. Check the traces and counters according to Go-Back-N (GBN) and Congestion Notification Packet (CNP) checkers
"""
## All logs will be logged into file LOG_FILENAME
LOG_FILENAME = "analysis.log"
## Results (checkers and measurements) will also be dumped into file RESULT_FILENAME
RESULT_FILENAME = "result.out"
def get_qp_info_list(switch_msg_snapshot):
""" Get the list of QP info from the switch message snapshot
Args:
switch_msg_snapshot (str): The path to the switch message snapshot
Returns:
list of dict: The list of queue pair (QP) information if successful or None otherwise.
The list of QP information is in the following format:
[{'psn_rcv': initial packet sequence number from the receiver qp,
'psn_snd': initial packet sequence number from the sender qp,
'qpn_rcv': receiver qp number,
'qpn_snd': sender qp number,
'ip_rcv' : receiver IP
'ip_snd' : sender IP}]
"""
try:
with open(switch_msg_snapshot, 'r') as stream:
qp_info_list = yaml.safe_load(stream)
except:
logging.error("Read switch message snapshot %s error." % switch_msg_snapshot)
return None
logging.info("Read switch message snapshot %s." % switch_msg_snapshot)
return qp_info_list
def main(args):
""" Main function of the offline analyzer
Args:
args (argparser.Namespace): The parsed arguments
Returns:
N/A
"""
with open(args.config_file, "r") as stream:
conf = yaml.safe_load(stream)
try:
result_dir = conf['result-path']
num_repeats = conf['num-repeats']
mtu = conf['traffic']['mtu']
msg_size = conf['traffic']['message-size']
num_msgs_per_qp = conf['traffic']['num-msgs-per-qp']
port_map = {'requester': conf['requester']['nic']['switch-port'],
'responder': conf['responder']['nic']['switch-port'],
'requester-mirror': conf['requester-mirror']['nic']['switch-port'],
'responder-mirror': conf['responder-mirror']['nic']['switch-port']}
requester_nic_type = conf['requester']['nic']['type']
responder_nic_type = conf['responder']['nic']['type']
requester_nic_vendor = host.NIC_TYPE2VENDOR_MAP[requester_nic_type] \
if requester_nic_type in host.NIC_TYPE2VENDOR_MAP.keys() \
else host.NICVendor.Unkown
responder_nic_vendor = host.NIC_TYPE2VENDOR_MAP[responder_nic_type] \
if responder_nic_type in host.NIC_TYPE2VENDOR_MAP.keys() \
else host.NICVendor.Unkown
nic_vendor_map = {'requester': requester_nic_vendor, 'responder': responder_nic_vendor}
except KeyError as e:
print("Config file %s has a bad yaml format (key error: %s)" % (args.config_file, e))
sys.exit(1)
root_logger = logging.getLogger()
root_logger.handlers.clear() | config_stream_handler(root_logger) | 4 | 2023-12-09 08:21:14+00:00 | 4k |
yilin-bao/nnanim | TestingCode/vit.py | [
{
"identifier": "EmbeddingStem",
"path": "TestingCode/patch_embed.py",
"snippet": "class EmbeddingStem(nn.Module):\n def __init__(\n self,\n image_size=224,\n patch_size=16,\n channels=3,\n embedding_dim=768,\n hidden_dims=None,\n conv_patch=False,\n ... | import torch.nn as nn
from TestingCode.patch_embed import EmbeddingStem
from TestingCode.transformer import Transformer
from TestingCode.modules import OutputLayer | 2,458 |
class VisionTransformer(nn.Module):
def __init__(
self,
image_size=224,
patch_size=16,
in_channels=3,
embedding_dim=768,
num_layers=12,
num_heads=12,
qkv_bias=True,
mlp_ratio=4.0,
use_revised_ffn=False,
dropout_rate=0.0,
attn_dropout_rate=0.0,
use_conv_stem=True,
use_conv_patch=False,
use_linear_patch=False,
use_conv_stem_original=True,
use_stem_scaled_relu=False,
hidden_dims=None,
cls_head=False,
num_classes=1000,
representation_size=None,
):
super(VisionTransformer, self).__init__()
# embedding layer
self.embedding_layer = EmbeddingStem(
image_size=image_size,
patch_size=patch_size,
channels=in_channels,
embedding_dim=embedding_dim,
hidden_dims=hidden_dims,
conv_patch=use_conv_patch,
linear_patch=use_linear_patch,
conv_stem=use_conv_stem,
conv_stem_original=use_conv_stem_original,
conv_stem_scaled_relu=use_stem_scaled_relu,
position_embedding_dropout=dropout_rate,
cls_head=cls_head,
)
# transformer
self.transformer = Transformer(
dim=embedding_dim,
depth=num_layers,
heads=num_heads,
mlp_ratio=mlp_ratio,
attn_dropout=attn_dropout_rate,
dropout=dropout_rate,
qkv_bias=qkv_bias,
revised=use_revised_ffn,
)
self.post_transformer_ln = nn.LayerNorm(embedding_dim)
# output layer
|
class VisionTransformer(nn.Module):
def __init__(
self,
image_size=224,
patch_size=16,
in_channels=3,
embedding_dim=768,
num_layers=12,
num_heads=12,
qkv_bias=True,
mlp_ratio=4.0,
use_revised_ffn=False,
dropout_rate=0.0,
attn_dropout_rate=0.0,
use_conv_stem=True,
use_conv_patch=False,
use_linear_patch=False,
use_conv_stem_original=True,
use_stem_scaled_relu=False,
hidden_dims=None,
cls_head=False,
num_classes=1000,
representation_size=None,
):
super(VisionTransformer, self).__init__()
# embedding layer
self.embedding_layer = EmbeddingStem(
image_size=image_size,
patch_size=patch_size,
channels=in_channels,
embedding_dim=embedding_dim,
hidden_dims=hidden_dims,
conv_patch=use_conv_patch,
linear_patch=use_linear_patch,
conv_stem=use_conv_stem,
conv_stem_original=use_conv_stem_original,
conv_stem_scaled_relu=use_stem_scaled_relu,
position_embedding_dropout=dropout_rate,
cls_head=cls_head,
)
# transformer
self.transformer = Transformer(
dim=embedding_dim,
depth=num_layers,
heads=num_heads,
mlp_ratio=mlp_ratio,
attn_dropout=attn_dropout_rate,
dropout=dropout_rate,
qkv_bias=qkv_bias,
revised=use_revised_ffn,
)
self.post_transformer_ln = nn.LayerNorm(embedding_dim)
# output layer | self.cls_layer = OutputLayer( | 2 | 2023-12-05 22:01:06+00:00 | 4k |
equilibration/equipy | equipy/fairness/_wasserstein.py | [
{
"identifier": "_check_epsilon",
"path": "equipy/utils/checkers.py",
"snippet": "def _check_epsilon(epsilon):\n \"\"\"\n Check if epsilon (fairness parameter) is within the valid range [0, 1].\n\n Parameters\n ----------\n epsilon : float\n Fairness parameter controlling the trade... | import numpy as np
from ..utils.checkers import _check_epsilon, _check_epsilon_size, _check_mod, _check_shape, _check_nb_observations
from ._base import BaseHelper | 3,486 |
class FairWasserstein(BaseHelper):
"""
Class implementing Wasserstein distance-based fairness adjustment for binary classification tasks.
Parameters
----------
sigma : float, optional (default=0.0001)
Standard deviation of the random noise added during fairness adjustment.
Attributes
----------
sigma : float
Standard deviation of the random noise added during fairness adjustment.
modalities_calib : dict
Dictionary storing modality values obtained from calibration data.
weights : dict
Dictionary storing weights (probabilities) for each modality based on their occurrences in calibration data.
ecdf : dict
Dictionary storing ECDF (Empirical Cumulative Distribution Function) objects for each sensitive modality.
eqf : dict
Dictionary storing EQF (Empirical Quantile Function) objects for each sensitive modality.
Methods
-------
fit(y, sensitive_feature)
Fit the fairness adjustment model using calibration data.
transform(y, sensitive_feature, epsilon=0)
Transform test data to enforce fairness using Wasserstein distance.
"""
def __init__(self, sigma=0.0001):
super().__init__()
self.sigma = sigma
self.modalities_calib = None
def fit(self, y, sensitive_feature):
"""
Perform fit on the calibration data and save the ECDF, EQF, and weights of the sensitive variable.
Parameters
----------
y : array-like, shape (n_samples,)
The calibration labels.
sensitive_feature : array-like, shape (n_samples,)
The calibration samples representing one single sensitive attribute.
Returns
-------
None
Notes
-----
This method computes the ECDF (Empirical Cumulative Distribution Function),
EQF (Empirical Quantile Function), and weights for the sensitive variable
based on the provided calibration data. These computed values are used
during the transformation process to ensure fairness in predictions.
Examples
--------
>>> wasserstein = FairWasserstein(sigma=0.001)
>>> y = np.array([0.0, 1.0, 1.0, 0.0])
>>> sensitive_feature = np.array([1, 2, 0, 2])
>>> wasserstein.fit(y, sensitive_feature)
"""
_check_shape(y, sensitive_feature)
self.modalities_calib = self._get_modalities(sensitive_feature)
self._compute_weights(sensitive_feature)
self._estimate_ecdf_eqf(y, sensitive_feature, self.sigma)
def transform(self, y, sensitive_feature, epsilon=0):
"""
Transform the test data to enforce fairness using Wasserstein distance.
Parameters
----------
y : array-like, shape (n_samples,)
The target values of the test data.
sensitive_feature : array-like, shape (n_samples,)
The test samples representing a single sensitive attribute.
epsilon : float, optional (default=0)
The fairness parameter controlling the trade-off between fairness and accuracy.
It represents the fraction of the original predictions retained after fairness adjustment.
Epsilon should be a value between 0 and 1, where 0 means full fairness and 1 means no fairness constraint.
Returns
-------
y_fair : array-like, shape (n_samples,)
Fair predictions for the test data after enforcing fairness constraints.
Notes
-----
This method applies Wasserstein distance-based fairness adjustment to the test data
using the precomputed ECDF (Empirical Cumulative Distribution Function),
EQF (Empirical Quantile Function), and weights obtained from the calibration data.
Random noise within the range of [-sigma, sigma] is added to the test data to ensure fairness.
The parameter epsilon controls the trade-off between fairness and accuracy,
with 0 enforcing full fairness and 1 retaining the original predictions.
Examples
--------
>>> y = np.array([0.05, 0.08, 0.9, 0.9, 0.01, 0.88])
>>> sensitive_feature = np.array([1, 3, 2, 3, 1, 2])
>>> wasserstein = FairWasserstein(sigma=0.001)
>>> wasserstein.fit(y, sensitive_feature)
>>> y = np.array([0.01, 0.99, 0.98, 0.04])
>>> sensitive_feature = np.array([3, 1, 2, 3])
>>> print(wasserstein.transform(y, sensitive_feature, epsilon=0.2))
[0.26063673 0.69140959 0.68940959 0.26663673]
"""
_check_epsilon(epsilon)
_check_shape(y, sensitive_feature)
modalities_test = self._get_modalities(sensitive_feature)
|
class FairWasserstein(BaseHelper):
"""
Class implementing Wasserstein distance-based fairness adjustment for binary classification tasks.
Parameters
----------
sigma : float, optional (default=0.0001)
Standard deviation of the random noise added during fairness adjustment.
Attributes
----------
sigma : float
Standard deviation of the random noise added during fairness adjustment.
modalities_calib : dict
Dictionary storing modality values obtained from calibration data.
weights : dict
Dictionary storing weights (probabilities) for each modality based on their occurrences in calibration data.
ecdf : dict
Dictionary storing ECDF (Empirical Cumulative Distribution Function) objects for each sensitive modality.
eqf : dict
Dictionary storing EQF (Empirical Quantile Function) objects for each sensitive modality.
Methods
-------
fit(y, sensitive_feature)
Fit the fairness adjustment model using calibration data.
transform(y, sensitive_feature, epsilon=0)
Transform test data to enforce fairness using Wasserstein distance.
"""
def __init__(self, sigma=0.0001):
super().__init__()
self.sigma = sigma
self.modalities_calib = None
def fit(self, y, sensitive_feature):
"""
Perform fit on the calibration data and save the ECDF, EQF, and weights of the sensitive variable.
Parameters
----------
y : array-like, shape (n_samples,)
The calibration labels.
sensitive_feature : array-like, shape (n_samples,)
The calibration samples representing one single sensitive attribute.
Returns
-------
None
Notes
-----
This method computes the ECDF (Empirical Cumulative Distribution Function),
EQF (Empirical Quantile Function), and weights for the sensitive variable
based on the provided calibration data. These computed values are used
during the transformation process to ensure fairness in predictions.
Examples
--------
>>> wasserstein = FairWasserstein(sigma=0.001)
>>> y = np.array([0.0, 1.0, 1.0, 0.0])
>>> sensitive_feature = np.array([1, 2, 0, 2])
>>> wasserstein.fit(y, sensitive_feature)
"""
_check_shape(y, sensitive_feature)
self.modalities_calib = self._get_modalities(sensitive_feature)
self._compute_weights(sensitive_feature)
self._estimate_ecdf_eqf(y, sensitive_feature, self.sigma)
def transform(self, y, sensitive_feature, epsilon=0):
"""
Transform the test data to enforce fairness using Wasserstein distance.
Parameters
----------
y : array-like, shape (n_samples,)
The target values of the test data.
sensitive_feature : array-like, shape (n_samples,)
The test samples representing a single sensitive attribute.
epsilon : float, optional (default=0)
The fairness parameter controlling the trade-off between fairness and accuracy.
It represents the fraction of the original predictions retained after fairness adjustment.
Epsilon should be a value between 0 and 1, where 0 means full fairness and 1 means no fairness constraint.
Returns
-------
y_fair : array-like, shape (n_samples,)
Fair predictions for the test data after enforcing fairness constraints.
Notes
-----
This method applies Wasserstein distance-based fairness adjustment to the test data
using the precomputed ECDF (Empirical Cumulative Distribution Function),
EQF (Empirical Quantile Function), and weights obtained from the calibration data.
Random noise within the range of [-sigma, sigma] is added to the test data to ensure fairness.
The parameter epsilon controls the trade-off between fairness and accuracy,
with 0 enforcing full fairness and 1 retaining the original predictions.
Examples
--------
>>> y = np.array([0.05, 0.08, 0.9, 0.9, 0.01, 0.88])
>>> sensitive_feature = np.array([1, 3, 2, 3, 1, 2])
>>> wasserstein = FairWasserstein(sigma=0.001)
>>> wasserstein.fit(y, sensitive_feature)
>>> y = np.array([0.01, 0.99, 0.98, 0.04])
>>> sensitive_feature = np.array([3, 1, 2, 3])
>>> print(wasserstein.transform(y, sensitive_feature, epsilon=0.2))
[0.26063673 0.69140959 0.68940959 0.26663673]
"""
_check_epsilon(epsilon)
_check_shape(y, sensitive_feature)
modalities_test = self._get_modalities(sensitive_feature) | _check_mod(self.modalities_calib, modalities_test) | 2 | 2023-12-06 14:43:41+00:00 | 4k |
Tlntin/booking_simulator | modelscope_agent/tools/code_interpreter_utils/language_map.py | [
{
"identifier": "AppleScript",
"path": "modelscope_agent/tools/code_interpreter_utils/languages/applescript.py",
"snippet": "class AppleScript(SubprocessCodeInterpreter):\n file_extension = 'applescript'\n proper_name = 'AppleScript'\n\n def __init__(self):\n super().__init__()\n ... | from .languages.applescript import AppleScript
from .languages.html import HTML
from .languages.javascript import JavaScript
from .languages.powershell import PowerShell
from .languages.python import Python
from .languages.r import R
from .languages.shell import Shell | 2,419 |
language_map = {
'python': Python,
'bash': Shell,
'shell': Shell,
'zsh': Shell,
|
language_map = {
'python': Python,
'bash': Shell,
'shell': Shell,
'zsh': Shell, | 'javascript': JavaScript, | 2 | 2023-12-12 04:24:00+00:00 | 4k |
chenchenygu/watermark-learnability | kgw_watermarking/watermark_reliability_release/normalizers.py | [
{
"identifier": "Categories",
"path": "kgw_watermarking/watermark_reliability_release/homoglyphs.py",
"snippet": "class Categories:\n \"\"\"\n Work with aliases from ISO 15924.\n https://en.wikipedia.org/wiki/ISO_15924#List_of_codes\n \"\"\"\n\n fpath = os.path.join(DATA_LOCATION, \"categ... | from collections import defaultdict
from functools import cache
from .homoglyphs import Categories, Languages, Homoglyphs
from nltk import pos_tag, word_tokenize # noqa
from nltk import pos_tag, word_tokenize
import re
import unicodedata
import spacy
import nltk
import nltk | 2,390 | """ Text-based normalizers, used to mitigate simple attacks against watermarking.
This implementation is unlikely to be a complete list of all possible exploits within the unicode standard,
it represents our best effort at the time of writing.
These normalizers can be used as stand-alone normalizers. They could be made to conform to HF tokenizers standard, but that would
require messing with the limited rust interface of tokenizers.NormalizedString
"""
def normalization_strategy_lookup(strategy_name: str) -> object:
if strategy_name == "unicode":
return UnicodeSanitizer()
elif strategy_name == "homoglyphs":
return HomoglyphCanonizer()
elif strategy_name == "truecase":
return TrueCaser()
class HomoglyphCanonizer:
"""Attempts to detect homoglyph attacks and find a consistent canon.
This function does so on a per-ISO-category level. Language-level would also be possible (see commented code).
"""
def __init__(self):
self.homoglyphs = None
def __call__(self, homoglyphed_str: str) -> str:
# find canon:
target_category, all_categories = self._categorize_text(homoglyphed_str)
homoglyph_table = self._select_canon_category_and_load(target_category, all_categories)
return self._sanitize_text(target_category, homoglyph_table, homoglyphed_str)
def _categorize_text(self, text: str) -> dict:
iso_categories = defaultdict(int)
# self.iso_languages = defaultdict(int)
for char in text:
| """ Text-based normalizers, used to mitigate simple attacks against watermarking.
This implementation is unlikely to be a complete list of all possible exploits within the unicode standard,
it represents our best effort at the time of writing.
These normalizers can be used as stand-alone normalizers. They could be made to conform to HF tokenizers standard, but that would
require messing with the limited rust interface of tokenizers.NormalizedString
"""
def normalization_strategy_lookup(strategy_name: str) -> object:
if strategy_name == "unicode":
return UnicodeSanitizer()
elif strategy_name == "homoglyphs":
return HomoglyphCanonizer()
elif strategy_name == "truecase":
return TrueCaser()
class HomoglyphCanonizer:
"""Attempts to detect homoglyph attacks and find a consistent canon.
This function does so on a per-ISO-category level. Language-level would also be possible (see commented code).
"""
def __init__(self):
self.homoglyphs = None
def __call__(self, homoglyphed_str: str) -> str:
# find canon:
target_category, all_categories = self._categorize_text(homoglyphed_str)
homoglyph_table = self._select_canon_category_and_load(target_category, all_categories)
return self._sanitize_text(target_category, homoglyph_table, homoglyphed_str)
def _categorize_text(self, text: str) -> dict:
iso_categories = defaultdict(int)
# self.iso_languages = defaultdict(int)
for char in text: | iso_categories[Categories.detect(char)] += 1 | 0 | 2023-12-07 16:45:33+00:00 | 4k |
skyoux/SemAIM | models/models_semaim.py | [
{
"identifier": "get_2d_sincos_pos_embed",
"path": "util/pos_embed.py",
"snippet": "def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):\n \"\"\"\n grid_size: int of the grid height and width\n return:\n pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, em... | import math
import torch
import torch.nn as nn
from functools import partial
from timm.models.vision_transformer import PatchEmbed, Mlp
from util.pos_embed import get_2d_sincos_pos_embed
from util.blocks import GaussianConv2d
from util.blocks import Block_SelfMask, Block_SelfCrossMask | 1,795 | # References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
class AimViT(nn.Module):
"""
Pretrain vision transformer backbone with AIM
parall encoder-decoder architecture
Modified by sky: use the blocks in ViT (+ mask) for encoders, which is more convinent for finetune, linear
modify the permutation form stochastic mask to center-out mask
"""
def __init__(self,
# vision transformer backbone
img_size=224, patch_size=16, in_chans=3,
embed_dim=1024, depth=24, num_heads=16, drop_path_rate=0., out_dim=768,
mlp_ratio=4., norm_layer=partial(nn.LayerNorm, eps=1e-6),
# aim
permutation_type='center2out', attention_type='cls',
# decoder
query_depth=12, share_weight=False,
prediction_head_type='MLP',
# loss function
gaussian_kernel_size=None, gaussian_sigma=None,
loss_type='L2', predict_feature='none', norm_pix_loss=True):
super().__init__()
# patch embedding
self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
num_patches = self.patch_embed.num_patches
self.patch_size = patch_size
# cls token
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# position embedding
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
# encoder
self.blocks = nn.ModuleList([
Block_SelfMask(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer, drop_path=dpr[i])
for i in range(depth)])
# decoder
if share_weight:
self.query_blocks = self.blocks
else:
self.query_blocks = nn.ModuleList([
| # References:
# timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm
# DeiT: https://github.com/facebookresearch/deit
# MAE: https://github.com/facebookresearch/mae
# --------------------------------------------------------
class AimViT(nn.Module):
"""
Pretrain vision transformer backbone with AIM
parall encoder-decoder architecture
Modified by sky: use the blocks in ViT (+ mask) for encoders, which is more convinent for finetune, linear
modify the permutation form stochastic mask to center-out mask
"""
def __init__(self,
# vision transformer backbone
img_size=224, patch_size=16, in_chans=3,
embed_dim=1024, depth=24, num_heads=16, drop_path_rate=0., out_dim=768,
mlp_ratio=4., norm_layer=partial(nn.LayerNorm, eps=1e-6),
# aim
permutation_type='center2out', attention_type='cls',
# decoder
query_depth=12, share_weight=False,
prediction_head_type='MLP',
# loss function
gaussian_kernel_size=None, gaussian_sigma=None,
loss_type='L2', predict_feature='none', norm_pix_loss=True):
super().__init__()
# patch embedding
self.patch_embed = PatchEmbed(img_size, patch_size, in_chans, embed_dim)
num_patches = self.patch_embed.num_patches
self.patch_size = patch_size
# cls token
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# position embedding
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim), requires_grad=False)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
# encoder
self.blocks = nn.ModuleList([
Block_SelfMask(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer, drop_path=dpr[i])
for i in range(depth)])
# decoder
if share_weight:
self.query_blocks = self.blocks
else:
self.query_blocks = nn.ModuleList([ | Block_SelfCrossMask(embed_dim, num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer, drop_path=dpr[i]) | 3 | 2023-12-10 15:17:11+00:00 | 4k |
boweniac/autogan | autogan/utils/compressed_messages_utils.py | [
{
"identifier": "generate_chat_completion",
"path": "autogan/oai/generate_utils.py",
"snippet": "def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\\\n ->... | import json
from typing import Dict, Optional, List
from autogan.oai.generate_utils import generate_chat_completion
from autogan.oai.config_utils import LLMConfig
from autogan.utils.response import ResponseFuncType | 3,398 |
Note: The compression process does not treat messages from the 'system' role specially, and they should be excluded from 'messages'.
注意:压缩过程并未对 system 角色的消息进行特殊处理,应将其排除在 messages 之外。
:param messages: The conversation content to be compressed, excluding 'system message' and 'focus message'. It should include 'role', 'content', 'tokens' fields.
待压缩的会话内容,应排除掉 system message 和 focus message。需包含 'role','content','tokens' 字段。
:param focus: The focus direction when compressing distant conversation records
压缩远期会话记录时的专注方向
:param summary_model_config: The LLM model configuration used to compress distant conversation records
用于压缩远期会话记录的 LLM 模型配置
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:param safe_size: 'max_messages_tokens' of 'agent main model' minus the tokens of 'system message' and 'focus message'. When 'safe_size' is less than 0, it will be forcibly defined as 1024
agent main model 的 max_messages_tokens 减去 system message 和 focus message 的 tokens,当 safe_size 小于 0 时,将被强制定义为 1024
:return:
--conversation_messages: The compressed conversation records, the difference from 'request_messages' is that the 'tokens' field of each message is retained
压缩后的会话记录,与 request_messages 的区别是保留了每条消息的 tokens 字段
--request_messages: The message content requested to 'llm', removed the 'tokens' field of each message
用于向 llm 请求的消息内容,去掉了每条消息的 tokens 字段
--total_tokens: The total tokens after compression
压缩后的整体tokens
"""
conversation_messages = []
request_messages = []
total_tokens = 0
if len(messages) == 0:
return None, None, None
if safe_size < 0:
safe_size = 1024
# Reverse traverse the message to extract recent original conversation content.
i = 0
for message in reversed(messages):
tokens = message["tokens"]
if total_tokens + tokens > int(safe_size * 0.5) and i != 0:
break
message_copy = message.copy()
message_copy.pop('tokens', None)
conversation_messages.insert(0, message)
request_messages.insert(0, message_copy)
total_tokens += tokens
i -= 1
# Compress the remaining messages as distant conversation records.
if len(messages) > (i * -1):
compressed_size = safe_size - total_tokens
if compressed_size <= 0:
compressed_size = 1024
# 压缩剩余 messages
content, tokens = generate_messages_summary(messages[:i], focus, summary_model_config, compressed_size, agent_name, response_func, stream_mode)
if content:
conversation_messages.insert(
0,
{'role': 'assistant', 'content': f'Earlier historical conversation records: {content}',
'tokens': tokens}
)
request_messages.insert(
0,
{'role': 'assistant', 'content': f'Earlier historical conversation records: {content}'}
)
total_tokens += tokens
if conversation_messages and request_messages:
return conversation_messages, request_messages, total_tokens
else:
return None, None, None
def generate_messages_summary(messages: List[Dict], focus: str, summary_model_config: LLMConfig, summary_size: int, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None) -> tuple[str, int]:
"""Generate message summary
生成消息摘要
First, traverse the content of messages in reverse order, extract the long-term conversation records to be compressed, until the cumulative tokens of the long-term conversation records to be compressed exceed the value of max_messages_tokens in summary_model_config
先反向遍历 messages 中的内容,提取出待压缩的远期会话记录,直至待压缩远期会话记录的累计 tokens 超过 summary_model_config 中 max_messages_tokens 的值
If the tokens of the first record of the long-term conversation to be compressed exceed max_messages_tokens, then directly extract the first conversation record
如待压缩远期会话的第一条记录,其 tokens 就超过了 max_messages_tokens, 则直接提取第一条会话记录
Then compress the extracted long-term conversation records. The size after compression is expected to be kept within the range of summary_size
之后对提取出的远期会话记录进行压缩,压缩后的大小被期望保持在 summary_size 范围之内
:param messages: Messages to be compressed
待压缩的消息
:param focus: The focus direction when generating a summary
生成摘要时的专注方向
:param summary_model_config: The LLM model configuration for compressing long-term conversation records
用于压缩远期会话记录的 LLM 模型配置
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:return:
--content: Compressed content
压缩后的内容
--tokens: tokens of compressed content
压缩内容的tokens
"""
system_prompt = "Please make a concise summary based on the following historical information. Make sure your summary does not exceed the max_tokens limit. And when summarizing, please focus on the latest message sent by the user."
# system_prompt = "请根据以下的历史信息,进行简洁的总结。请确保您的总结不超过 max_tokens 的限制。并且在总结时,请将你的关注点集中在用户最新发送的消息上。"
summary_messages = []
total_tokens = 0
# 反向遍历 message 提取内容
for index, message in enumerate(reversed(messages)):
tokens = message["tokens"]
if total_tokens + tokens > summary_model_config.max_messages_tokens and index != 0:
break
message_copy = message.copy()
message_copy.pop('tokens', None)
summary_messages.insert(0, message_copy)
total_tokens += tokens
# 设置用户提示词
user_prompt = f"""max_tokens: {summary_size}\n\nHistorical information: {json.dumps(summary_messages)}\n\nUser's latest message: {focus}"""
chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': user_prompt}]
|
def compressed_messages(messages: List[Dict], focus: str, summary_model_config: LLMConfig, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None,
safe_size: Optional[int] = 4096) -> tuple[Optional[list], Optional[list], Optional[int]]:
"""Compress Conversation Context
压缩会话上下文
The content to be compressed is divided into: recent original conversation content, and distant content that needs to be compressed.
待压缩的会话内容会被分为:近期的原始会话内容、远期需要压缩的会话内容。
When compressing distant conversation records, attention is focused on the 'focus'
在压缩远期会话记录时,会将注意力集中于 focus
**Recent Original Conversation Content:**
近期原始会话内容:
First, traverse the 'messages' in reverse order, extract the recent conversation records, until the cumulative tokens of the conversation records exceed 50% of the 'safe_size'
先反向遍历 messages,提取近期的会话记录,直至会话记录的累计 tokens 超过 safe_size 的 50%
If the tokens of the first recent conversation record exceed 50% of the 'safe_size', then directly extract the first recent conversation record
如近期第一条会话记录的 tokens 就超过了 safe_size 的 50% 则直接提取近期第一条会话记录
**Distant Compressed Conversation Content:**
远期压缩会话内容:
The remaining conversation records will be compressed as distant conversation records. The size after compression is expected to be within the range of ('safe_size' - cumulative original conversation tokens)
剩余的会话记录将作为远期会话记录进行压缩,压缩后的大小被期望保持在 (safe_size - 累计原始会话 tokens) 范围之内
If the value of 'safe_size' - cumulative original conversation tokens is less than 0, then the size after compression is expected to be 1024 tokens
如 safe_size - 累计原始会话 tokens 的值小于 0 则压缩后的大小被期望保持在 1024 tokens
Note: The compression process does not treat messages from the 'system' role specially, and they should be excluded from 'messages'.
注意:压缩过程并未对 system 角色的消息进行特殊处理,应将其排除在 messages 之外。
:param messages: The conversation content to be compressed, excluding 'system message' and 'focus message'. It should include 'role', 'content', 'tokens' fields.
待压缩的会话内容,应排除掉 system message 和 focus message。需包含 'role','content','tokens' 字段。
:param focus: The focus direction when compressing distant conversation records
压缩远期会话记录时的专注方向
:param summary_model_config: The LLM model configuration used to compress distant conversation records
用于压缩远期会话记录的 LLM 模型配置
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:param safe_size: 'max_messages_tokens' of 'agent main model' minus the tokens of 'system message' and 'focus message'. When 'safe_size' is less than 0, it will be forcibly defined as 1024
agent main model 的 max_messages_tokens 减去 system message 和 focus message 的 tokens,当 safe_size 小于 0 时,将被强制定义为 1024
:return:
--conversation_messages: The compressed conversation records, the difference from 'request_messages' is that the 'tokens' field of each message is retained
压缩后的会话记录,与 request_messages 的区别是保留了每条消息的 tokens 字段
--request_messages: The message content requested to 'llm', removed the 'tokens' field of each message
用于向 llm 请求的消息内容,去掉了每条消息的 tokens 字段
--total_tokens: The total tokens after compression
压缩后的整体tokens
"""
conversation_messages = []
request_messages = []
total_tokens = 0
if len(messages) == 0:
return None, None, None
if safe_size < 0:
safe_size = 1024
# Reverse traverse the message to extract recent original conversation content.
i = 0
for message in reversed(messages):
tokens = message["tokens"]
if total_tokens + tokens > int(safe_size * 0.5) and i != 0:
break
message_copy = message.copy()
message_copy.pop('tokens', None)
conversation_messages.insert(0, message)
request_messages.insert(0, message_copy)
total_tokens += tokens
i -= 1
# Compress the remaining messages as distant conversation records.
if len(messages) > (i * -1):
compressed_size = safe_size - total_tokens
if compressed_size <= 0:
compressed_size = 1024
# 压缩剩余 messages
content, tokens = generate_messages_summary(messages[:i], focus, summary_model_config, compressed_size, agent_name, response_func, stream_mode)
if content:
conversation_messages.insert(
0,
{'role': 'assistant', 'content': f'Earlier historical conversation records: {content}',
'tokens': tokens}
)
request_messages.insert(
0,
{'role': 'assistant', 'content': f'Earlier historical conversation records: {content}'}
)
total_tokens += tokens
if conversation_messages and request_messages:
return conversation_messages, request_messages, total_tokens
else:
return None, None, None
def generate_messages_summary(messages: List[Dict], focus: str, summary_model_config: LLMConfig, summary_size: int, agent_name: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None) -> tuple[str, int]:
"""Generate message summary
生成消息摘要
First, traverse the content of messages in reverse order, extract the long-term conversation records to be compressed, until the cumulative tokens of the long-term conversation records to be compressed exceed the value of max_messages_tokens in summary_model_config
先反向遍历 messages 中的内容,提取出待压缩的远期会话记录,直至待压缩远期会话记录的累计 tokens 超过 summary_model_config 中 max_messages_tokens 的值
If the tokens of the first record of the long-term conversation to be compressed exceed max_messages_tokens, then directly extract the first conversation record
如待压缩远期会话的第一条记录,其 tokens 就超过了 max_messages_tokens, 则直接提取第一条会话记录
Then compress the extracted long-term conversation records. The size after compression is expected to be kept within the range of summary_size
之后对提取出的远期会话记录进行压缩,压缩后的大小被期望保持在 summary_size 范围之内
:param messages: Messages to be compressed
待压缩的消息
:param focus: The focus direction when generating a summary
生成摘要时的专注方向
:param summary_model_config: The LLM model configuration for compressing long-term conversation records
用于压缩远期会话记录的 LLM 模型配置
:param agent_name:
:param response_func: Used to return results to the interface or terminal.
用于向接口或终端返回结果
:param stream_mode:
:return:
--content: Compressed content
压缩后的内容
--tokens: tokens of compressed content
压缩内容的tokens
"""
system_prompt = "Please make a concise summary based on the following historical information. Make sure your summary does not exceed the max_tokens limit. And when summarizing, please focus on the latest message sent by the user."
# system_prompt = "请根据以下的历史信息,进行简洁的总结。请确保您的总结不超过 max_tokens 的限制。并且在总结时,请将你的关注点集中在用户最新发送的消息上。"
summary_messages = []
total_tokens = 0
# 反向遍历 message 提取内容
for index, message in enumerate(reversed(messages)):
tokens = message["tokens"]
if total_tokens + tokens > summary_model_config.max_messages_tokens and index != 0:
break
message_copy = message.copy()
message_copy.pop('tokens', None)
summary_messages.insert(0, message_copy)
total_tokens += tokens
# 设置用户提示词
user_prompt = f"""max_tokens: {summary_size}\n\nHistorical information: {json.dumps(summary_messages)}\n\nUser's latest message: {focus}"""
chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': user_prompt}] | return generate_chat_completion(summary_model_config, chat_messages, agent_name, "text_summary", response_func, | 0 | 2023-12-06 03:24:34+00:00 | 4k |
JingHao99/IDR-Ingredients-oriented-Degradation-Reformulation | models/archs/IDR_restormer_arch.py | [
{
"identifier": "LayerNorm",
"path": "models/utils/arch_util.py",
"snippet": "class LayerNorm(nn.Module):\r\n \"\"\"\r\n x: B,C,H,W\r\n return: B,C,H,W\r\n process: LayerNorm(C)\r\n Adopted: Restormer\r\n \"\"\"\r\n def __init__(self, dim, LayerNorm_type):\r\n super(LayerNorm... | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torchvision.transforms import Resize
from pdb import set_trace as stx
from models.utils.arch_util import LayerNorm
from models.utils.transformerBCHW_util import Downsample, Upsample, MDTA_TransformerBlock, OverlapPatchEmbed_Keep
from models.utils.module import Key_TransformerBlock, PI_MLP_Mixer, process_USV
from einops import rearrange | 2,501 |
##########################################################################
class IDR_restormer(nn.Module):
def __init__(self,
inp_channels=3,
out_channels=3,
dim=48,
num_blocks=[4, 6, 6, 8],
num_refinement_blocks=4,
heads=[1, 2, 4, 8],
ffn_expansion_factor=2.66,
bias=False,
LayerNorm_type='WithBias', ## Other option 'BiasFree'
num_degra_queries = 24,
keep_degra = 48,
degra_type = 5,
sam = True,
ops_type = 5,
pred = True
):
super(IDR_restormer, self).__init__()
self.de_dict = {'denoise': 0, 'denoise_15': 0, 'denoise_25': 0, 'denoise_50': 0, 'derain': 1, 'dehaze': 2, 'deblur': 3, 'delowlight': 4, 'clean': 5}
self.patch_embed =OverlapPatchEmbed_Keep(inp_channels, dim)
self.encoder_level1 = nn.Sequential(*[
MDTA_TransformerBlock(dim=dim, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias,
LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])
|
##########################################################################
class IDR_restormer(nn.Module):
def __init__(self,
inp_channels=3,
out_channels=3,
dim=48,
num_blocks=[4, 6, 6, 8],
num_refinement_blocks=4,
heads=[1, 2, 4, 8],
ffn_expansion_factor=2.66,
bias=False,
LayerNorm_type='WithBias', ## Other option 'BiasFree'
num_degra_queries = 24,
keep_degra = 48,
degra_type = 5,
sam = True,
ops_type = 5,
pred = True
):
super(IDR_restormer, self).__init__()
self.de_dict = {'denoise': 0, 'denoise_15': 0, 'denoise_25': 0, 'denoise_50': 0, 'derain': 1, 'dehaze': 2, 'deblur': 3, 'delowlight': 4, 'clean': 5}
self.patch_embed =OverlapPatchEmbed_Keep(inp_channels, dim)
self.encoder_level1 = nn.Sequential(*[
MDTA_TransformerBlock(dim=dim, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias,
LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])
| self.down1_2 = Downsample(dim) ## From Level 1 to Level 2 | 1 | 2023-12-07 10:58:34+00:00 | 4k |
TACJu/Compositor | Compositor_Mask2Former/mask2former/modeling/criterion.py | [
{
"identifier": "is_dist_avail_and_initialized",
"path": "Compositor_Mask2Former/mask2former/utils/misc.py",
"snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True"
},
{
"id... | import logging
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.projects.point_rend.point_features import (
get_uncertain_point_coords_with_randomness,
point_sample,
)
from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list | 1,682 | classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(-1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_masks
dice_loss_jit = torch.jit.script(
dice_loss
) # type: torch.jit.ScriptModule
def sigmoid_ce_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
return loss.mean(1).sum() / num_masks
sigmoid_ce_loss_jit = torch.jit.script(
sigmoid_ce_loss
) # type: torch.jit.ScriptModule
def calculate_uncertainty(logits):
"""
We estimate uncerainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images and C is
the number of foreground classes. The values are logits.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
assert logits.shape[1] == 1
gt_class_logits = logits.clone()
return -(torch.abs(gt_class_logits))
class SetCriterion(nn.Module):
"""This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,
num_points, oversample_ratio, importance_sample_ratio):
"""Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
# pointwise mask loss parameters
self.num_points = num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
def loss_labels(self, outputs, targets, indices, num_masks):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert "pred_logits" in outputs
src_logits = outputs["pred_logits"].float()
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {"loss_ce": loss_ce}
return losses
def loss_masks(self, outputs, targets, indices, num_masks):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss
| # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py
"""
MaskFormer criterion.
"""
def dice_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Compute the DICE loss, similar to generalized IOU for masks
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
"""
inputs = inputs.sigmoid()
inputs = inputs.flatten(1)
numerator = 2 * (inputs * targets).sum(-1)
denominator = inputs.sum(-1) + targets.sum(-1)
loss = 1 - (numerator + 1) / (denominator + 1)
return loss.sum() / num_masks
dice_loss_jit = torch.jit.script(
dice_loss
) # type: torch.jit.ScriptModule
def sigmoid_ce_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
num_masks: float,
):
"""
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
Returns:
Loss tensor
"""
loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
return loss.mean(1).sum() / num_masks
sigmoid_ce_loss_jit = torch.jit.script(
sigmoid_ce_loss
) # type: torch.jit.ScriptModule
def calculate_uncertainty(logits):
"""
We estimate uncerainty as L1 distance between 0.0 and the logit prediction in 'logits' for the
foreground class in `classes`.
Args:
logits (Tensor): A tensor of shape (R, 1, ...) for class-specific or
class-agnostic, where R is the total number of predicted masks in all images and C is
the number of foreground classes. The values are logits.
Returns:
scores (Tensor): A tensor of shape (R, 1, ...) that contains uncertainty scores with
the most uncertain locations having the highest uncertainty score.
"""
assert logits.shape[1] == 1
gt_class_logits = logits.clone()
return -(torch.abs(gt_class_logits))
class SetCriterion(nn.Module):
"""This class computes the loss for DETR.
The process happens in two steps:
1) we compute hungarian assignment between ground truth boxes and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,
num_points, oversample_ratio, importance_sample_ratio):
"""Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer("empty_weight", empty_weight)
# pointwise mask loss parameters
self.num_points = num_points
self.oversample_ratio = oversample_ratio
self.importance_sample_ratio = importance_sample_ratio
def loss_labels(self, outputs, targets, indices, num_masks):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
"""
assert "pred_logits" in outputs
src_logits = outputs["pred_logits"].float()
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(
src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {"loss_ce": loss_ce}
return losses
def loss_masks(self, outputs, targets, indices, num_masks):
"""Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
"""
assert "pred_masks" in outputs
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs["pred_masks"]
src_masks = src_masks[src_idx]
masks = [t["masks"] for t in targets]
# TODO use valid to mask invalid areas due to padding in loss | target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() | 1 | 2023-12-12 11:49:28+00:00 | 4k |
Mirascope/mirascope | mirascope/cli/commands.py | [
{
"identifier": "MirascopeCommand",
"path": "mirascope/enums.py",
"snippet": "class MirascopeCommand(_Enum):\n \"\"\"CLI commands to be executed.\"\"\"\n\n ADD = \"add\"\n USE = \"use\"\n STATUS = \"status\"\n INIT = \"init\""
},
{
"identifier": "CURRENT_REVISION_KEY",
"path":... | import os
from importlib.resources import files
from pathlib import Path
from jinja2 import Template
from ..enums import MirascopeCommand
from .constants import CURRENT_REVISION_KEY, LATEST_REVISION_KEY
from .schemas import MirascopeSettings
from .utils import (
check_status,
find_prompt_path,
get_prompt_versions,
get_user_mirascope_settings,
update_version_text_file,
write_prompt_to_template,
) | 2,103 | """Commands for Mirascope CLI.
This module contains the commands for the Mirascope CLI. The commands are add, status,
use, and init. See the documentation for each command for more information.
"""
# TODO: Add something like Typer to make commands easier to implement
def add(args) -> None:
"""Adds the given prompt to the specified version directory.
The contents of the prompt in the user's prompts directory are copied to the version
directory with the next revision number, and the version file is updated with the
new revision.
Args:
args: The command line arguments for the `add` command, containing:
- `prompt`: The name of the prompt to add.
Raises:
FileNotFoundError: If the file is not found in the specified prompts directory.
"""
mirascope_settings = get_user_mirascope_settings()
version_directory_path = mirascope_settings.versions_location
prompt_directory_path = mirascope_settings.prompts_location
version_file_name = mirascope_settings.version_file_name
directory_name: str = args.prompt
# Check status before continuing
used_prompt_path = check_status(mirascope_settings, directory_name)
if not used_prompt_path:
print("No changes detected.")
return
class_directory = os.path.join(version_directory_path, directory_name)
# Create version directory if it doesn't exist
if not os.path.exists(class_directory):
os.makedirs(class_directory)
version_file_path = os.path.join(class_directory, version_file_name)
versions = get_prompt_versions(version_file_path)
# Open user's prompt file
with open(
f"{prompt_directory_path}/{directory_name}.py", "r+", encoding="utf-8"
) as file:
# Increment revision id
if versions.latest_revision is None:
# first revision
revision_id = "0001"
else:
# default branch with incrementation
latest_revision_id = versions.latest_revision
revision_id = f"{int(latest_revision_id)+1:04}"
# Create revision file
with open(
f"{class_directory}/{revision_id}_{directory_name}.py",
"w+",
encoding="utf-8",
) as file2:
custom_variables = {
"prev_revision_id": versions.current_revision,
"revision_id": revision_id,
}
file2.write(
write_prompt_to_template(
file.read(), MirascopeCommand.ADD, custom_variables
)
)
keys_to_update = {
CURRENT_REVISION_KEY: revision_id,
LATEST_REVISION_KEY: revision_id,
}
| """Commands for Mirascope CLI.
This module contains the commands for the Mirascope CLI. The commands are add, status,
use, and init. See the documentation for each command for more information.
"""
# TODO: Add something like Typer to make commands easier to implement
def add(args) -> None:
"""Adds the given prompt to the specified version directory.
The contents of the prompt in the user's prompts directory are copied to the version
directory with the next revision number, and the version file is updated with the
new revision.
Args:
args: The command line arguments for the `add` command, containing:
- `prompt`: The name of the prompt to add.
Raises:
FileNotFoundError: If the file is not found in the specified prompts directory.
"""
mirascope_settings = get_user_mirascope_settings()
version_directory_path = mirascope_settings.versions_location
prompt_directory_path = mirascope_settings.prompts_location
version_file_name = mirascope_settings.version_file_name
directory_name: str = args.prompt
# Check status before continuing
used_prompt_path = check_status(mirascope_settings, directory_name)
if not used_prompt_path:
print("No changes detected.")
return
class_directory = os.path.join(version_directory_path, directory_name)
# Create version directory if it doesn't exist
if not os.path.exists(class_directory):
os.makedirs(class_directory)
version_file_path = os.path.join(class_directory, version_file_name)
versions = get_prompt_versions(version_file_path)
# Open user's prompt file
with open(
f"{prompt_directory_path}/{directory_name}.py", "r+", encoding="utf-8"
) as file:
# Increment revision id
if versions.latest_revision is None:
# first revision
revision_id = "0001"
else:
# default branch with incrementation
latest_revision_id = versions.latest_revision
revision_id = f"{int(latest_revision_id)+1:04}"
# Create revision file
with open(
f"{class_directory}/{revision_id}_{directory_name}.py",
"w+",
encoding="utf-8",
) as file2:
custom_variables = {
"prev_revision_id": versions.current_revision,
"revision_id": revision_id,
}
file2.write(
write_prompt_to_template(
file.read(), MirascopeCommand.ADD, custom_variables
)
)
keys_to_update = {
CURRENT_REVISION_KEY: revision_id,
LATEST_REVISION_KEY: revision_id,
} | update_version_text_file(version_file_path, keys_to_update) | 8 | 2023-12-05 01:22:34+00:00 | 4k |
Prismadic/magnet | magnet/ron/llm.py | [
{
"identifier": "_f",
"path": "magnet/utils/globals.py",
"snippet": "def _f(\n tag: str = None,\n body: any = None,\n no_print: bool = False,\n luxe: bool = False\n):\n \"\"\"\n The `_f` function is a logging utility that prints messages with different tags and colors based on\n the... | from magnet.utils.globals import _f
from magnet.utils.huggingface import InferenceAPI
from magnet.utils.local import LocalInference
from magnet.utils.prompts import *
from magnet.utils.data_classes import *
import requests, json | 2,240 |
class Generate:
def __init__(self, server: str = None, field = None, hf_token: str = None):
"""
Initializes the Generate class.
Args:
server (str): The URL of the server to be used for generating the response. Default is None.
field: Placeholder field that can be used for future implementation.
hf_token (str): The Hugging Face token to be used for authentication when using the local inference API. Default is None.
"""
self.server = server if not hf_token else None
self.field = field
self.token = hf_token
async def on(self):
"""
Placeholder method that can be used for future implementation.
"""
if self.field:
pass # todo
async def ask(self
, m: str = "mistralai/Mistral-7B-Instruct-v0.1"
, q: str = "What is your itinerary?"
, t: float = 1.0
, n: int = 8096
, p: str = "qa_ref"
, cb: object = None
, docs: list = []
, v: bool = False
):
"""
Generates a response based on a given prompt using a language model.
Args:
m (str): The model name or identifier to be used for generating the response. Default is "mistralai/Mistral-7B-Instruct-v0.1".
q (str): The question or prompt for which a response is to be generated. Default is "What is your itinerary?".
t (float): The temperature parameter controlling the randomness of the generated response. Default is 1.0.
n (int): The maximum number of new tokens to be generated in the response. Default is 8096.
p (str): The type of prompt to be used for generating the response. Default is "qa_ref".
cb (object): An optional callback function to be executed with the generated response. Default is None.
docs (list): A list of additional context or documents to be used for generating the response. Default is an empty list.
v (bool): A flag indicating whether to use the server for generating the response. Default is False.
Returns:
str: The generated response.
Raises:
Exception: If an error occurs during the execution of the method.
"""
prompt = getattr(globals()['Prompts'](), p)(docs,q)
_f('warn', '(p + q + d) > n') if len(prompt) > n else None
payload = json.dumps({
"model": m,
"prompt": prompt,
"inputs": prompt,
"parameters": {
"max_new_tokens": n
, "temperature": t,
}
})
headers = {
'Content-Type': 'application/json'
}
if self.token:
|
class Generate:
def __init__(self, server: str = None, field = None, hf_token: str = None):
"""
Initializes the Generate class.
Args:
server (str): The URL of the server to be used for generating the response. Default is None.
field: Placeholder field that can be used for future implementation.
hf_token (str): The Hugging Face token to be used for authentication when using the local inference API. Default is None.
"""
self.server = server if not hf_token else None
self.field = field
self.token = hf_token
async def on(self):
"""
Placeholder method that can be used for future implementation.
"""
if self.field:
pass # todo
async def ask(self
, m: str = "mistralai/Mistral-7B-Instruct-v0.1"
, q: str = "What is your itinerary?"
, t: float = 1.0
, n: int = 8096
, p: str = "qa_ref"
, cb: object = None
, docs: list = []
, v: bool = False
):
"""
Generates a response based on a given prompt using a language model.
Args:
m (str): The model name or identifier to be used for generating the response. Default is "mistralai/Mistral-7B-Instruct-v0.1".
q (str): The question or prompt for which a response is to be generated. Default is "What is your itinerary?".
t (float): The temperature parameter controlling the randomness of the generated response. Default is 1.0.
n (int): The maximum number of new tokens to be generated in the response. Default is 8096.
p (str): The type of prompt to be used for generating the response. Default is "qa_ref".
cb (object): An optional callback function to be executed with the generated response. Default is None.
docs (list): A list of additional context or documents to be used for generating the response. Default is an empty list.
v (bool): A flag indicating whether to use the server for generating the response. Default is False.
Returns:
str: The generated response.
Raises:
Exception: If an error occurs during the execution of the method.
"""
prompt = getattr(globals()['Prompts'](), p)(docs,q)
_f('warn', '(p + q + d) > n') if len(prompt) > n else None
payload = json.dumps({
"model": m,
"prompt": prompt,
"inputs": prompt,
"parameters": {
"max_new_tokens": n
, "temperature": t,
}
})
headers = {
'Content-Type': 'application/json'
}
if self.token: | llm = InferenceAPI(self.token) | 1 | 2023-12-12 14:11:21+00:00 | 4k |
9tailwolf/Racing-Line-Optimization | classes/TrackPlotter.py | [
{
"identifier": "Vector",
"path": "classes/Vector.py",
"snippet": "class Vector:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return 'Vector(' + str(self.x) + ',' + str(self.y) + ')'\n\n def __mul__(self, other):\n return Vector(self... | import matplotlib.pyplot as plt
import pandas as pd
from classes.Vector import Vector
from classes.Optimizer import Optimizer | 2,386 |
class TrackPlotter:
def __init__(self, circuit):
self.circuit = circuit
self.track = pd.read_csv('./tracks/' + self.circuit + '.csv')
self.track.columns = ['x','y','r','l']
self.make_track()
def make_track(self):
right_x, right_y, left_x, left_y,vector = [], [], [], [],[]
for i in range(self.track.shape[0]):
if i == self.track.shape[0] - 1:
|
class TrackPlotter:
def __init__(self, circuit):
self.circuit = circuit
self.track = pd.read_csv('./tracks/' + self.circuit + '.csv')
self.track.columns = ['x','y','r','l']
self.make_track()
def make_track(self):
right_x, right_y, left_x, left_y,vector = [], [], [], [],[]
for i in range(self.track.shape[0]):
if i == self.track.shape[0] - 1: | v = Vector(self.track['x'][0] - self.track['x'][i], self.track['y'][0] - self.track['y'][i]) | 0 | 2023-12-12 13:35:42+00:00 | 4k |
pdelboca/django-dcat | dcat/management/commands/import_from_datajson.py | [
{
"identifier": "Catalog",
"path": "dcat/models.py",
"snippet": "class Catalog(models.Model):\n \"\"\"A catalogue that hosts the Datasets or Data Services being described.\"\"\"\n\n # Mandatory properties\n title = models.CharField(max_length=255, help_text=\"A name given to the Catalogue.\")\n... | import json
import pathlib
from os import listdir
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.base import ContentFile
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from dcat.models import (
Catalog,
Dataset,
Distribution,
Agent,
MediaType,
LicenceDocument,
DataTheme,
Keyword,
) | 2,765 |
class Command(BaseCommand):
help = "Import data from a DCAT-US file provided by ckanext-datajson."
def _get_content_file(self, dataset, distribution, datapath="data"):
"""Returns a ContentFile to be added to the django model.
Requires the following structure:
- datapath/
- {dataset_identifier}/
- {distribution_identifier}/
- some-file.csv
"""
file_folder = (
f'{datapath}/{dataset.get("identifier")}/{distribution.get("identifier")}'
)
file = None
try:
local_file_name = listdir(file_folder)[0]
file_path = f"{file_folder}/{local_file_name}"
file = ContentFile(
open(file_path, mode="rb").read(), name=distribution.get("fileName")
)
except IndexError:
msg = f'{distribution.get("identifier")} folder does not have a file'
self.stdout.write(self.style.ERROR(msg))
return file
def add_arguments(self, parser):
parser.add_argument(
"--file", type=open, help="Path to the data.json file", default="data.json"
)
parser.add_argument(
"--datapath",
type=pathlib.Path,
help="Path to the data folder",
default="data",
)
def handle(self, *args, **options):
datapath = options.get("datapath")
if not datapath.exists():
msg = f"{datapath} path to data does not exist."
self.stdout.write(self.style.ERROR(msg))
return
with options.get("file") as file:
data = json.load(file)
title = data.get("title")
description = data.get("description")
publisher, _ = Agent.objects.get_or_create(
name=data.get("publisher").get("name"),
mbox=data.get("publisher").get("mbox", ""),
)
catalog_licence, _ = LicenceDocument.objects.get_or_create(
label=data.get("license")
)
catalog = Catalog.objects.create(
title=title,
description=description,
publisher=publisher,
licence=catalog_licence,
)
for theme in data.get("themeTaxonomy", []):
theme_id = theme.get("id")
theme_label = theme.get("label")
theme_description = theme.get("description")
theme_obj, _ = DataTheme.objects.get_or_create(
code=theme_id,
label=theme_label,
description=theme_description,
)
catalog.themes.add(theme_obj)
# Import Datasets
datasets = data.get("dataset")
for dataset in datasets:
dataset_info = {}
dataset_info["title"] = dataset.get("title")
dataset_info["description"] = dataset.get("description")
dataset_info["publisher"], _ = Agent.objects.get_or_create(
name=dataset.get("publisher").get("name"),
mbox=dataset.get("publisher").get("mbox", ""),
)
dataset_info["catalog"] = catalog
|
class Command(BaseCommand):
help = "Import data from a DCAT-US file provided by ckanext-datajson."
def _get_content_file(self, dataset, distribution, datapath="data"):
"""Returns a ContentFile to be added to the django model.
Requires the following structure:
- datapath/
- {dataset_identifier}/
- {distribution_identifier}/
- some-file.csv
"""
file_folder = (
f'{datapath}/{dataset.get("identifier")}/{distribution.get("identifier")}'
)
file = None
try:
local_file_name = listdir(file_folder)[0]
file_path = f"{file_folder}/{local_file_name}"
file = ContentFile(
open(file_path, mode="rb").read(), name=distribution.get("fileName")
)
except IndexError:
msg = f'{distribution.get("identifier")} folder does not have a file'
self.stdout.write(self.style.ERROR(msg))
return file
def add_arguments(self, parser):
parser.add_argument(
"--file", type=open, help="Path to the data.json file", default="data.json"
)
parser.add_argument(
"--datapath",
type=pathlib.Path,
help="Path to the data folder",
default="data",
)
def handle(self, *args, **options):
datapath = options.get("datapath")
if not datapath.exists():
msg = f"{datapath} path to data does not exist."
self.stdout.write(self.style.ERROR(msg))
return
with options.get("file") as file:
data = json.load(file)
title = data.get("title")
description = data.get("description")
publisher, _ = Agent.objects.get_or_create(
name=data.get("publisher").get("name"),
mbox=data.get("publisher").get("mbox", ""),
)
catalog_licence, _ = LicenceDocument.objects.get_or_create(
label=data.get("license")
)
catalog = Catalog.objects.create(
title=title,
description=description,
publisher=publisher,
licence=catalog_licence,
)
for theme in data.get("themeTaxonomy", []):
theme_id = theme.get("id")
theme_label = theme.get("label")
theme_description = theme.get("description")
theme_obj, _ = DataTheme.objects.get_or_create(
code=theme_id,
label=theme_label,
description=theme_description,
)
catalog.themes.add(theme_obj)
# Import Datasets
datasets = data.get("dataset")
for dataset in datasets:
dataset_info = {}
dataset_info["title"] = dataset.get("title")
dataset_info["description"] = dataset.get("description")
dataset_info["publisher"], _ = Agent.objects.get_or_create(
name=dataset.get("publisher").get("name"),
mbox=dataset.get("publisher").get("mbox", ""),
)
dataset_info["catalog"] = catalog | dataset_created = Dataset.objects.create(**dataset_info) | 1 | 2023-12-10 17:26:39+00:00 | 4k |
ebb-earl-co/tidal-wave | tidal_wave/login.py | [
{
"identifier": "BearerAuth",
"path": "tidal_wave/models.py",
"snippet": "class BearerAuth(AuthBase):\n \"\"\"A class to be passed to the `auth` argument in a `requests.Session`\n constructor\"\"\"\n\n def __init__(self, token: str):\n self.token = token\n\n def __call__(self, r):\n ... | import base64
import json
import logging
import sys
import requests
import typer
from enum import Enum
from pathlib import Path
from typing import Dict, Optional, Set, Tuple
from .models import BearerAuth, SessionsEndpointResponseJSON
from .oauth import (
TOKEN_DIR_PATH,
BearerToken,
TidalOauth,
TokenException,
)
from .utils import TIDAL_API_URL | 3,227 |
COMMON_HEADERS: Dict[str, str] = {"Accept-Encoding": "gzip, deflate, br"}
logger = logging.getLogger(__name__)
class AudioFormat(str, Enum):
sony_360_reality_audio = "360"
dolby_atmos = "Atmos"
hi_res = "HiRes"
mqa = "MQA"
lossless = "Lossless"
high = "High"
low = "Low"
class LogLevel(str, Enum):
debug = "DEBUG" # 10
info = "INFO" # 20
warning = "WARNING" # 30
error = "ERROR" # 40
critical = "CRITICAL" # 50
def load_token_from_disk(
token_path: Path = TOKEN_DIR_PATH / "android-tidal.token",
) -> Optional[str]:
"""Attempt to read `token_path` from disk and decoded its contents
as JSON"""
if not token_path.exists():
logger.warning(f"FileNotFoundError: {str(token_path.absolute())}")
return
token_file_contents: str = token_path.read_bytes()
decoded_token_file_contents: str = base64.b64decode(token_file_contents).decode(
"utf-8"
)
try:
bearer_token_json: dict = json.loads(decoded_token_file_contents)
except json.decoder.JSONDecodeError:
logger.warning(f"File '{token_path.absolute()}' cannot be parsed as JSON")
return
else:
return bearer_token_json.get("access_token")
def validate_token(
token: str, headers: Dict[str, str] = COMMON_HEADERS
) -> Optional[requests.Session]:
"""Send a GET request to the /sessions endpoint of Tidal's API.
If `token` is valid, use the SessionsEndpointResponseJSON object
that was returned from the API to create a requests.Session object with
some additional attributes. Otherwise, return None"""
auth_headers: Dict[str, str] = {**headers, "Authorization": f"Bearer {token}"}
sess: Optional[requests.Session] = None
with requests.get(url=f"{TIDAL_API_URL}/sessions", headers=auth_headers) as r:
try:
r.raise_for_status()
except requests.HTTPError as h:
if r.status_code == 401:
logger.error("Token is not authorized")
return sess
else:
logger.exception(h)
return sess
serj = SessionsEndpointResponseJSON.from_dict(r.json())
logger.debug("Adding data from API reponse to session object:")
logger.debug(serj)
sess: requests.Session = requests.Session()
sess.headers: Dict[str, str] = headers
|
COMMON_HEADERS: Dict[str, str] = {"Accept-Encoding": "gzip, deflate, br"}
logger = logging.getLogger(__name__)
class AudioFormat(str, Enum):
sony_360_reality_audio = "360"
dolby_atmos = "Atmos"
hi_res = "HiRes"
mqa = "MQA"
lossless = "Lossless"
high = "High"
low = "Low"
class LogLevel(str, Enum):
debug = "DEBUG" # 10
info = "INFO" # 20
warning = "WARNING" # 30
error = "ERROR" # 40
critical = "CRITICAL" # 50
def load_token_from_disk(
token_path: Path = TOKEN_DIR_PATH / "android-tidal.token",
) -> Optional[str]:
"""Attempt to read `token_path` from disk and decoded its contents
as JSON"""
if not token_path.exists():
logger.warning(f"FileNotFoundError: {str(token_path.absolute())}")
return
token_file_contents: str = token_path.read_bytes()
decoded_token_file_contents: str = base64.b64decode(token_file_contents).decode(
"utf-8"
)
try:
bearer_token_json: dict = json.loads(decoded_token_file_contents)
except json.decoder.JSONDecodeError:
logger.warning(f"File '{token_path.absolute()}' cannot be parsed as JSON")
return
else:
return bearer_token_json.get("access_token")
def validate_token(
token: str, headers: Dict[str, str] = COMMON_HEADERS
) -> Optional[requests.Session]:
"""Send a GET request to the /sessions endpoint of Tidal's API.
If `token` is valid, use the SessionsEndpointResponseJSON object
that was returned from the API to create a requests.Session object with
some additional attributes. Otherwise, return None"""
auth_headers: Dict[str, str] = {**headers, "Authorization": f"Bearer {token}"}
sess: Optional[requests.Session] = None
with requests.get(url=f"{TIDAL_API_URL}/sessions", headers=auth_headers) as r:
try:
r.raise_for_status()
except requests.HTTPError as h:
if r.status_code == 401:
logger.error("Token is not authorized")
return sess
else:
logger.exception(h)
return sess
serj = SessionsEndpointResponseJSON.from_dict(r.json())
logger.debug("Adding data from API reponse to session object:")
logger.debug(serj)
sess: requests.Session = requests.Session()
sess.headers: Dict[str, str] = headers | sess.auth: BearerAuth = BearerAuth(token=token) | 0 | 2023-12-12 21:50:25+00:00 | 4k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.