repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9 values |
|---|---|---|---|---|---|---|---|---|---|---|
zju3dv/nr_in_a_room | optim/patch_perceptual.py | [
{
"identifier": "perceptual_model",
"path": "models/perceptual_model.py",
"snippet": "class VGG16_for_Perceptual(nn.Module):\nclass CLIP_for_Perceptual(nn.Module):\n def __init__(self, requires_grad=False, n_layers=[2, 4, 14, 21]):\n def forward(self, x):\n def perceptual_loss(\n self,\n... | import torch
import numpy as np
import cv2
from models import perceptual_model
from models.perceptual_model import get_perceptual_loss, VGG16_for_Perceptual
from typing import List, Optional, Any, Dict, Union | 1,380 |
# import lpips
# loss_fn_vgg = lpips.LPIPS(net="vgg").cuda()
def get_mask_bbox(mask):
# crop image
true_indices = np.nonzero(mask)
min_h, min_w = np.min(true_indices[0]), np.min(true_indices[1])
max_h, max_w = np.max(true_indices[0]), np.max(true_indices[1])
# print(min_h, min_w)
# print(max_h, max_w)
# img = img[min_h:max_h+1,min_w:max_w+1,:]
return min_h, max_h, min_w, max_w
def patch_perceptual_loss(
|
# import lpips
# loss_fn_vgg = lpips.LPIPS(net="vgg").cuda()
def get_mask_bbox(mask):
# crop image
true_indices = np.nonzero(mask)
min_h, min_w = np.min(true_indices[0]), np.min(true_indices[1])
max_h, max_w = np.max(true_indices[0]), np.max(true_indices[1])
# print(min_h, min_w)
# print(max_h, max_w)
# img = img[min_h:max_h+1,min_w:max_w+1,:]
return min_h, max_h, min_w, max_w
def patch_perceptual_loss( | perceptual_net: VGG16_for_Perceptual, | 2 | 2023-10-15 08:41:29+00:00 | 2k |
ShramanPramanick/VoLTA | Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/mask_head.py | [
{
"identifier": "make_roi_mask_feature_extractor",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/roi_heads/mask_head/roi_mask_feature_extractors.py",
"snippet": "def make_roi_mask_feature_extractor(cfg):\n func = _ROI_MASK_FEATURE_EXTRACTORS[cfg.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR]\n... | import torch
from torch import nn
from maskrcnn_benchmark.structures.bounding_box import BoxList
from .roi_mask_feature_extractors import make_roi_mask_feature_extractor
from .roi_mask_predictors import make_roi_mask_predictor
from .inference import make_roi_mask_post_processor
from .loss import make_roi_mask_loss_evaluator | 801 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
def keep_only_positive_boxes(boxes):
"""
Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
Arguments:
boxes (list of BoxList)
"""
assert isinstance(boxes, (list, tuple))
assert isinstance(boxes[0], BoxList)
assert boxes[0].has_field("labels")
positive_boxes = []
positive_inds = []
num_boxes = 0
for boxes_per_image in boxes:
labels = boxes_per_image.get_field("labels")
inds_mask = labels > 0
inds = inds_mask.nonzero().squeeze(1)
positive_boxes.append(boxes_per_image[inds])
positive_inds.append(inds_mask)
return positive_boxes, positive_inds
class ROIMaskHead(torch.nn.Module):
def __init__(self, cfg):
super(ROIMaskHead, self).__init__()
self.cfg = cfg.clone()
self.feature_extractor = make_roi_mask_feature_extractor(cfg)
self.predictor = make_roi_mask_predictor(cfg)
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
def keep_only_positive_boxes(boxes):
"""
Given a set of BoxList containing the `labels` field,
return a set of BoxList for which `labels > 0`.
Arguments:
boxes (list of BoxList)
"""
assert isinstance(boxes, (list, tuple))
assert isinstance(boxes[0], BoxList)
assert boxes[0].has_field("labels")
positive_boxes = []
positive_inds = []
num_boxes = 0
for boxes_per_image in boxes:
labels = boxes_per_image.get_field("labels")
inds_mask = labels > 0
inds = inds_mask.nonzero().squeeze(1)
positive_boxes.append(boxes_per_image[inds])
positive_inds.append(inds_mask)
return positive_boxes, positive_inds
class ROIMaskHead(torch.nn.Module):
def __init__(self, cfg):
super(ROIMaskHead, self).__init__()
self.cfg = cfg.clone()
self.feature_extractor = make_roi_mask_feature_extractor(cfg)
self.predictor = make_roi_mask_predictor(cfg) | self.post_processor = make_roi_mask_post_processor(cfg) | 2 | 2023-10-23 04:07:08+00:00 | 2k |
earthcube-lab/textnoisr | tests/textnoisr/test_noise_dataset.py | [
{
"identifier": "noise",
"path": "textnoisr/noise.py",
"snippet": "class CharNoiseAugmenter:\n _AVAILABLE_ACTIONS = (\"insert\", \"swap\", \"substitute\", \"delete\")\n def __init__(\n self,\n noise_level: float,\n actions: tuple[str, ...] = _AVAILABLE_ACTIONS,\n charac... | from math import isclose
from datasets import load_dataset as hf_load_dataset
from evaluate import load
from textnoisr import noise, noise_dataset
import pytest | 851 |
ABS_TOLERANCE = 1.5e-2
REL_TOLERANCE = 1.5e-2
@pytest.fixture()
def dataset100_text():
return hf_load_dataset("rotten_tomatoes", split="train")
@pytest.fixture()
def dataset100(dataset100_text):
def split_tokens(item):
item["tokens"] = item["text"].split(" ")
return item
return dataset100_text.map(split_tokens)
cer = load("cer")
@pytest.mark.nightly
@pytest.mark.parametrize(
"noise_level,actions",
[
(0.001, ["substitute"]),
(0.001, ["insert"]),
(0.001, ["delete"]),
(0.001, ["swap"]),
(0.001, ["delete", "insert", "substitute", "swap"]),
(0.01, ["substitute"]),
(0.01, ["insert"]),
(0.01, ["delete"]),
(0.01, ["swap"]),
(0.01, ["delete", "insert", "substitute", "swap"]),
(0.1, ["substitute"]),
(0.1, ["insert"]),
(0.1, ["delete"]),
(0.1, ["swap"]),
(0.1, ["delete", "insert", "substitute", "swap"]),
(0.15, ["substitute"]),
(0.15, ["insert"]),
(0.15, ["delete"]),
(0.15, ["swap"]),
(0.15, ["delete", "insert", "substitute", "swap"]),
(0.20, ["substitute"]),
(0.20, ["insert"]),
(0.20, ["delete"]),
(0.20, ["swap"]),
(0.20, ["delete", "insert", "substitute", "swap"]),
],
)
@pytest.mark.filterwarnings("ignore:jiwer.compute_measures")
def test_add_noise_on_split_into_words(dataset100, noise_level, actions):
noised_dataset = noise_dataset.add_noise(
dataset100,
|
ABS_TOLERANCE = 1.5e-2
REL_TOLERANCE = 1.5e-2
@pytest.fixture()
def dataset100_text():
return hf_load_dataset("rotten_tomatoes", split="train")
@pytest.fixture()
def dataset100(dataset100_text):
def split_tokens(item):
item["tokens"] = item["text"].split(" ")
return item
return dataset100_text.map(split_tokens)
cer = load("cer")
@pytest.mark.nightly
@pytest.mark.parametrize(
"noise_level,actions",
[
(0.001, ["substitute"]),
(0.001, ["insert"]),
(0.001, ["delete"]),
(0.001, ["swap"]),
(0.001, ["delete", "insert", "substitute", "swap"]),
(0.01, ["substitute"]),
(0.01, ["insert"]),
(0.01, ["delete"]),
(0.01, ["swap"]),
(0.01, ["delete", "insert", "substitute", "swap"]),
(0.1, ["substitute"]),
(0.1, ["insert"]),
(0.1, ["delete"]),
(0.1, ["swap"]),
(0.1, ["delete", "insert", "substitute", "swap"]),
(0.15, ["substitute"]),
(0.15, ["insert"]),
(0.15, ["delete"]),
(0.15, ["swap"]),
(0.15, ["delete", "insert", "substitute", "swap"]),
(0.20, ["substitute"]),
(0.20, ["insert"]),
(0.20, ["delete"]),
(0.20, ["swap"]),
(0.20, ["delete", "insert", "substitute", "swap"]),
],
)
@pytest.mark.filterwarnings("ignore:jiwer.compute_measures")
def test_add_noise_on_split_into_words(dataset100, noise_level, actions):
noised_dataset = noise_dataset.add_noise(
dataset100, | noise.CharNoiseAugmenter(noise_level=noise_level, actions=actions, seed=42), | 0 | 2023-10-18 19:28:34+00:00 | 2k |
oven-lab/tuya_cloud_map_extractor | custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/tuya.py | [
{
"identifier": "ServerError",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const.py",
"snippet": "class ServerError(Exception):\n pass"
},
{
"identifier": "ClientIDError",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/const... | import datetime
import hmac
import requests
from .const import ServerError, ClientIDError, ClientSecretError, DeviceIDError | 749 |
def _get_sign(client_id: str, secret_key: str, url: str, t: int, token: str):
empty_hash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
signstr = client_id + token + t + "GET" + "\n" + empty_hash + "\n" + "" + "\n" + url
return hmac.new(
secret_key.encode(), msg=signstr.encode(), digestmod="sha256"
).hexdigest()
def tuyarequest(
server: str, url: str, client_id: str, secret_key: str, token=""
) -> dict:
"""Handles authentication with provided token and makes request to tuya servers."""
t = str(int(round(datetime.datetime.timestamp(datetime.datetime.now()) * 1000, 0)))
sign = _get_sign(
client_id=client_id, secret_key=secret_key, url=url, t=t, token=token
)
headers = {
"sign_method": "HMAC-SHA256",
"client_id": client_id,
"t": t,
"sign": sign.upper(),
}
if token != "":
headers["access_token"] = token
return requests.get(url=server + url, headers=headers, timeout=2.5).json()
def get_download_link(
server: str, client_id: str, secret_key: str, device_id: str
) -> str:
"""Gets the download link of the real time map."""
url = "/v1.0/token?grant_type=1"
response = tuyarequest(
server=server, url=url, client_id=client_id, secret_key=secret_key
)
if not response["success"]:
if response["msg"] == "clientId is invalid":
raise ClientIDError("Invalid Client ID")
elif response["msg"] == "sign invalid":
raise ClientSecretError("Invalid Client Secret")
elif "cross-region access is not allowed" in response["msg"]:
raise ServerError("Wrong server region. Cross-region access is not allowed.")
else:
raise RuntimeError("Request failed - Response: ", response)
access_token = response["result"]["access_token"]
url = "/v1.0/users/sweepers/file/" + device_id + "/realtime-map"
response = tuyarequest(
server=server,
url=url,
client_id=client_id,
secret_key=secret_key,
token=access_token,
)
if not response["success"]:
if response["msg"] == "permission deny":
|
def _get_sign(client_id: str, secret_key: str, url: str, t: int, token: str):
empty_hash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
signstr = client_id + token + t + "GET" + "\n" + empty_hash + "\n" + "" + "\n" + url
return hmac.new(
secret_key.encode(), msg=signstr.encode(), digestmod="sha256"
).hexdigest()
def tuyarequest(
server: str, url: str, client_id: str, secret_key: str, token=""
) -> dict:
"""Handles authentication with provided token and makes request to tuya servers."""
t = str(int(round(datetime.datetime.timestamp(datetime.datetime.now()) * 1000, 0)))
sign = _get_sign(
client_id=client_id, secret_key=secret_key, url=url, t=t, token=token
)
headers = {
"sign_method": "HMAC-SHA256",
"client_id": client_id,
"t": t,
"sign": sign.upper(),
}
if token != "":
headers["access_token"] = token
return requests.get(url=server + url, headers=headers, timeout=2.5).json()
def get_download_link(
server: str, client_id: str, secret_key: str, device_id: str
) -> str:
"""Gets the download link of the real time map."""
url = "/v1.0/token?grant_type=1"
response = tuyarequest(
server=server, url=url, client_id=client_id, secret_key=secret_key
)
if not response["success"]:
if response["msg"] == "clientId is invalid":
raise ClientIDError("Invalid Client ID")
elif response["msg"] == "sign invalid":
raise ClientSecretError("Invalid Client Secret")
elif "cross-region access is not allowed" in response["msg"]:
raise ServerError("Wrong server region. Cross-region access is not allowed.")
else:
raise RuntimeError("Request failed - Response: ", response)
access_token = response["result"]["access_token"]
url = "/v1.0/users/sweepers/file/" + device_id + "/realtime-map"
response = tuyarequest(
server=server,
url=url,
client_id=client_id,
secret_key=secret_key,
token=access_token,
)
if not response["success"]:
if response["msg"] == "permission deny": | raise DeviceIDError("Invalid Device ID") | 3 | 2023-10-22 10:48:25+00:00 | 2k |
mlbio-epfl/hume | hume.py | [
{
"identifier": "parse_args",
"path": "argparser.py",
"snippet": "def parse_args(args):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--phi1_path', \n type=str,\n required=True,\n help=\"Path to the embeddings in ... | import os
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import learn2learn as l2l
import numpy as np
from tqdm import tqdm
from argparser import parse_args
from activations import Sparsemax
from utils import fix_seed, get_cv_score, check_both_none_or_not_none
from metrics import cluster_acc, cluster_ari | 1,573 |
def run(args=None):
args = parse_args(args)
device = torch.device(args.device)
fix_seed(args.seed)
if not os.path.exists(args.exp_path):
os.makedirs(args.exp_path)
phi1 = np.load(args.phi1_path).astype(np.float32)
phi2 = np.load(args.phi2_path).astype(np.float32)
|
def run(args=None):
args = parse_args(args)
device = torch.device(args.device)
fix_seed(args.seed)
if not os.path.exists(args.exp_path):
os.makedirs(args.exp_path)
phi1 = np.load(args.phi1_path).astype(np.float32)
phi2 = np.load(args.phi2_path).astype(np.float32) | assert check_both_none_or_not_none(args.phi1_path_val, args.phi2_path_val) | 4 | 2023-10-20 15:32:06+00:00 | 2k |
MaxDude132/django-register-field | tests/models.py | [
{
"identifier": "Register",
"path": "django_register/base.py",
"snippet": "class Register:\n def __init__(self):\n self._key_to_class = {}\n self._class_to_key = {}\n\n def register(self, klass, db_key=None):\n if db_key is None:\n try:\n db_key = kla... | from dataclasses import dataclass
from django.db import models
from django_register import Register, RegisterChoices, RegisterField | 1,542 | # Standard libraries
# Django
# django_register
@dataclass(unsafe_hash=True)
class CountryInfo:
population: int
capital: str
class CountryChoices(RegisterChoices):
CANADA = CountryInfo(population=37_742_154, capital="Ottawa")
FRANCE = CountryInfo(population=65_273_511, capital="Paris")
GERMANY = CountryInfo(population=83_783_942, capital="Berlin")
UNITED_STATES = CountryInfo(population=331_900_000, capital="Washington")
@dataclass(unsafe_hash=True)
class ContinentInfo:
label: str
@dataclass(unsafe_hash=True)
class FoodInfo:
verbose_name: str
| # Standard libraries
# Django
# django_register
@dataclass(unsafe_hash=True)
class CountryInfo:
population: int
capital: str
class CountryChoices(RegisterChoices):
CANADA = CountryInfo(population=37_742_154, capital="Ottawa")
FRANCE = CountryInfo(population=65_273_511, capital="Paris")
GERMANY = CountryInfo(population=83_783_942, capital="Berlin")
UNITED_STATES = CountryInfo(population=331_900_000, capital="Washington")
@dataclass(unsafe_hash=True)
class ContinentInfo:
label: str
@dataclass(unsafe_hash=True)
class FoodInfo:
verbose_name: str
| food_register = Register() | 0 | 2023-10-23 18:11:08+00:00 | 2k |
hsouri/bob-classification | medical_chexpert/util/datasets.py | [
{
"identifier": "GaussianBlur",
"path": "medical_chexpert/util/custom_transforms.py",
"snippet": "class GaussianBlur(object):\n \"\"\"Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709\"\"\"\n\n def __init__(self, sigma=[.1, 2.]):\n self.sigma = sigma\n\n def __call__(... | import os
import PIL
import torch
from torchvision import datasets, transforms
from timm.data import create_transform
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from util.dataloader_med import RetinaDataset, Augmentation, Node21, ChestX_ray14, Covidx, CheXpert
from .custom_transforms import GaussianBlur
from .augment import new_data_aug_generator | 897 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
print(dataset)
return dataset
def build_dataset_chest_xray(split, args):
is_train = (split == 'train')
# transform = build_transform(is_train, args)
if args.build_timm_transform:
transform = build_transform(is_train, args)
else:
if is_train:
if args.aug_strategy == 'simclr_with_randrotation':
print(args.aug_strategy)
transform = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomRotation(degrees=(0, 45)),
transforms.RandomGrayscale(p=0.2),
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform)
print(dataset)
return dataset
def build_dataset_chest_xray(split, args):
is_train = (split == 'train')
# transform = build_transform(is_train, args)
if args.build_timm_transform:
transform = build_transform(is_train, args)
else:
if is_train:
if args.aug_strategy == 'simclr_with_randrotation':
print(args.aug_strategy)
transform = transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomRotation(degrees=(0, 45)),
transforms.RandomGrayscale(p=0.2), | transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5), | 0 | 2023-10-20 16:28:17+00:00 | 2k |
Salz0/telegram_flea | middlewares/message_logging_middleware.py | [
{
"identifier": "Message",
"path": "models.py",
"snippet": "class Message(BaseModel):\n \"\"\"The model for the Telegram message.\"\"\"\n\n from_user: fields.ForeignKeyRelation[User] = fields.ForeignKeyField(\n \"bot.User\", related_name=\"messages\"\n )\n id = fields.IntField(pk=True... | from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from arrow import arrow
from models import Message, User
from utils.loguru_logging import logger | 977 | """The middleware to log all the incoming messages into the database."""
class MessagesLoggingMiddleware(BaseMiddleware):
"""The middleware class, inherited from `BaseMiddleware`."""
@staticmethod
async def _save_message(msg: types.Message) -> Message:
"""Save the message into the database."""
if msg.reply_to_message:
reply_to_message = await Message.get_or_none(
message_id=msg.reply_to_message.message_id,
chat_id=msg.chat.id, # `message_id` is not unique. For details, see `models.py`.
)
else:
reply_to_message = None
return await Message.create(
# Primary fields
message_id=msg.message_id,
from_user_id=msg.from_user.id,
chat_id=msg.chat.id,
text=msg.text,
date=msg.date,
# Other fields that might be useful
reply_to_message=reply_to_message,
content_type=msg.content_type,
complete_message_json=msg.as_json(),
)
async def on_pre_process_message(self, msg: types.Message, *_, **__):
"""Save the message into the database _before_ processing it."""
user_data: dict = msg.from_user.to_python()
try:
# Create a user first, if not exist. Otherwise, we are unable to create a message
# with a foreign key.
user, created = await User.get_or_create(id=user_data.pop("id"), defaults=user_data)
if created:
if payload := msg.get_args():
user.start_payload = payload
await user.save()
| """The middleware to log all the incoming messages into the database."""
class MessagesLoggingMiddleware(BaseMiddleware):
"""The middleware class, inherited from `BaseMiddleware`."""
@staticmethod
async def _save_message(msg: types.Message) -> Message:
"""Save the message into the database."""
if msg.reply_to_message:
reply_to_message = await Message.get_or_none(
message_id=msg.reply_to_message.message_id,
chat_id=msg.chat.id, # `message_id` is not unique. For details, see `models.py`.
)
else:
reply_to_message = None
return await Message.create(
# Primary fields
message_id=msg.message_id,
from_user_id=msg.from_user.id,
chat_id=msg.chat.id,
text=msg.text,
date=msg.date,
# Other fields that might be useful
reply_to_message=reply_to_message,
content_type=msg.content_type,
complete_message_json=msg.as_json(),
)
async def on_pre_process_message(self, msg: types.Message, *_, **__):
"""Save the message into the database _before_ processing it."""
user_data: dict = msg.from_user.to_python()
try:
# Create a user first, if not exist. Otherwise, we are unable to create a message
# with a foreign key.
user, created = await User.get_or_create(id=user_data.pop("id"), defaults=user_data)
if created:
if payload := msg.get_args():
user.start_payload = payload
await user.save() | logger.info( | 2 | 2023-10-19 17:28:55+00:00 | 2k |
RobertCsordas/moe_layer | triton_src/moe_layer/moe_layer_simple.py | [
{
"identifier": "cvmm",
"path": "triton_src/moe_layer/cvmm.py",
"snippet": "def cvmm(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):\n if not isinstance(sel, CVMMSel):\n sel = cvmm_prepare_sel(sel, keys.shape[0])\n\n return CVMM.apply(x, sel.sel_index, sel.sel, keys... | import torch
import torch.distributed
import torch.nn.functional as F
import math
from typing import Tuple, List, Optional
from .cvmm import cvmm, cvmm_prepare_sel2, CVMMSel | 1,330 |
def dist_logsumexp(x: torch.Tensor, dim: int, keepdim: bool = False) -> torch.Tensor:
# Calculate numerically stable distributed logsumexp
xmax = x.max(dim=dim, keepdim=True).values
torch.distributed.all_reduce(xmax, op=torch.distributed.ReduceOp.MAX)
xe = (x - xmax).exp().sum(dim=dim, keepdim=True)
torch.distributed.all_reduce(xe, op=torch.distributed.ReduceOp.SUM)
res = (xmax + xe.log())
if not keepdim:
res = res.squeeze(dim)
return res
def log_mean(x: torch.Tensor, dim: int = 0):
if torch.distributed.is_initialized():
xlse = dist_logsumexp(x, dim=dim)
# Normalize
n = torch.tensor(x.shape[dim]).to(x.device)
torch.distributed.all_reduce(n, op=torch.distributed.ReduceOp.SUM)
return xlse - n.log()
else:
return x.logsumexp(dim) - math.log(x.shape[dim])
def entropy_l(l: torch.Tensor) -> torch.Tensor:
return - (l * l.exp()).sum(-1)
class MoE(torch.nn.Module):
def __init__(self, dmodel: int, n_experts: int, expert_size: int, k: int,
dropout: float = 0, selection_mode: str = "sigmoid",
activation_after_topk: bool = False,
activation=F.relu,
bias: bool = False, v_dim: Optional[int] = None,
sinkhorn_n_iters: int = 3, expert_dropout: float = 0.0,
weight_std_scale: float = 1.0):
super().__init__()
self.k_dim = dmodel
self.v_dim = v_dim if v_dim is not None else dmodel
self.n_experts = n_experts
self.expert_size = expert_size
self.size = self.n_experts * self.expert_size
self.dropout = dropout
self.selection_mode = selection_mode
self.k_vec_dim = self.k_dim
self.n_heads = k
self.activation_after_topk = activation_after_topk
self.activation = activation
self.sinkhorn_n_iters = sinkhorn_n_iters
self.expert_dropout = expert_dropout
if self.selection_mode not in {"softmax", "sigmoid", "sinkmoid"}:
raise ValueError(f"Unknown selection mode {self.selection_mode}")
self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))
self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))
self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))
torch.nn.init.normal_(self.keys, std=dmodel ** -0.5 * weight_std_scale)
torch.nn.init.normal_(self.values, std=self.size ** -0.5 * weight_std_scale)
torch.nn.init.normal_(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_std_scale)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(self.n_experts, self.expert_size))
self.o_bias = torch.nn.Parameter(torch.zeros(self.v_dim))
else:
self.bias = None
self.o_bias = None
self.renorm_keep_std(self.expert_sel, dim=1)
def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):
with torch.no_grad():
std = weight.std()
weight.div_(weight.norm(dim=dim, keepdim=True))
weight.mul_(std / weight.std())
def entropy_reg(self, sel: torch.Tensor) -> float:
# Everything is done in log scale
sel = sel.flatten(0, -2)
sel = F.log_softmax(sel, dim=-1)
sel = log_mean(sel, -2)
return - entropy_l(sel).mean()
def compute_scores(self, input: torch.Tensor, index: CVMMSel) -> torch.Tensor:
|
def dist_logsumexp(x: torch.Tensor, dim: int, keepdim: bool = False) -> torch.Tensor:
# Calculate numerically stable distributed logsumexp
xmax = x.max(dim=dim, keepdim=True).values
torch.distributed.all_reduce(xmax, op=torch.distributed.ReduceOp.MAX)
xe = (x - xmax).exp().sum(dim=dim, keepdim=True)
torch.distributed.all_reduce(xe, op=torch.distributed.ReduceOp.SUM)
res = (xmax + xe.log())
if not keepdim:
res = res.squeeze(dim)
return res
def log_mean(x: torch.Tensor, dim: int = 0):
if torch.distributed.is_initialized():
xlse = dist_logsumexp(x, dim=dim)
# Normalize
n = torch.tensor(x.shape[dim]).to(x.device)
torch.distributed.all_reduce(n, op=torch.distributed.ReduceOp.SUM)
return xlse - n.log()
else:
return x.logsumexp(dim) - math.log(x.shape[dim])
def entropy_l(l: torch.Tensor) -> torch.Tensor:
return - (l * l.exp()).sum(-1)
class MoE(torch.nn.Module):
def __init__(self, dmodel: int, n_experts: int, expert_size: int, k: int,
dropout: float = 0, selection_mode: str = "sigmoid",
activation_after_topk: bool = False,
activation=F.relu,
bias: bool = False, v_dim: Optional[int] = None,
sinkhorn_n_iters: int = 3, expert_dropout: float = 0.0,
weight_std_scale: float = 1.0):
super().__init__()
self.k_dim = dmodel
self.v_dim = v_dim if v_dim is not None else dmodel
self.n_experts = n_experts
self.expert_size = expert_size
self.size = self.n_experts * self.expert_size
self.dropout = dropout
self.selection_mode = selection_mode
self.k_vec_dim = self.k_dim
self.n_heads = k
self.activation_after_topk = activation_after_topk
self.activation = activation
self.sinkhorn_n_iters = sinkhorn_n_iters
self.expert_dropout = expert_dropout
if self.selection_mode not in {"softmax", "sigmoid", "sinkmoid"}:
raise ValueError(f"Unknown selection mode {self.selection_mode}")
self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))
self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))
self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))
torch.nn.init.normal_(self.keys, std=dmodel ** -0.5 * weight_std_scale)
torch.nn.init.normal_(self.values, std=self.size ** -0.5 * weight_std_scale)
torch.nn.init.normal_(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_std_scale)
if bias:
self.bias = torch.nn.Parameter(torch.zeros(self.n_experts, self.expert_size))
self.o_bias = torch.nn.Parameter(torch.zeros(self.v_dim))
else:
self.bias = None
self.o_bias = None
self.renorm_keep_std(self.expert_sel, dim=1)
def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):
with torch.no_grad():
std = weight.std()
weight.div_(weight.norm(dim=dim, keepdim=True))
weight.mul_(std / weight.std())
def entropy_reg(self, sel: torch.Tensor) -> float:
# Everything is done in log scale
sel = sel.flatten(0, -2)
sel = F.log_softmax(sel, dim=-1)
sel = log_mean(sel, -2)
return - entropy_l(sel).mean()
def compute_scores(self, input: torch.Tensor, index: CVMMSel) -> torch.Tensor: | scores = cvmm(input, index, self.keys) | 0 | 2023-10-16 11:00:47+00:00 | 2k |
meanii/downly | downly/plugins/logger.py | [
{
"identifier": "Downly",
"path": "downly/downly.py",
"snippet": "class Downly(Client):\n \"\"\"\n Downly 🦉\n \"\"\"\n def __init__(self):\n name = self.__class__.__name__.lower()\n\n self.telegram = telegram\n\n super().__init__(\n name,\n api_id=... | from pyrogram import filters, Client
from pyrogram.types import Message
from pyrogram.enums import ChatType
from downly.downly import Downly
from downly.utils.b_logger import b_logger
from downly.database.users_sql import update_user, update_chat | 853 |
@Downly.on_message(filters.private | filters.group | filters.channel, group=2)
@b_logger
async def logger(client: Client, message: Message):
# check if a message is command then do nothing
if message.chat.type == ChatType.GROUP or message.chat.type == ChatType.SUPERGROUP:
update_chat(str(message.chat.id), message.chat.title)
if message.from_user:
|
@Downly.on_message(filters.private | filters.group | filters.channel, group=2)
@b_logger
async def logger(client: Client, message: Message):
# check if a message is command then do nothing
if message.chat.type == ChatType.GROUP or message.chat.type == ChatType.SUPERGROUP:
update_chat(str(message.chat.id), message.chat.title)
if message.from_user: | update_user(message.from_user.id, message.from_user.username) | 2 | 2023-10-17 16:21:31+00:00 | 2k |
hnesk/flipper-raw-rfid | flipper_raw_rfid/bits.py | [
{
"identifier": "batched",
"path": "flipper_raw_rfid/utils.py",
"snippet": "def batched(iterable: Iterable[Any], n: int) -> Iterable[tuple[Any, ...]]:\n # batched('ABCDEFG', 3) --> ABC DEF G\n if n < 1:\n raise ValueError('n must be at least one')\n it = iter(iterable)\n while batch :... | import re
import numpy
import numpy.typing as npt
from flipper_raw_rfid.utils import batched, Peak | 1,177 | """
Utilities for working with bitstreams
"""
def decode_lengths(pads: npt.NDArray[numpy.int64], peaks: list[Peak]) -> tuple[npt.NDArray[numpy.int8], int]:
"""
Loops through pulses and durations and matches them to peaks
Checks for the length of the peak as a multiple of the first peak and adds as many 1/0 to the result
:param pads: Pulse and duration values
:param peaks: A list of peaks from find_peaks, the center frequencies should be more or less multiples of the first peak
:return: The decoded bitstream
"""
result: list[int] = []
position = 0
result_position = None
first_length = peaks[0].center
for high, duration in pads:
low = duration - high
high_peak = None
low_peak = None
for p in peaks:
if high in p:
high_peak = p
if low in p:
low_peak = p
if high_peak and low_peak:
break
if not (high_peak and low_peak):
if not high_peak:
print(f'Found nothing for high {high}, restarting')
if not low_peak:
print(f'Found nothing for low {low}, restarting')
result = []
result_position = position
continue
result.extend([1] * int(round(high_peak.center / first_length)))
result.extend([0] * int(round(low_peak.center / first_length)))
position += duration
return numpy.array(result, dtype=numpy.int8), result_position
def decode_manchester(manchester: npt.NDArray[numpy.int8], biphase: bool = True) -> npt.NDArray[numpy.int8]:
"""
Decode manchester encoded bitstream
:param manchester: manchester encoded bitstream
:param biphase: True for biphase, False for diphase
:return: decoded bitstream
"""
if manchester[0] == manchester[1]:
manchester = manchester[1:]
result = []
| """
Utilities for working with bitstreams
"""
def decode_lengths(pads: npt.NDArray[numpy.int64], peaks: list[Peak]) -> tuple[npt.NDArray[numpy.int8], int]:
"""
Loops through pulses and durations and matches them to peaks
Checks for the length of the peak as a multiple of the first peak and adds as many 1/0 to the result
:param pads: Pulse and duration values
:param peaks: A list of peaks from find_peaks, the center frequencies should be more or less multiples of the first peak
:return: The decoded bitstream
"""
result: list[int] = []
position = 0
result_position = None
first_length = peaks[0].center
for high, duration in pads:
low = duration - high
high_peak = None
low_peak = None
for p in peaks:
if high in p:
high_peak = p
if low in p:
low_peak = p
if high_peak and low_peak:
break
if not (high_peak and low_peak):
if not high_peak:
print(f'Found nothing for high {high}, restarting')
if not low_peak:
print(f'Found nothing for low {low}, restarting')
result = []
result_position = position
continue
result.extend([1] * int(round(high_peak.center / first_length)))
result.extend([0] * int(round(low_peak.center / first_length)))
position += duration
return numpy.array(result, dtype=numpy.int8), result_position
def decode_manchester(manchester: npt.NDArray[numpy.int8], biphase: bool = True) -> npt.NDArray[numpy.int8]:
"""
Decode manchester encoded bitstream
:param manchester: manchester encoded bitstream
:param biphase: True for biphase, False for diphase
:return: decoded bitstream
"""
if manchester[0] == manchester[1]:
manchester = manchester[1:]
result = [] | for pair in batched(manchester, 2): | 0 | 2023-10-20 13:06:00+00:00 | 2k |
xingchenshanyao/YOLOP-E | lib/dataset/DemoDataset.py | [
{
"identifier": "clean_str",
"path": "lib/utils/utils.py",
"snippet": "def clean_str(s):\n # Cleans a string by replacing special characters with underscore _\n return re.sub(pattern=\"[|@#!¡·$€%&()=?¿^*;:,¨´><+]\", repl=\"_\", string=s)"
},
{
"identifier": "letterbox_for_img",
"path":... | import glob
import os
import random
import shutil
import time
import cv2
import math
import numpy as np
import torch
from pathlib import Path
from threading import Thread
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from ..utils import letterbox_for_img, clean_str | 1,417 |
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
h0, w0 = img0.shape[:2]
self.frame += 1
print('\n video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) # BGR
#img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: \n' % (self.count, self.nf, path), end='')
h0, w0 = img0.shape[:2]
# Padded resize # 填充尺寸,640*360*3 -> 640*384*3
|
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p, recursive=True)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
h0, w0 = img0.shape[:2]
self.frame += 1
print('\n video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION) # BGR
#img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: \n' % (self.count, self.nf, path), end='')
h0, w0 = img0.shape[:2]
# Padded resize # 填充尺寸,640*360*3 -> 640*384*3 | img, ratio, pad = letterbox_for_img(img0, new_shape=self.img_size, auto=True) | 1 | 2023-10-24 02:08:25+00:00 | 2k |
godisboy0/nonebot-adapter-wcf | adapters/wechatferry/api.py | [
{
"identifier": "ApiNotAvailable",
"path": "adapters/wechatferry/exception.py",
"snippet": "class ApiNotAvailable(BaseApiNotAvailable, WechatFerryAdapterException):\n \"\"\"API 连接不可用\"\"\""
},
{
"identifier": "UserInfo",
"path": "adapters/wechatferry/basemodel.py",
"snippet": "class U... | from wcferry import Wcf
from typing import Any
from .exception import ApiNotAvailable
from concurrent.futures import ThreadPoolExecutor
from .basemodel import UserInfo
from .sqldb import database
from .utils import file_md5, logger
from .config import AdapterConfig
import asyncio | 1,546 | """
所有的 api 都定义在这里。
call_api 的所有方法最终都会调用这里的方法。
"""
"""
发现绝大多数插件都是为 onebot.v11 所写,为了更好的复用(白嫖),这里也用 onebot.v11 中相关的数据结构。
参数约定:
to_wx_id: 群聊时为群聊id, 非群聊时为用户id
"""
user_cache = {}
md5_executor = ThreadPoolExecutor(max_workers=1)
class API:
| """
所有的 api 都定义在这里。
call_api 的所有方法最终都会调用这里的方法。
"""
"""
发现绝大多数插件都是为 onebot.v11 所写,为了更好的复用(白嫖),这里也用 onebot.v11 中相关的数据结构。
参数约定:
to_wx_id: 群聊时为群聊id, 非群聊时为用户id
"""
user_cache = {}
md5_executor = ThreadPoolExecutor(max_workers=1)
class API:
| def __init__(self, wcf: Wcf, config: AdapterConfig): | 4 | 2023-10-22 10:52:27+00:00 | 2k |
R1999RC-official/Reverse1999ResonanceCalculator | python/python_env/Lib/site-packages/setuptools/config/_apply_pyprojecttoml.py | [
{
"identifier": "SetuptoolsWarning",
"path": "python/python_env/Lib/site-packages/setuptools/warnings.py",
"snippet": "class SetuptoolsWarning(UserWarning):\n \"\"\"Base class in ``setuptools`` warning hierarchy.\"\"\"\n\n @classmethod\n def emit(\n cls,\n summary: Optional[str] =... | import logging
import os
from collections.abc import Mapping
from email.headerregistry import Address
from functools import partial, reduce
from itertools import chain
from types import MappingProxyType
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Set,
Tuple,
Type,
Union,
cast,
)
from ..warnings import SetuptoolsWarning, SetuptoolsDeprecationWarning
from setuptools._importlib import metadata # noqa
from setuptools.dist import Distribution # noqa
from setuptools.config import expand
from setuptools.config import expand
from setuptools.extern.packaging.specifiers import SpecifierSet
from .._importlib import metadata
from setuptools.dist import Distribution | 1,434 | """Translation layer between pyproject config and setuptools distribution and
metadata objects.
The distribution and metadata objects are modeled after (an old version of)
core metadata, therefore configs in the format specified for ``pyproject.toml``
need to be processed before being applied.
**PRIVATE MODULE**: API reserved for setuptools internal usage only.
"""
if TYPE_CHECKING:
EMPTY: Mapping = MappingProxyType({}) # Immutable dict-like
_Path = Union[os.PathLike, str]
_DictOrStr = Union[dict, str]
_CorrespFn = Callable[["Distribution", Any, _Path], None]
_Correspondence = Union[str, _CorrespFn]
_logger = logging.getLogger(__name__)
def apply(dist: "Distribution", config: dict, filename: _Path) -> "Distribution":
"""Apply configuration dict read with :func:`read_configuration`"""
if not config:
return dist # short-circuit unrelated pyproject.toml file
root_dir = os.path.dirname(filename) or "."
_apply_project_table(dist, config, root_dir)
_apply_tool_table(dist, config, filename)
current_directory = os.getcwd()
os.chdir(root_dir)
try:
dist._finalize_requires()
dist._finalize_license_files()
finally:
os.chdir(current_directory)
return dist
def _apply_project_table(dist: "Distribution", config: dict, root_dir: _Path):
project_table = config.get("project", {}).copy()
if not project_table:
return # short-circuit
_handle_missing_dynamic(dist, project_table)
_unify_entry_points(project_table)
for field, value in project_table.items():
norm_key = json_compatible_key(field)
corresp = PYPROJECT_CORRESPONDENCE.get(norm_key, norm_key)
if callable(corresp):
corresp(dist, value, root_dir)
else:
_set_config(dist, corresp, value)
def _apply_tool_table(dist: "Distribution", config: dict, filename: _Path):
tool_table = config.get("tool", {}).get("setuptools", {})
if not tool_table:
return # short-circuit
for field, value in tool_table.items():
norm_key = json_compatible_key(field)
if norm_key in TOOL_TABLE_DEPRECATIONS:
suggestion, kwargs = TOOL_TABLE_DEPRECATIONS[norm_key]
msg = f"The parameter `{norm_key}` is deprecated, {suggestion}"
| """Translation layer between pyproject config and setuptools distribution and
metadata objects.
The distribution and metadata objects are modeled after (an old version of)
core metadata, therefore configs in the format specified for ``pyproject.toml``
need to be processed before being applied.
**PRIVATE MODULE**: API reserved for setuptools internal usage only.
"""
if TYPE_CHECKING:
EMPTY: Mapping = MappingProxyType({}) # Immutable dict-like
_Path = Union[os.PathLike, str]
_DictOrStr = Union[dict, str]
_CorrespFn = Callable[["Distribution", Any, _Path], None]
_Correspondence = Union[str, _CorrespFn]
_logger = logging.getLogger(__name__)
def apply(dist: "Distribution", config: dict, filename: _Path) -> "Distribution":
"""Apply configuration dict read with :func:`read_configuration`"""
if not config:
return dist # short-circuit unrelated pyproject.toml file
root_dir = os.path.dirname(filename) or "."
_apply_project_table(dist, config, root_dir)
_apply_tool_table(dist, config, filename)
current_directory = os.getcwd()
os.chdir(root_dir)
try:
dist._finalize_requires()
dist._finalize_license_files()
finally:
os.chdir(current_directory)
return dist
def _apply_project_table(dist: "Distribution", config: dict, root_dir: _Path):
project_table = config.get("project", {}).copy()
if not project_table:
return # short-circuit
_handle_missing_dynamic(dist, project_table)
_unify_entry_points(project_table)
for field, value in project_table.items():
norm_key = json_compatible_key(field)
corresp = PYPROJECT_CORRESPONDENCE.get(norm_key, norm_key)
if callable(corresp):
corresp(dist, value, root_dir)
else:
_set_config(dist, corresp, value)
def _apply_tool_table(dist: "Distribution", config: dict, filename: _Path):
tool_table = config.get("tool", {}).get("setuptools", {})
if not tool_table:
return # short-circuit
for field, value in tool_table.items():
norm_key = json_compatible_key(field)
if norm_key in TOOL_TABLE_DEPRECATIONS:
suggestion, kwargs = TOOL_TABLE_DEPRECATIONS[norm_key]
msg = f"The parameter `{norm_key}` is deprecated, {suggestion}" | SetuptoolsDeprecationWarning.emit( | 1 | 2023-10-24 06:48:58+00:00 | 2k |
Summaw/genCraft-imageGen | main.py | [
{
"identifier": "write",
"path": "modules/write/write.py",
"snippet": "def write(text: str, case: str) -> None:\r\n current_time = time.strftime(\"%H:%M:%S\", time.localtime())\r\n switcher = {\r\n 'info': _write_info,\r\n 'success': _write_success,\r\n 'error': _write_error\r... | import time
import asyncio
import requests
from modules.write.write import write
from modules.tasks.login import login_attempt
from modules.tasks.generateImage import generate_image
| 1,328 |
async def start():
loginRequest = await login_attempt()
if loginRequest == 'False':
write("There was a problem logging in.", "error")
else:
write(f"Session ID: {loginRequest}", 'info')
|
async def start():
loginRequest = await login_attempt()
if loginRequest == 'False':
write("There was a problem logging in.", "error")
else:
write(f"Session ID: {loginRequest}", 'info')
| await generate_image(loginRequest)
| 2 | 2023-10-20 20:56:32+00:00 | 2k |
mentpy/mentpy | mentpy/gradients/grad.py | [
{
"identifier": "fd_gradient",
"path": "mentpy/gradients/_finite_difference.py",
"snippet": "def fd_gradient(f, x, h=1e-5, type=\"central\"):\n if type not in [\"central\", \"forward\", \"backward\"]:\n raise UserWarning(\n f\"Expected type to be 'central', 'forward', or 'backward' ... | import numpy as np
from ._finite_difference import fd_gradient, fd_hessian
from ._parameter_shift import psr_gradient, psr_hessian | 1,328 | # Copyright 2023 Luis Mantilla
#
# Licensed under the Apache License, Version 2.0.
# See <http://www.apache.org/licenses/LICENSE-2.0> for details.
"""Module that contains functions to calculate gradients of cost functions."""
__all__ = ["get_gradient", "get_hessian"]
def get_gradient(cost, x, method="parameter-shift", *args, **kwargs):
"""Calculate the gradient of a cost function.
Args:
cost (callable): Cost function to calculate the gradient of.
x (array): Input to the cost function.
method (str, optional): Method to use to calculate the gradient. Defaults to 'parameter-shift'.
Returns:
array: Gradient of the cost function.
"""
match method:
case "parameter-shift" | "psr" | "parametershift":
| # Copyright 2023 Luis Mantilla
#
# Licensed under the Apache License, Version 2.0.
# See <http://www.apache.org/licenses/LICENSE-2.0> for details.
"""Module that contains functions to calculate gradients of cost functions."""
__all__ = ["get_gradient", "get_hessian"]
def get_gradient(cost, x, method="parameter-shift", *args, **kwargs):
"""Calculate the gradient of a cost function.
Args:
cost (callable): Cost function to calculate the gradient of.
x (array): Input to the cost function.
method (str, optional): Method to use to calculate the gradient. Defaults to 'parameter-shift'.
Returns:
array: Gradient of the cost function.
"""
match method:
case "parameter-shift" | "psr" | "parametershift": | return psr_gradient(cost, x, *args, **kwargs) | 2 | 2023-10-18 18:29:42+00:00 | 2k |
rnag/cert-hero | cert_hero/cli.py | [
{
"identifier": "certs_please",
"path": "cert_hero/cert_hero.py",
"snippet": "def certs_please(\n hostnames: list[str] | tuple[str] | set[str],\n context: ssl.SSLContext = None,\n num_threads: int = 25,\n user_agent: str | None = _DEFAULT_USER_AGENT,\n) -> dict[str, CertHero]:\n \"\"\"\n ... | import argparse
import sys
from . import certs_please, set_expired | 1,297 | """Console script for cert_hero."""
def main():
"""Console script for cert_hero."""
parser = argparse.ArgumentParser(prog='ch', description='Retrieve the SSL certificate(s) for one or more given host')
parser.add_argument('hosts', nargs='*')
args = parser.parse_args()
host_to_cert = certs_please(args.hosts)
| """Console script for cert_hero."""
def main():
"""Console script for cert_hero."""
parser = argparse.ArgumentParser(prog='ch', description='Retrieve the SSL certificate(s) for one or more given host')
parser.add_argument('hosts', nargs='*')
args = parser.parse_args()
host_to_cert = certs_please(args.hosts) | set_expired(host_to_cert) | 1 | 2023-10-16 19:02:05+00:00 | 2k |
KosinskiLab/pyTME | tme/matching_optimization.py | [
{
"identifier": "rigid_transform",
"path": "tme/matching_utils.py",
"snippet": "def rigid_transform(\n coordinates: NDArray,\n rotation_matrix: NDArray,\n out: NDArray,\n translation: NDArray,\n use_geometric_center: bool = False,\n coordinates_mask: NDArray = None,\n out_mask: NDAr... | from typing import Tuple, Dict
from abc import ABC, abstractmethod
from numpy.typing import NDArray
from scipy.optimize import (
differential_evolution,
LinearConstraint,
basinhopping,
)
from scipy.ndimage import laplace
from scipy.spatial import KDTree
from .matching_utils import rigid_transform, euler_to_rotationmatrix
import numpy as np | 1,363 | """ Implements various methods for non-exhaustive template matching
based on numerical optimization.
Copyright (c) 2023 European Molecular Biology Laboratory
Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
"""
class MatchCoordinatesToDensity(ABC):
"""
A class to template match coordinate sets.
Parameters
----------
target_coordinates : NDArray
The coordinates of the target.
template_coordinates : NDArray
The coordinates of the template.
target_weights : NDArray
The weights of the target.
template_weights : NDArray
The weights of the template.
sampling_rate : NDArray
The size of the voxel.
template_mask_coordinates : NDArray, optional
The coordinates of the template mask. Default is None.
target_mask_coordinates : NDArray, optional
The coordinates of the target mask. Default is None.
**kwargs : dict, optional
Other keyword arguments.
"""
def __init__(
self,
target_coordinates: NDArray,
template_coordinates: NDArray,
target_weights: NDArray,
template_weights: NDArray,
sampling_rate: NDArray,
template_mask_coordinates: NDArray = None,
target_mask_coordinates: NDArray = None,
**kwargs,
):
target, _, origin = FitRefinement.array_from_coordinates(
target_coordinates, target_weights, sampling_rate
)
self.target_density = target
self.target_origin = origin
self.sampling_rate = sampling_rate
self.template_weights = template_weights
self.template_coordinates = template_coordinates
self.template_coordinates_rotated = np.empty(
self.template_coordinates.shape, dtype=np.float32
)
self.target_mask_density = None
if target_mask_coordinates is not None:
target_mask, *_ = FitRefinement.array_from_coordinates(
coordinates=target_mask_coordinates.astype(np.float32),
weights=np.ones(target_mask_coordinates.shape[1]),
shape=self.target_density.shape,
origin=self.target_origin,
sampling_rate=self.sampling_rate,
)
self.target_mask_density = target_mask
self.template_mask_coordinates = None
self.template_mask_coordinates_rotated = None
if template_mask_coordinates is not None:
self.template_mask_coordinates = template_mask_coordinates
self.template_mask_coordinates_rotated = np.empty(
self.template_mask_coordinates.shape, dtype=np.float32
)
def __call__(self, x: NDArray):
"""
Return the score for a given transformation.
Parameters
----------
x : NDArray
The input transformation parameters.
Returns
-------
float
The negative score from the scoring function.
"""
translation, rotation = x[:3], x[3:]
rotation_matrix = euler_to_rotationmatrix(rotation)
| """ Implements various methods for non-exhaustive template matching
based on numerical optimization.
Copyright (c) 2023 European Molecular Biology Laboratory
Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
"""
class MatchCoordinatesToDensity(ABC):
"""
A class to template match coordinate sets.
Parameters
----------
target_coordinates : NDArray
The coordinates of the target.
template_coordinates : NDArray
The coordinates of the template.
target_weights : NDArray
The weights of the target.
template_weights : NDArray
The weights of the template.
sampling_rate : NDArray
The size of the voxel.
template_mask_coordinates : NDArray, optional
The coordinates of the template mask. Default is None.
target_mask_coordinates : NDArray, optional
The coordinates of the target mask. Default is None.
**kwargs : dict, optional
Other keyword arguments.
"""
def __init__(
self,
target_coordinates: NDArray,
template_coordinates: NDArray,
target_weights: NDArray,
template_weights: NDArray,
sampling_rate: NDArray,
template_mask_coordinates: NDArray = None,
target_mask_coordinates: NDArray = None,
**kwargs,
):
target, _, origin = FitRefinement.array_from_coordinates(
target_coordinates, target_weights, sampling_rate
)
self.target_density = target
self.target_origin = origin
self.sampling_rate = sampling_rate
self.template_weights = template_weights
self.template_coordinates = template_coordinates
self.template_coordinates_rotated = np.empty(
self.template_coordinates.shape, dtype=np.float32
)
self.target_mask_density = None
if target_mask_coordinates is not None:
target_mask, *_ = FitRefinement.array_from_coordinates(
coordinates=target_mask_coordinates.astype(np.float32),
weights=np.ones(target_mask_coordinates.shape[1]),
shape=self.target_density.shape,
origin=self.target_origin,
sampling_rate=self.sampling_rate,
)
self.target_mask_density = target_mask
self.template_mask_coordinates = None
self.template_mask_coordinates_rotated = None
if template_mask_coordinates is not None:
self.template_mask_coordinates = template_mask_coordinates
self.template_mask_coordinates_rotated = np.empty(
self.template_mask_coordinates.shape, dtype=np.float32
)
def __call__(self, x: NDArray):
"""
Return the score for a given transformation.
Parameters
----------
x : NDArray
The input transformation parameters.
Returns
-------
float
The negative score from the scoring function.
"""
translation, rotation = x[:3], x[3:]
rotation_matrix = euler_to_rotationmatrix(rotation)
| rigid_transform( | 0 | 2023-10-20 13:46:01+00:00 | 2k |
hookla/DreamTeamGPT | dream_team_gpt/main.py | [
{
"identifier": "Meeting",
"path": "dream_team_gpt/meeting.py",
"snippet": "class Meeting:\n idea: str\n config: Path = None\n\n def __post_init__(self) -> None:\n \"\"\"Create agents\"\"\"\n client_factory = ai_client_factory(\n AIClientConfig(\n client_... | from dataclasses import dataclass
from pathlib import Path
from dotenv import load_dotenv
from dream_team_gpt.meeting import Meeting
from dream_team_gpt.utils import configure_logging
import os
import click | 655 |
@click.command()
@click.option(
"--idea",
"-i",
type=str,
required=True,
help="your idea for the team to discuss. Please use double quotes",
)
@click.option(
"--config",
"-c",
type=click.Path(exists=True),
default=None,
help="yaml file with team personalities details",
)
@click.option("-v", "--verbose", default=1, count=True)
def run_meeting(idea: str, config: Path = None, verbose: int = 1) -> None:
print(idea)
|
@click.command()
@click.option(
"--idea",
"-i",
type=str,
required=True,
help="your idea for the team to discuss. Please use double quotes",
)
@click.option(
"--config",
"-c",
type=click.Path(exists=True),
default=None,
help="yaml file with team personalities details",
)
@click.option("-v", "--verbose", default=1, count=True)
def run_meeting(idea: str, config: Path = None, verbose: int = 1) -> None:
print(idea) | configure_logging(verbose) | 1 | 2023-10-18 22:45:50+00:00 | 2k |
amrahhh/sqla_async_orm_queries | examples/test.py | [
{
"identifier": "Model",
"path": "sqla_async_orm_queries/models.py",
"snippet": "class Model(Base):\n __abstract__ = True\n\n @classmethod\n async def create(cls, data: dict):\n async with SessionLocal() as session:\n try:\n data = cls(**data)\n s... | import asyncio
from sqlalchemy import Column, String, Integer, and_
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine, async_sessionmaker
from sqla_async_orm_queries import Model, init_session | 836 |
# create your engine
engine = create_async_engine(
"postgresql+asyncpg://test_user:12345@localhost/test_db",
echo=True,
)
# create your SessionLocal
SessionLocal = async_sessionmaker(
expire_on_commit=True,
class_=AsyncSession,
bind=engine,
)
|
# create your engine
engine = create_async_engine(
"postgresql+asyncpg://test_user:12345@localhost/test_db",
echo=True,
)
# create your SessionLocal
SessionLocal = async_sessionmaker(
expire_on_commit=True,
class_=AsyncSession,
bind=engine,
)
| class Test(Model): | 0 | 2023-10-17 09:42:44+00:00 | 2k |
MeetingAgent/MeetingAgent-Core | meeting_buddy.py | [
{
"identifier": "MyTTS",
"path": "voice_cloning/clone.py",
"snippet": "class MyTTS:\n def __init__(self):\n # Get device\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n self.tts = TTS(\"tts_models/en/ljspeech/tacotron2-DDC\")\n self.use_default_speak... | import pyaudio
import wave
import whisper
import threading
import time
import pygame
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.switch import Switch
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.uix.textinput import TextInput
from kivy.core.window import Window
from kivy.support import install_twisted_reactor
from gtts import gTTS
from pydub import AudioSegment
from ftlangdetect import detect
from voice_cloning.clone import MyTTS
from meeting_buddy_system.gpt_utils import gpt_4_answer, gpt_3_5_turbo_16k_answer
from meeting_buddy_system.prompts import MEETING_BUDDY_MAIN_PROMPT, EXTRACT_QUERY_PROMPT | 1,587 | # Audio Processing
# GUI
install_twisted_reactor()
# gtts text to speech
# personalized voice text to speech
# Local
recording = False
audio_thread = None
def get_audio() -> None:
global recording
recording = True
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, frames_per_buffer=1024)
frames = []
try:
print("Recording...")
while recording:
data = stream.read(1024)
frames.append(data)
print("Finished recording.")
finally:
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open('meeting_buddy_audio/input_audio.wav', 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(44100)
wf.writeframes(b''.join(frames))
wf.close()
def stop_audio() -> None:
global recording
recording = False
def whisper_process_audio(audio_file: str) -> str:
model = whisper.load_model("base") # for multilingual
result = model.transcribe(audio_file)
return result["text"]
def detect_language(text: str) -> str:
cleaned_text = text.replace('\n', ' ')
return detect(text=cleaned_text, low_memory=True)
def gtts_text_to_speech(text: str, output_file='meeting_buddy_audio/output.mp3') -> None:
language = detect_language(text=text)["lang"]
tts = gTTS(text=text, lang=language, slow=False)
tts.save(output_file)
print(f'Audio saved as {output_file}')
def voice_clone_text_to_speech(text: str, output_file='meeting_buddy_audio/output.wav') -> None:
app.tts.text_to_speech(text, output_file)
print(f'Audio saved as {output_file}')
# initialize mixer
pygame.mixer.init()
def play_audio(file_path):
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
def stop_audio_playback():
pygame.mixer.music.stop()
def gpt_pipeline(meeting_context: str, input_text: str) -> str:
"""
Extract query from text and produce the final answer to query.
"""
print("\n\n\n###### EXTRACTING QUERY FROM TEXT ######\n\n\n")
messages = [{"role": "system", "content": EXTRACT_QUERY_PROMPT}, {"role": "user", "content": input_text}]
| # Audio Processing
# GUI
install_twisted_reactor()
# gtts text to speech
# personalized voice text to speech
# Local
recording = False
audio_thread = None
def get_audio() -> None:
global recording
recording = True
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=44100, input=True, frames_per_buffer=1024)
frames = []
try:
print("Recording...")
while recording:
data = stream.read(1024)
frames.append(data)
print("Finished recording.")
finally:
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open('meeting_buddy_audio/input_audio.wav', 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(44100)
wf.writeframes(b''.join(frames))
wf.close()
def stop_audio() -> None:
global recording
recording = False
def whisper_process_audio(audio_file: str) -> str:
model = whisper.load_model("base") # for multilingual
result = model.transcribe(audio_file)
return result["text"]
def detect_language(text: str) -> str:
cleaned_text = text.replace('\n', ' ')
return detect(text=cleaned_text, low_memory=True)
def gtts_text_to_speech(text: str, output_file='meeting_buddy_audio/output.mp3') -> None:
language = detect_language(text=text)["lang"]
tts = gTTS(text=text, lang=language, slow=False)
tts.save(output_file)
print(f'Audio saved as {output_file}')
def voice_clone_text_to_speech(text: str, output_file='meeting_buddy_audio/output.wav') -> None:
app.tts.text_to_speech(text, output_file)
print(f'Audio saved as {output_file}')
# initialize mixer
pygame.mixer.init()
def play_audio(file_path):
pygame.mixer.music.load(file_path)
pygame.mixer.music.play()
def stop_audio_playback():
pygame.mixer.music.stop()
def gpt_pipeline(meeting_context: str, input_text: str) -> str:
"""
Extract query from text and produce the final answer to query.
"""
print("\n\n\n###### EXTRACTING QUERY FROM TEXT ######\n\n\n")
messages = [{"role": "system", "content": EXTRACT_QUERY_PROMPT}, {"role": "user", "content": input_text}] | query = gpt_3_5_turbo_16k_answer(messages=messages) | 2 | 2023-10-18 06:50:56+00:00 | 2k |
KaichengGroup/FUSE-Flow | FUSE_Flow/other_modules/adaptive_unet.py | [
{
"identifier": "AEInit",
"path": "FUSE_Flow/other_modules/utils.py",
"snippet": "class AEInit(str, Enum):\n zero = 'zero'\n xavier = 'xavier'\n\n @classmethod\n def get_values(cls):\n return tuple(map(lambda c: c.value, cls))"
},
{
"identifier": "ConvBlock",
"path": "FUSE... | import math
import pytorch_lightning as pl
import torch
from torch import nn
from FUSE_Flow.other_modules.utils import AEInit
from .conv_modules.conv_block import ConvBlock
from .gated_resnet import UpsampleBlock, DownsampleBlock | 1,391 |
class AdaptiveUNet(pl.LightningModule):
"""SR network architecture that uses Residual-in-Residual Dense Blocks.
Implement Figure (3) in ESRGAN paper.
Parameters
----------
d_x : int
Priority dimension (height or width) of input chosen for downstream comparisons.
d_y : int
Priority dimension (height or width) of output chosen for downstream comparisons.
add_depth : int
Additional depth on top of that required based on difference in scale of input and output.
Largest value this value can take is the largest n where input_shape[1]/factor**n is whole and odd.
factor: int
Factor at which data expands or shrinks. Currently only works for factor = 2.
c_in : int
Number of channels of input tensor.
c_hid : int
Number of channels of inner convolutional layers.
n_conv : int
Number of conv layers.
no_skip : bool
To include skip connection between mirrored layers.
attention_type: AttentionType
type of attention implemented in gated conv blocks
attn_red_ratio : float # default 16
Minimum value = 1, Maximum value = c_in, set reduction from 1 to c_in using attn_red_ratio
Smaller attn_red_ratio --> Less Parameters
Hyperparameter to vary capacity and computational cost of SE blocks in the network.
"""
def __init__(self, d_x, d_y, add_depth, factor, c_in, c_hid, n_conv, no_skip,
attention_type, attn_red_ratio):
super().__init__()
self.save_hyperparameters()
self.no_skip = no_skip
# double the number of channels needed if no skip connection
if no_skip:
c_inter = c_hid
else:
c_inter = c_hid//2
# larger of the input and output priority dimension
d_l = max(d_x, d_y)
# larger of the input and output priority dimension
d_s = min(d_x, d_y)
# scale difference between input and output
scale = int(d_l / d_s)
# max depth of U-Net
max_depth = int(math.log(scale, factor) + 1 + add_depth)
# represents dimension size of unwanted depths
denominator = d_l // (factor ** (max_depth - 1))
# number of down-sampling blocks
n_down = math.floor(math.log(d_x / denominator, factor))
# number of up-sampling layers in encoder
n_enc_up = max_depth - 1 - n_down - math.ceil(math.log(scale, factor) % 1)
# number of up-sampling layers in decoder
n_dec_up = math.floor(math.log(d_y / denominator, factor))
# discrepancy between size of input priority dimension and nearest larger multiple of 2
k_up = d_l // (factor ** math.floor(math.log(scale, factor))) - d_s
# discrepancy between size of input priority dimension and nearest smaller multiple of 2
k_down = d_s - d_l // (factor ** math.ceil(math.log(scale, factor)))
# need resizing if data is not multiple of 2
self.need_resizing = k_up or k_down
# encoder
if not no_skip:
c_up = c_inter // (factor ** (n_down+self.need_resizing))
self.up_resizer = nn.Sequential(
*[ConvBlock(nn.ConvTranspose2d, c_in, c_up,
|
class AdaptiveUNet(pl.LightningModule):
"""SR network architecture that uses Residual-in-Residual Dense Blocks.
Implement Figure (3) in ESRGAN paper.
Parameters
----------
d_x : int
Priority dimension (height or width) of input chosen for downstream comparisons.
d_y : int
Priority dimension (height or width) of output chosen for downstream comparisons.
add_depth : int
Additional depth on top of that required based on difference in scale of input and output.
Largest value this value can take is the largest n where input_shape[1]/factor**n is whole and odd.
factor: int
Factor at which data expands or shrinks. Currently only works for factor = 2.
c_in : int
Number of channels of input tensor.
c_hid : int
Number of channels of inner convolutional layers.
n_conv : int
Number of conv layers.
no_skip : bool
To include skip connection between mirrored layers.
attention_type: AttentionType
type of attention implemented in gated conv blocks
attn_red_ratio : float # default 16
Minimum value = 1, Maximum value = c_in, set reduction from 1 to c_in using attn_red_ratio
Smaller attn_red_ratio --> Less Parameters
Hyperparameter to vary capacity and computational cost of SE blocks in the network.
"""
def __init__(self, d_x, d_y, add_depth, factor, c_in, c_hid, n_conv, no_skip,
attention_type, attn_red_ratio):
super().__init__()
self.save_hyperparameters()
self.no_skip = no_skip
# double the number of channels needed if no skip connection
if no_skip:
c_inter = c_hid
else:
c_inter = c_hid//2
# larger of the input and output priority dimension
d_l = max(d_x, d_y)
# larger of the input and output priority dimension
d_s = min(d_x, d_y)
# scale difference between input and output
scale = int(d_l / d_s)
# max depth of U-Net
max_depth = int(math.log(scale, factor) + 1 + add_depth)
# represents dimension size of unwanted depths
denominator = d_l // (factor ** (max_depth - 1))
# number of down-sampling blocks
n_down = math.floor(math.log(d_x / denominator, factor))
# number of up-sampling layers in encoder
n_enc_up = max_depth - 1 - n_down - math.ceil(math.log(scale, factor) % 1)
# number of up-sampling layers in decoder
n_dec_up = math.floor(math.log(d_y / denominator, factor))
# discrepancy between size of input priority dimension and nearest larger multiple of 2
k_up = d_l // (factor ** math.floor(math.log(scale, factor))) - d_s
# discrepancy between size of input priority dimension and nearest smaller multiple of 2
k_down = d_s - d_l // (factor ** math.ceil(math.log(scale, factor)))
# need resizing if data is not multiple of 2
self.need_resizing = k_up or k_down
# encoder
if not no_skip:
c_up = c_inter // (factor ** (n_down+self.need_resizing))
self.up_resizer = nn.Sequential(
*[ConvBlock(nn.ConvTranspose2d, c_in, c_up, | 3, 1, 1, AEInit.xavier, attention_type, attn_red_ratio)] + | 0 | 2023-10-19 06:49:31+00:00 | 2k |
zytedata/zyte-spider-templates | zyte_spider_templates/spiders/ecommerce.py | [
{
"identifier": "document_enum",
"path": "zyte_spider_templates/documentation.py",
"snippet": "def document_enum(func):\n return func"
},
{
"identifier": "BaseSpider",
"path": "zyte_spider_templates/spiders/base.py",
"snippet": "class BaseSpider(scrapy.Spider):\n custom_settings: D... | from enum import Enum
from typing import Any, Callable, Dict, Iterable, Optional, Union
from pydantic import Field
from scrapy import Request
from scrapy.crawler import Crawler
from scrapy_poet import DummyResponse
from scrapy_spider_metadata import Args
from zyte_common_items import ProbabilityRequest, Product, ProductNavigation
from zyte_spider_templates.documentation import document_enum
from zyte_spider_templates.spiders.base import BaseSpider, BaseSpiderParams
import scrapy | 1,014 |
@document_enum
class EcommerceCrawlStrategy(str, Enum):
full: str = "full"
"""Follow most links within the domain of URL in an attempt to discover and
extract as many products as possible."""
navigation: str = "navigation"
"""Follow pagination, subcategories, and product detail pages."""
pagination_only: str = "pagination_only"
"""Follow pagination and product detail pages. SubCategory links are
ignored. Use this when some subCategory links are misidentified by
ML-extraction."""
@document_enum
class ExtractFrom(str, Enum):
httpResponseBody: str = "httpResponseBody"
"""Use HTTP responses. Cost-efficient and fast extraction method, which
works well on many websites."""
browserHtml: str = "browserHtml"
"""Use browser rendering. Often provides the best quality."""
|
@document_enum
class EcommerceCrawlStrategy(str, Enum):
full: str = "full"
"""Follow most links within the domain of URL in an attempt to discover and
extract as many products as possible."""
navigation: str = "navigation"
"""Follow pagination, subcategories, and product detail pages."""
pagination_only: str = "pagination_only"
"""Follow pagination and product detail pages. SubCategory links are
ignored. Use this when some subCategory links are misidentified by
ML-extraction."""
@document_enum
class ExtractFrom(str, Enum):
httpResponseBody: str = "httpResponseBody"
"""Use HTTP responses. Cost-efficient and fast extraction method, which
works well on many websites."""
browserHtml: str = "browserHtml"
"""Use browser rendering. Often provides the best quality."""
| class EcommerceSpiderParams(BaseSpiderParams): | 2 | 2023-10-18 10:58:44+00:00 | 2k |
Bio-OS/bio-mate | bio_mate/BaseWidget.py | [
{
"identifier": "gen_data_url_img",
"path": "bio_mate/defs.py",
"snippet": "def gen_data_url_img(img_path: Path):\n base64_utf8_str = base64.b64encode(img_path.read_bytes()).decode(\"utf-8\")\n ext = str(img_path).split(\".\")[-1]\n data_url = f\"data:image/{ext};base64,{base64_utf8_str}\"\n\n ... | from ipywidgets import DOMWidget
from traitlets import Bool, Unicode, Dict, Int
from bio_mate.defs import gen_data_url_img, get_img, list_files, prepare_plot_env
import json
import warnings
import subprocess | 848 |
module_name = "bio-mate"
module_version = "1.0.0"
class BaseWidget(DOMWidget):
_model_name = Unicode("BaseWidgetModel").tag(sync=True)
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
_view_name = Unicode("BaseWidgetView").tag(sync=True)
_view_module = Unicode(module_name).tag(sync=True)
_view_module_version = Unicode(module_version).tag(sync=True)
_view_count = Int(0).tag(sync=True)
type = Unicode("").tag(sync=True)
count = Int(100).tag(sync=True)
all_defs = Dict().tag(sync=True)
def handle_messages(self, widget, content: dict, buffers):
reqId = content.get("reqId", "")
method_name = content.get("method", "")
if not reqId or not method_name:
print(f"Invalid CommRequest: reqId: {reqId}-{method_name}")
return
if not hasattr(self, method_name):
content["response"] = {"status": "failed", "msg": "NotImplementedError"}
self.send(content)
return
func = getattr(self, method_name)
func(content)
def __init__(self, **kwargs):
super(BaseWidget, self).__init__(**kwargs)
# Assign keyword parameters to this object
recognized_keys = dir(self.__class__)
for key, value in kwargs.items():
if key not in recognized_keys and f"_{key}" not in recognized_keys:
warnings.warn(RuntimeWarning(f"Keyword parameter {key} not recognized"))
setattr(self, key, value)
# Attach the callback event handler
self.on_msg(self.handle_messages)
def getSampleImage(self, content: dict):
|
module_name = "bio-mate"
module_version = "1.0.0"
class BaseWidget(DOMWidget):
_model_name = Unicode("BaseWidgetModel").tag(sync=True)
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
_view_name = Unicode("BaseWidgetView").tag(sync=True)
_view_module = Unicode(module_name).tag(sync=True)
_view_module_version = Unicode(module_version).tag(sync=True)
_view_count = Int(0).tag(sync=True)
type = Unicode("").tag(sync=True)
count = Int(100).tag(sync=True)
all_defs = Dict().tag(sync=True)
def handle_messages(self, widget, content: dict, buffers):
reqId = content.get("reqId", "")
method_name = content.get("method", "")
if not reqId or not method_name:
print(f"Invalid CommRequest: reqId: {reqId}-{method_name}")
return
if not hasattr(self, method_name):
content["response"] = {"status": "failed", "msg": "NotImplementedError"}
self.send(content)
return
func = getattr(self, method_name)
func(content)
def __init__(self, **kwargs):
super(BaseWidget, self).__init__(**kwargs)
# Assign keyword parameters to this object
recognized_keys = dir(self.__class__)
for key, value in kwargs.items():
if key not in recognized_keys and f"_{key}" not in recognized_keys:
warnings.warn(RuntimeWarning(f"Keyword parameter {key} not recognized"))
setattr(self, key, value)
# Attach the callback event handler
self.on_msg(self.handle_messages)
def getSampleImage(self, content: dict): | content["response"] = {"status": "ok", "result": get_img(self.type)} | 1 | 2023-10-19 02:15:54+00:00 | 2k |
iamarunbrahma/llm-prompt-testing | metrics.py | [
{
"identifier": "get_embeddings",
"path": "utils.py",
"snippet": "@retry(wait=wait_random_exponential(min=3, max=90), stop=stop_after_attempt(6))\r\ndef get_embeddings(text, embedding_model=\"text-embedding-ada-002\"):\r\n response = openai.Embedding.create(\r\n model=embedding_model,\r\n ... | from collections import Counter
from numpy.linalg import norm
from utils import get_embeddings, get_chat_completion
import evaluate
import streamlit as st
import traceback
import numpy as np
| 1,122 |
class Metrics:
def __init__(self, question, context, answer, config, strictness=1):
self.question = question
self.context = context
self.answer = answer
self.strictness = strictness
config["model_name"] = "gpt-3.5-turbo"
self.config = config
def rouge_score(self):
try:
if not self.answer or not self.context:
raise ValueError(
"Please provide both context and answer to generate Rouge Score."
)
rouge = evaluate.load("rouge")
results = rouge.compute(predictions=self.answer, references=self.context)
rouge1 = np.round(results["rouge1"], 3)
rouge2 = np.round(results["rouge2"], 3)
rougeL = np.round(results["rougeL"], 3)
return rouge1, rouge2, rougeL
except Exception as e:
func_name = traceback.extract_stack()[-1].name
st.error(f"Error in {func_name}: {str(e)}")
def bleu_score(self):
try:
if not self.answer or not self.context:
raise ValueError(
"Please provide both context and answer to generate BLEU Score."
)
bleu = evaluate.load("bleu")
results = bleu.compute(predictions=self.answer, references=self.context)
return np.round(results["bleu"], 3)
except Exception as e:
func_name = traceback.extract_stack()[-1].name
st.error(f"Error in {func_name}: {str(e)}")
def bert_score(self):
try:
if not self.answer or not self.context:
raise ValueError(
"Please provide both context and answer to generate BLEU Score."
)
bertscore = evaluate.load("bertscore")
results = bertscore.compute(
predictions=self.answer,
references=self.context,
lang="en",
model_type="distilbert-base-uncased",
)
return np.round(results["f1"], 3)
except Exception as e:
func_name = traceback.extract_stack()[-1].name
st.error(f"Error in {func_name}: {str(e)}")
def answer_relevancy(self):
try:
if not self.answer or not self.question:
raise ValueError(
"Please provide both question and answer to generate Answer Relevancy Score."
)
relevancy_prompt = """
Generate question for the given answer.
Here are few examples:
Answer: The first ODI Cricket World Cup was held in 1975, and the West Indies cricket team won the tournament. Clive Lloyd was the captain of the winning West Indies team. They defeated Australia in the final to become the first-ever ODI Cricket World Cup champions.
Question: Which team won the first ODI Cricket World Cup and in which year? Who was the captain of the winning team?
Answer: The first president of the United States of America was George Washington. He became president in the year 1789. Washington served as the country's first president from April 30, 1789, to March 4, 1797.
Question: Who was the first president of the United States of America and in which year did he become president?
Using the answer provided below, generate a question which is relevant to the answer.
"""
answer_relevancy_score = []
for _ in range(self.strictness):
generated_question = get_chat_completion(
self.config, relevancy_prompt, self.answer
)
|
class Metrics:
def __init__(self, question, context, answer, config, strictness=1):
self.question = question
self.context = context
self.answer = answer
self.strictness = strictness
config["model_name"] = "gpt-3.5-turbo"
self.config = config
def rouge_score(self):
try:
if not self.answer or not self.context:
raise ValueError(
"Please provide both context and answer to generate Rouge Score."
)
rouge = evaluate.load("rouge")
results = rouge.compute(predictions=self.answer, references=self.context)
rouge1 = np.round(results["rouge1"], 3)
rouge2 = np.round(results["rouge2"], 3)
rougeL = np.round(results["rougeL"], 3)
return rouge1, rouge2, rougeL
except Exception as e:
func_name = traceback.extract_stack()[-1].name
st.error(f"Error in {func_name}: {str(e)}")
def bleu_score(self):
try:
if not self.answer or not self.context:
raise ValueError(
"Please provide both context and answer to generate BLEU Score."
)
bleu = evaluate.load("bleu")
results = bleu.compute(predictions=self.answer, references=self.context)
return np.round(results["bleu"], 3)
except Exception as e:
func_name = traceback.extract_stack()[-1].name
st.error(f"Error in {func_name}: {str(e)}")
def bert_score(self):
try:
if not self.answer or not self.context:
raise ValueError(
"Please provide both context and answer to generate BLEU Score."
)
bertscore = evaluate.load("bertscore")
results = bertscore.compute(
predictions=self.answer,
references=self.context,
lang="en",
model_type="distilbert-base-uncased",
)
return np.round(results["f1"], 3)
except Exception as e:
func_name = traceback.extract_stack()[-1].name
st.error(f"Error in {func_name}: {str(e)}")
def answer_relevancy(self):
try:
if not self.answer or not self.question:
raise ValueError(
"Please provide both question and answer to generate Answer Relevancy Score."
)
relevancy_prompt = """
Generate question for the given answer.
Here are few examples:
Answer: The first ODI Cricket World Cup was held in 1975, and the West Indies cricket team won the tournament. Clive Lloyd was the captain of the winning West Indies team. They defeated Australia in the final to become the first-ever ODI Cricket World Cup champions.
Question: Which team won the first ODI Cricket World Cup and in which year? Who was the captain of the winning team?
Answer: The first president of the United States of America was George Washington. He became president in the year 1789. Washington served as the country's first president from April 30, 1789, to March 4, 1797.
Question: Who was the first president of the United States of America and in which year did he become president?
Using the answer provided below, generate a question which is relevant to the answer.
"""
answer_relevancy_score = []
for _ in range(self.strictness):
generated_question = get_chat_completion(
self.config, relevancy_prompt, self.answer
)
| question_vec = np.asarray(get_embeddings(self.question.strip()))
| 0 | 2023-10-24 17:37:07+00:00 | 2k |
AVAniketh0905/fluidspy | fluidspylib/fluidspy/numerical/methods/finite_differential.py | [
{
"identifier": "CompositeBoundary",
"path": "fluidspylib/fluidspy/numerical/boundary/composite.py",
"snippet": "class CompositeBoundary:\n children: List[Direction]\n\n def __init__(self, *args) -> None:\n self.children = list(args)\n\n def init_apply(self):\n for child in self.c... | from abc import ABC
from abc import abstractmethod
from typing import List
from ..boundary.composite import CompositeBoundary
from ..dim import Dimension
from ..material_properties import MaterialProperties
from ..material_properties import ThermalProperties
from ..state import SimulationState
from ..step import Step
from ..step import Vector
import numpy as np | 1,020 |
class FiniteDifferentialMethod(ABC):
def __init__(
self,
state: SimulationState,
dim: Dimension,
properties: ThermalProperties,
|
class FiniteDifferentialMethod(ABC):
def __init__(
self,
state: SimulationState,
dim: Dimension,
properties: ThermalProperties, | boundary_conditions: CompositeBoundary, | 0 | 2023-10-21 06:55:58+00:00 | 2k |
zorrobyte/esp32-universal-diesel-heater-controller | main.py | [
{
"identifier": "stateMachine",
"path": "states/stateMachine.py",
"snippet": "def log(message, level=2):\ndef handle_state(current_state, switch_value, exhaust_temp, output_temp):"
},
{
"identifier": "emergencyStop",
"path": "states/emergencyStop.py",
"snippet": "def log(message, level=1... | import machine
import _thread
import hardwareConfig as config
import utime
import webserver
from machine import Timer
from states import stateMachine, emergencyStop
from lib import sensors, networking, fanPID | 1,091 | ####################################################################
# WARNING #
####################################################################
# This code is provided "AS IS" without warranty of any kind. #
# Use of this code in any form acknowledges your acceptance of #
# these terms. #
# #
# This code has NOT been tested in real-world scenarios. #
# Improper usage, lack of understanding, or any combination #
# thereof can result in significant property damage, injury, #
# loss of life, or worse. #
# Specifically, this code is related to controlling heating #
# elements and systems, and there's a very real risk that it #
# can BURN YOUR SHIT DOWN. #
# #
# By using, distributing, or even reading this code, you agree #
# to assume all responsibility and risk associated with it. #
# The author(s), contributors, and distributors of this code #
# will NOT be held liable for any damages, injuries, or other #
# consequences you may face as a result of using or attempting #
# to use this code. #
# #
# Always approach such systems with caution. Ensure you understand #
# the code, the systems involved, and the potential risks. #
# If you're unsure, DO NOT use the code. #
# #
# Stay safe and think before you act. #
####################################################################
# Initialize the WDT with a 10-second timeout
wdt = machine.WDT(id=0, timeout=10000) # 10 seconds
def log(message, level=2):
if config.LOG_LEVEL >= level:
print(message)
def get_reset_reason():
reset_reason = machine.reset_cause()
if reset_reason == machine.PWRON_RESET:
print("Reboot was because of Power-On!")
elif reset_reason == machine.WDT_RESET:
print("Reboot was because of WDT!")
return reset_reason
pulse_timer = Timer(0)
last_pulse_time = 0
off_timer = Timer(1)
def turn_off_pump(_):
config.FUEL_PIN.off()
def pulse_fuel_callback(_):
global last_pulse_time
current_time = utime.ticks_ms()
if utime.ticks_diff(current_time, config.heartbeat) > 10000:
config.FUEL_PIN.off()
log("Heartbeat missing, fuel pump turned off.")
elif config.pump_frequency > 0:
period = 1000.0 / config.pump_frequency
if utime.ticks_diff(current_time, last_pulse_time) >= period:
last_pulse_time = current_time
config.FUEL_PIN.on()
off_timer.init(period=int(config.PUMP_ON_TIME * 1000), mode=Timer.ONE_SHOT, callback=turn_off_pump)
else:
config.FUEL_PIN.off()
pulse_timer.init(period=100, mode=Timer.PERIODIC, callback=pulse_fuel_callback)
def emergency_stop_thread():
while True:
wdt.feed()
current_time = utime.ticks_ms() # Use ticks_ms to get the current time in milliseconds
if utime.ticks_diff(current_time, config.heartbeat) > 10000: # Compare in milliseconds (10 seconds = 10000 ms)
emergencyStop.emergency_stop("No heartbeat detected")
utime.sleep(1)
def run_networking_thread():
while True:
networking.run_networking()
utime.sleep(5)
def main():
while True:
config.heartbeat = utime.ticks_ms()
config.output_temp = sensors.read_output_temp()
config.exhaust_temp = sensors.read_exhaust_temp()
current_switch_value = config.SWITCH_PIN.value()
| ####################################################################
# WARNING #
####################################################################
# This code is provided "AS IS" without warranty of any kind. #
# Use of this code in any form acknowledges your acceptance of #
# these terms. #
# #
# This code has NOT been tested in real-world scenarios. #
# Improper usage, lack of understanding, or any combination #
# thereof can result in significant property damage, injury, #
# loss of life, or worse. #
# Specifically, this code is related to controlling heating #
# elements and systems, and there's a very real risk that it #
# can BURN YOUR SHIT DOWN. #
# #
# By using, distributing, or even reading this code, you agree #
# to assume all responsibility and risk associated with it. #
# The author(s), contributors, and distributors of this code #
# will NOT be held liable for any damages, injuries, or other #
# consequences you may face as a result of using or attempting #
# to use this code. #
# #
# Always approach such systems with caution. Ensure you understand #
# the code, the systems involved, and the potential risks. #
# If you're unsure, DO NOT use the code. #
# #
# Stay safe and think before you act. #
####################################################################
# Initialize the WDT with a 10-second timeout
wdt = machine.WDT(id=0, timeout=10000) # 10 seconds
def log(message, level=2):
if config.LOG_LEVEL >= level:
print(message)
def get_reset_reason():
reset_reason = machine.reset_cause()
if reset_reason == machine.PWRON_RESET:
print("Reboot was because of Power-On!")
elif reset_reason == machine.WDT_RESET:
print("Reboot was because of WDT!")
return reset_reason
pulse_timer = Timer(0)
last_pulse_time = 0
off_timer = Timer(1)
def turn_off_pump(_):
config.FUEL_PIN.off()
def pulse_fuel_callback(_):
global last_pulse_time
current_time = utime.ticks_ms()
if utime.ticks_diff(current_time, config.heartbeat) > 10000:
config.FUEL_PIN.off()
log("Heartbeat missing, fuel pump turned off.")
elif config.pump_frequency > 0:
period = 1000.0 / config.pump_frequency
if utime.ticks_diff(current_time, last_pulse_time) >= period:
last_pulse_time = current_time
config.FUEL_PIN.on()
off_timer.init(period=int(config.PUMP_ON_TIME * 1000), mode=Timer.ONE_SHOT, callback=turn_off_pump)
else:
config.FUEL_PIN.off()
pulse_timer.init(period=100, mode=Timer.PERIODIC, callback=pulse_fuel_callback)
def emergency_stop_thread():
while True:
wdt.feed()
current_time = utime.ticks_ms() # Use ticks_ms to get the current time in milliseconds
if utime.ticks_diff(current_time, config.heartbeat) > 10000: # Compare in milliseconds (10 seconds = 10000 ms)
emergencyStop.emergency_stop("No heartbeat detected")
utime.sleep(1)
def run_networking_thread():
while True:
networking.run_networking()
utime.sleep(5)
def main():
while True:
config.heartbeat = utime.ticks_ms()
config.output_temp = sensors.read_output_temp()
config.exhaust_temp = sensors.read_exhaust_temp()
current_switch_value = config.SWITCH_PIN.value()
| config.current_state, config.emergency_reason = stateMachine.handle_state( | 0 | 2023-10-24 14:50:47+00:00 | 2k |
suliman-99/django-seeding | django_seeding/seeder_registry.py | [
{
"identifier": "Seeder",
"path": "django_seeding/seeders.py",
"snippet": "class Seeder():\n \"\"\" \n The `Seeder` class provides a minimal class which may be used\n for writing custom seeding implementations.\n \n Required:\n seed:\n `seed()` as <method>\n\n Additio... | import sys
import importlib.util
from pathlib import Path
from django.apps import apps
from django.conf import settings
from .seeders import Seeder
from .models import AppliedSeeder | 1,236 |
class SeederRegistry:
"""
The `SeederRegistry` class apply registered seeders when the server is run.
seeder registering is doing by:
@SeederRegistry.register as <decorator>
or
SeederRegistry.register(<seeder-class>) as <method>
"""
seeders = []
@classmethod
def register(cls, seeder):
""" Method and decorator to register the seeder-class in the seeders list to be seeded when the server is run """
|
class SeederRegistry:
"""
The `SeederRegistry` class apply registered seeders when the server is run.
seeder registering is doing by:
@SeederRegistry.register as <decorator>
or
SeederRegistry.register(<seeder-class>) as <method>
"""
seeders = []
@classmethod
def register(cls, seeder):
""" Method and decorator to register the seeder-class in the seeders list to be seeded when the server is run """ | if not issubclass(seeder, Seeder): | 0 | 2023-10-24 17:00:49+00:00 | 2k |
cfs-energy/cfspopcon | cfspopcon/helpers.py | [
{
"identifier": "Algorithms",
"path": "cfspopcon/named_options.py",
"snippet": "class Algorithms(Enum):\n \"\"\"Select which top-level algorithm to run.\"\"\"\n\n predictive_popcon = auto()\n two_point_model_fixed_fpow = auto()\n two_point_model_fixed_qpart = auto()\n two_point_model_fixe... | from typing import Any, Union
from .named_options import (
Algorithms,
ConfinementScaling,
Impurity,
LambdaQScaling,
MomentumLossFunction,
ProfileForm,
RadiationMethod,
ReactionType,
)
import xarray as xr | 1,324 | """Constructors and helper functions."""
def convert_named_options(key: str, val: Any) -> Any: # noqa: PLR0911, PLR0912
"""Given a 'key' matching a named_option, return the corresponding Enum value."""
if key == "algorithms":
return Algorithms[val]
elif key == "energy_confinement_scaling":
return ConfinementScaling[val]
elif key == "profile_form":
| """Constructors and helper functions."""
def convert_named_options(key: str, val: Any) -> Any: # noqa: PLR0911, PLR0912
"""Given a 'key' matching a named_option, return the corresponding Enum value."""
if key == "algorithms":
return Algorithms[val]
elif key == "energy_confinement_scaling":
return ConfinementScaling[val]
elif key == "profile_form": | return ProfileForm[val] | 5 | 2023-10-19 16:58:23+00:00 | 2k |
yifei-he/GOAT | experiments.py | [
{
"identifier": "ot_ablation",
"path": "ot_util.py",
"snippet": "def ot_ablation(size, mode):\n ns, nt = size, size\n plan = np.zeros((ns, nt))\n ran = np.arange(ns*nt)\n np.random.shuffle(ran)\n idx = ran[:size]\n\n for i in idx:\n row = i // nt\n col = i-i//nt * nt\n ... | import torch
import torch.optim as optim
import copy
import argparse
import random
import torch.backends.cudnn as cudnn
import time
from model import *
from train_model import *
from util import *
from ot_util import ot_ablation
from da_algo import *
from ot_util import generate_domains
from dataset import * | 1,057 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_source_model(args, trainset, testset, n_class, mode, encoder=None, epochs=50, verbose=True):
print("Start training source model")
model = Classifier(encoder, MLP(mode=mode, n_class=n_class, hidden=1024)).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
testloader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
for epoch in range(1, epochs+1):
train(epoch, trainloader, model, optimizer, verbose=verbose)
if epoch % 5 == 0:
test(testloader, model, verbose=verbose)
return model
def run_goat(model_copy, source_model, src_trainset, tgt_trainset, all_sets, generated_domains, epochs=10):
# get the performance of direct adaptation from the source to target, st involves self-training on target
direct_acc, st_acc = self_train(args, model_copy, [tgt_trainset], epochs=epochs)
# get the performance of GST from the source to target, st involves self-training on target
direct_acc_all, st_acc_all = self_train(args, source_model, all_sets, epochs=epochs)
# encode the source and target domains
e_src_trainset, e_tgt_trainset = get_encoded_dataset(source_model.encoder, src_trainset), get_encoded_dataset(source_model.encoder, tgt_trainset)
# encode the intermediate ground-truth domains
intersets = all_sets[:-1]
encoded_intersets = [e_src_trainset]
for i in intersets:
encoded_intersets.append(get_encoded_dataset(source_model.encoder, i))
encoded_intersets.append(e_tgt_trainset)
# generate intermediate domains
generated_acc = 0
if generated_domains > 0:
all_domains = []
for i in range(len(encoded_intersets)-1):
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_source_model(args, trainset, testset, n_class, mode, encoder=None, epochs=50, verbose=True):
print("Start training source model")
model = Classifier(encoder, MLP(mode=mode, n_class=n_class, hidden=1024)).to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
testloader = DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
for epoch in range(1, epochs+1):
train(epoch, trainloader, model, optimizer, verbose=verbose)
if epoch % 5 == 0:
test(testloader, model, verbose=verbose)
return model
def run_goat(model_copy, source_model, src_trainset, tgt_trainset, all_sets, generated_domains, epochs=10):
# get the performance of direct adaptation from the source to target, st involves self-training on target
direct_acc, st_acc = self_train(args, model_copy, [tgt_trainset], epochs=epochs)
# get the performance of GST from the source to target, st involves self-training on target
direct_acc_all, st_acc_all = self_train(args, source_model, all_sets, epochs=epochs)
# encode the source and target domains
e_src_trainset, e_tgt_trainset = get_encoded_dataset(source_model.encoder, src_trainset), get_encoded_dataset(source_model.encoder, tgt_trainset)
# encode the intermediate ground-truth domains
intersets = all_sets[:-1]
encoded_intersets = [e_src_trainset]
for i in intersets:
encoded_intersets.append(get_encoded_dataset(source_model.encoder, i))
encoded_intersets.append(e_tgt_trainset)
# generate intermediate domains
generated_acc = 0
if generated_domains > 0:
all_domains = []
for i in range(len(encoded_intersets)-1): | all_domains += generate_domains(generated_domains, encoded_intersets[i], encoded_intersets[i+1]) | 1 | 2023-10-20 16:41:00+00:00 | 2k |
ansible/django-ansible-base | ansible_base/tests/unit/serializers/test_common.py | [
{
"identifier": "AuthenticatorMap",
"path": "ansible_base/models/authenticator_map.py",
"snippet": "class AuthenticatorMap(NamedCommonModel):\n class Meta:\n app_label = 'ansible_base'\n # If the map type is a team then we must have an org/team\n constraints = [\n mode... | import pytest
from ansible_base.models import AuthenticatorMap
from ansible_base.serializers.common import CommonModelSerializer
from ansible_base.utils.encryption import ENCRYPTED_STRING
from test_app.models import EncryptionModel
from test_app.serializers import EncryptionTestSerializer | 1,413 |
@pytest.mark.django_db
def test_representation_of_encrypted_fields():
model = EncryptionModel.objects.create()
|
@pytest.mark.django_db
def test_representation_of_encrypted_fields():
model = EncryptionModel.objects.create() | serializer = EncryptionTestSerializer() | 4 | 2023-10-20 13:20:12+00:00 | 2k |
zhudotexe/kani-vision | kani/ext/vision/engines/openai/models.py | [
{
"identifier": "ImagePart",
"path": "kani/ext/vision/parts.py",
"snippet": "class ImagePart(MessagePart, abc.ABC):\n \"\"\"Base class for all image message parts.\n\n Generally, you shouldn't construct this directly - instead, use one of the classmethods to initialize the image from\n a file p... | from typing import Annotated, Literal, Union
from pydantic import Field
from kani.engines.openai.models import OpenAIChatMessage
from kani.models import BaseModel, ChatMessage, ChatRole
from ...parts import ImagePart, RemoteURLImagePart | 1,114 |
# note: `type` does not have default since we use `.model_dump(..., exclude_defaults=True)`
class OpenAIText(BaseModel):
type: Literal["text"]
text: str
@classmethod
def from_text(cls, data: str):
return cls(type="text", text=data)
class OpenAIImage(BaseModel):
type: Literal["image_url"]
image_url: str
detail: Literal["high"] | Literal["low"] | None = None
@classmethod
def from_imagepart(cls, part: ImagePart):
|
# note: `type` does not have default since we use `.model_dump(..., exclude_defaults=True)`
class OpenAIText(BaseModel):
type: Literal["text"]
text: str
@classmethod
def from_text(cls, data: str):
return cls(type="text", text=data)
class OpenAIImage(BaseModel):
type: Literal["image_url"]
image_url: str
detail: Literal["high"] | Literal["low"] | None = None
@classmethod
def from_imagepart(cls, part: ImagePart): | if isinstance(part, RemoteURLImagePart): | 1 | 2023-10-20 16:21:03+00:00 | 2k |
line/Skeleton-Temporal-Action-Localization | evaluation/eval.py | [
{
"identifier": "getClassificationMAP",
"path": "evaluation/classificationMAP.py",
"snippet": "def getClassificationMAP(confidence, labels):\n \"\"\" confidence and labels are of dimension n_samples x n_label \"\"\"\n\n AP = []\n for i in range(np.shape(labels)[1]):\n AP.append(getAP(con... | import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from .classificationMAP import getClassificationMAP as cmAP
from .detectionMAP import getSingleStreamDetectionMAP as dsmAP
from .detectionMAP import getTwoStreamDetectionMAP as dtmAP
from .utils import write_results_to_eval_file, write_results_to_file | 1,385 |
def ss_eval(epoch, dataloader, args, logger, model, device):
vid_preds = []
frm_preds = []
vid_lens = []
labels = []
for num, sample in enumerate(dataloader):
if (num + 1) % 100 == 0:
print("Testing test data point %d of %d" % (num + 1, len(dataloader)))
features = sample["data"].numpy()
label = sample["labels"].numpy()
vid_len = sample["vid_len"].numpy()
features = torch.from_numpy(features).float().to(device)
with torch.no_grad():
_, vid_pred, _, frm_scr = model(Variable(features))
frm_pred = F.softmax(frm_scr, -1)
vid_pred = np.squeeze(vid_pred.cpu().data.numpy(), axis=0)
frm_pred = np.squeeze(frm_pred.cpu().data.numpy(), axis=0)
label = np.squeeze(label, axis=0)
vid_preds.append(vid_pred)
frm_preds.append(frm_pred)
vid_lens.append(vid_len)
labels.append(label)
vid_preds = np.array(vid_preds)
frm_preds = np.array(frm_preds)
vid_lens = np.array(vid_lens)
labels = np.array(labels)
cmap = cmAP(vid_preds, labels)
dmap, iou = dsmAP(
vid_preds, frm_preds, vid_lens, dataloader.dataset.path_to_annotations, args
)
print("Classification map %f" % cmap)
for item in list(zip(iou, dmap)):
print("Detection map @ %f = %f" % (item[0], item[1]))
logger.log_value("Test Classification mAP", cmap, epoch)
for item in list(zip(dmap, iou)):
logger.log_value("Test Detection1 mAP @ IoU = " + str(item[1]), item[0], epoch)
|
def ss_eval(epoch, dataloader, args, logger, model, device):
vid_preds = []
frm_preds = []
vid_lens = []
labels = []
for num, sample in enumerate(dataloader):
if (num + 1) % 100 == 0:
print("Testing test data point %d of %d" % (num + 1, len(dataloader)))
features = sample["data"].numpy()
label = sample["labels"].numpy()
vid_len = sample["vid_len"].numpy()
features = torch.from_numpy(features).float().to(device)
with torch.no_grad():
_, vid_pred, _, frm_scr = model(Variable(features))
frm_pred = F.softmax(frm_scr, -1)
vid_pred = np.squeeze(vid_pred.cpu().data.numpy(), axis=0)
frm_pred = np.squeeze(frm_pred.cpu().data.numpy(), axis=0)
label = np.squeeze(label, axis=0)
vid_preds.append(vid_pred)
frm_preds.append(frm_pred)
vid_lens.append(vid_len)
labels.append(label)
vid_preds = np.array(vid_preds)
frm_preds = np.array(frm_preds)
vid_lens = np.array(vid_lens)
labels = np.array(labels)
cmap = cmAP(vid_preds, labels)
dmap, iou = dsmAP(
vid_preds, frm_preds, vid_lens, dataloader.dataset.path_to_annotations, args
)
print("Classification map %f" % cmap)
for item in list(zip(iou, dmap)):
print("Detection map @ %f = %f" % (item[0], item[1]))
logger.log_value("Test Classification mAP", cmap, epoch)
for item in list(zip(dmap, iou)):
logger.log_value("Test Detection1 mAP @ IoU = " + str(item[1]), item[0], epoch)
| write_results_to_file(args, dmap, cmap, epoch) | 4 | 2023-10-20 05:38:16+00:00 | 2k |
n-thumann/xbox-cloud-statistics | backend/xbox_cloud_statistics/main.py | [
{
"identifier": "Game",
"path": "backend/xbox_cloud_statistics/models.py",
"snippet": "class Game(Model):\n id: str\n title: str\n image_url: str\n subscriptions: Subscription\n\n def to_dict(self) -> dict:\n return {\"id\": self.id, \"title\": self.title, \"image_url\": self.image... | import asyncio
import itertools
import httpx
from pathlib import Path
from xbox_cloud_statistics.client import XBoxCloudClient
from xbox_cloud_statistics.config import Config
from xbox_cloud_statistics.io.cli import CLI
from xbox_cloud_statistics.io.json import JSON
from .models import (
Game,
Measurement,
Results,
Subscription,
) | 669 |
def run():
asyncio.run(main())
async def main():
config = Config()
results = Results()
async with httpx.AsyncClient(http2=True) as http_client:
client = XBoxCloudClient(http_client, config.client_id, config.client_secret)
if config.f2p_token:
await run_measurements(
client,
Subscription.F2P,
config.f2p_token,
config.f2p_games,
results,
)
if config.gpu_token:
await run_measurements(
client,
Subscription.GPU,
config.gpu_token,
config.gpu_games,
results,
)
CLI.handle(results)
JSON.handle(results, Path("./results"))
async def run_measurements(
client: XBoxCloudClient,
subscription: Subscription,
token: str,
games: list[Game],
results: Results,
):
await client.login(subscription, token)
games_regions = list(itertools.product(games, client.regions))
coroutines = [client.measure(region, game) for game, region in games_regions]
|
def run():
asyncio.run(main())
async def main():
config = Config()
results = Results()
async with httpx.AsyncClient(http2=True) as http_client:
client = XBoxCloudClient(http_client, config.client_id, config.client_secret)
if config.f2p_token:
await run_measurements(
client,
Subscription.F2P,
config.f2p_token,
config.f2p_games,
results,
)
if config.gpu_token:
await run_measurements(
client,
Subscription.GPU,
config.gpu_token,
config.gpu_games,
results,
)
CLI.handle(results)
JSON.handle(results, Path("./results"))
async def run_measurements(
client: XBoxCloudClient,
subscription: Subscription,
token: str,
games: list[Game],
results: Results,
):
await client.login(subscription, token)
games_regions = list(itertools.product(games, client.regions))
coroutines = [client.measure(region, game) for game, region in games_regions] | times: list[Measurement | Exception] = await asyncio.gather( | 1 | 2023-10-22 13:05:00+00:00 | 2k |
albu-org/aiotp | aiotp/totp/totp.py | [
{
"identifier": "OTP",
"path": "aiotp/core/otp.py",
"snippet": "class OTP(AbstractOTP):\n def __init__(\n self,\n secret: str,\n digit: int = 5,\n algorithm: algorithms = 'sha1'\n ) -> None:\n assert 0 < digit < 11\n assert algorithm.lower() in ('sha1', 's... | import hmac
import datetime
import unicodedata
from typing import Optional
from urllib.parse import quote, urlencode, urlparse
from ..core import OTP
from ..utils import conversion
from ..typing import algorithms
from ..abstracts import AbstractTOTP | 711 |
class TOTP(AbstractTOTP, OTP):
def __init__(
self,
secret: str,
digits: int = 5,
interval: int = 60,
algorithm: algorithms = 'sha1',
) -> None:
self.interval = interval
super().__init__(secret, digits, algorithm)
async def __aenter__(self) -> 'TOTP':
return self
async def __aexit__(self, *args, **kwargs) -> None:
...
async def create(self, dt: Optional[datetime.datetime] = None) -> str:
if not dt:
dt = datetime.datetime.now()
|
class TOTP(AbstractTOTP, OTP):
def __init__(
self,
secret: str,
digits: int = 5,
interval: int = 60,
algorithm: algorithms = 'sha1',
) -> None:
self.interval = interval
super().__init__(secret, digits, algorithm)
async def __aenter__(self) -> 'TOTP':
return self
async def __aexit__(self, *args, **kwargs) -> None:
...
async def create(self, dt: Optional[datetime.datetime] = None) -> str:
if not dt:
dt = datetime.datetime.now()
| return await self._generate(await conversion(dt, self.interval)) | 1 | 2023-10-20 18:51:22+00:00 | 2k |
brandonrobertz/reason-act-sqlite-py | llm_sql_queries.py | [
{
"identifier": "DB_PATH",
"path": "actions.py",
"snippet": "DB_PATH = \"example.db\""
},
{
"identifier": "load_db",
"path": "actions.py",
"snippet": "def load_db(path):\n assert os.path.exists(path), f\"Database doesn't exist: {path}\"\n db = sqlite_utils.Database(path)\n retur... | import json
import os
import re
import sys
import sqlite3
from llama_cpp import Llama
from actions import (
DB_PATH, load_db,
tables, schema, help, sql_query
) | 971 |
try:
except ModuleNotFoundError:
print("llama_cpp not installed, continuing without")
# Larger context sizes will reduce quality, but some models
# support large contexts better than others.
#CONTEXT_SIZE=2048
CONTEXT_SIZE=2048*2
# how many tokens to allow the model to output in a sigle go w/o stopping
MAX_TOKENS=400
# Utils n stuff
def load_model(model_path, n_gpu_layers=0, n_threads=os.cpu_count() - 1,
n_ctx=CONTEXT_SIZE, temp=None, top_p=None):
# for LLaMA2 70B models add kwarg: n_gqa=8 (NOTE: not required for GGUF models)
print("Loading model", model_path)
print("CTX:", n_ctx, "GPU layers:", n_gpu_layers, "CPU threads:", n_threads)
print("Temperature:", temp, "Top-p Sampling:", top_p)
kwargs = dict(
model_path=model_path,
n_ctx=n_ctx,
n_gpu_layers=n_gpu_layers,
n_threads=n_threads,
verbose=False
)
if temp is not None:
kwargs["temp"] = temp
if top_p is not None:
kwargs["top_p"] = top_p
llm = Llama(**kwargs)
return llm
def execute(model_path, outfile=None, debug=True, return_dict=None,
prompt=None, n_gpu_layers=0, temp=None, top_p=None):
llm = load_model(model_path, n_gpu_layers=n_gpu_layers, temp=temp,
top_p=top_p)
|
try:
except ModuleNotFoundError:
print("llama_cpp not installed, continuing without")
# Larger context sizes will reduce quality, but some models
# support large contexts better than others.
#CONTEXT_SIZE=2048
CONTEXT_SIZE=2048*2
# how many tokens to allow the model to output in a sigle go w/o stopping
MAX_TOKENS=400
# Utils n stuff
def load_model(model_path, n_gpu_layers=0, n_threads=os.cpu_count() - 1,
n_ctx=CONTEXT_SIZE, temp=None, top_p=None):
# for LLaMA2 70B models add kwarg: n_gqa=8 (NOTE: not required for GGUF models)
print("Loading model", model_path)
print("CTX:", n_ctx, "GPU layers:", n_gpu_layers, "CPU threads:", n_threads)
print("Temperature:", temp, "Top-p Sampling:", top_p)
kwargs = dict(
model_path=model_path,
n_ctx=n_ctx,
n_gpu_layers=n_gpu_layers,
n_threads=n_threads,
verbose=False
)
if temp is not None:
kwargs["temp"] = temp
if top_p is not None:
kwargs["top_p"] = top_p
llm = Llama(**kwargs)
return llm
def execute(model_path, outfile=None, debug=True, return_dict=None,
prompt=None, n_gpu_layers=0, temp=None, top_p=None):
llm = load_model(model_path, n_gpu_layers=n_gpu_layers, temp=temp,
top_p=top_p) | db = load_db(DB_PATH) | 1 | 2023-10-15 04:30:30+00:00 | 2k |
sehyun03/MulActSeg | tools/label_assignment_tensor.py | [
{
"identifier": "RegionCityscapesTensor",
"path": "dataloader/region_cityscapes_tensor.py",
"snippet": "class RegionCityscapesTensor(RegionCityscapes):\n\n def __init__(self, args, root, datalist, split='train', transform=None, region_dict=\"dataloader/init_data/cityscapes/train.dict\"):\n sup... | import os
import sys
import argparse
import numpy as np
import dataloader.ext_transforms as et
from tqdm import tqdm
from dataloader.region_cityscapes_tensor import RegionCityscapesTensor
from dataloader.utils import DataProvider | 1,591 | sys.path.append(os.path.abspath('.'))
def get_parser():
# Training configurations
parser = argparse.ArgumentParser(description='')
parser.add_argument('--nseg', type=int, default=2048, help='# superpixel component for slic')
parser.add_argument('--save_data_dir', help='superpixel directory root')
parser.add_argument('--num_worker', type=int, default=8, help='number of classes in dataset')
parser.add_argument('--ignore_size', type=int, default=0, help='(or_lbeling) ignore class region smaller than this')
parser.add_argument('--mark_topk', type=int, default=-1, help='(or_lbeling) ignore classes with the region size under than kth order')
parser.add_argument('--num_classes', type=int, default=19, help='number of classes in dataset')
parser.add_argument('--trim_kernel_size', type=int, default=3)
parser.add_argument('--trim_multihot_boundary', action='store_true', default=False)
parser.add_argument('--prob_dominant', action='store_true', default=False)
parser.add_argument('--spx_method', default='seed')
parser.add_argument('--trg_data_dir', default='./data/Cityscapes')
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
args.trg_datalist = 'dataloader/init_data/cityscapes/train_{}{}.txt'.format(args.spx_method, args.nseg)
args.region_dict = 'dataloader/init_data/cityscapes/train_{}{}.dict'.format(args.spx_method, args.nseg)
args.known_ignore = False
print(args)
identity_transform = et.ExtCompose([et.ExtToTensor(dtype_list=['int','int'])])
### load superpixel & max-frequent pooled target
| sys.path.append(os.path.abspath('.'))
def get_parser():
# Training configurations
parser = argparse.ArgumentParser(description='')
parser.add_argument('--nseg', type=int, default=2048, help='# superpixel component for slic')
parser.add_argument('--save_data_dir', help='superpixel directory root')
parser.add_argument('--num_worker', type=int, default=8, help='number of classes in dataset')
parser.add_argument('--ignore_size', type=int, default=0, help='(or_lbeling) ignore class region smaller than this')
parser.add_argument('--mark_topk', type=int, default=-1, help='(or_lbeling) ignore classes with the region size under than kth order')
parser.add_argument('--num_classes', type=int, default=19, help='number of classes in dataset')
parser.add_argument('--trim_kernel_size', type=int, default=3)
parser.add_argument('--trim_multihot_boundary', action='store_true', default=False)
parser.add_argument('--prob_dominant', action='store_true', default=False)
parser.add_argument('--spx_method', default='seed')
parser.add_argument('--trg_data_dir', default='./data/Cityscapes')
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
args.trg_datalist = 'dataloader/init_data/cityscapes/train_{}{}.txt'.format(args.spx_method, args.nseg)
args.region_dict = 'dataloader/init_data/cityscapes/train_{}{}.dict'.format(args.spx_method, args.nseg)
args.known_ignore = False
print(args)
identity_transform = et.ExtCompose([et.ExtToTensor(dtype_list=['int','int'])])
### load superpixel & max-frequent pooled target | region_dataset = RegionCityscapesTensor(args, | 0 | 2023-10-24 09:19:58+00:00 | 2k |
upiterbarg/hihack | models/flat_transformer.py | [
{
"identifier": "generate_square_subsequent_mask",
"path": "models/transformer_lstm.py",
"snippet": "def generate_square_subsequent_mask(sz: int, device: str = \"cpu\") -> torch.Tensor:\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = (\n mask.float()\n .masked... | import json
import numpy as np
import os
import pathlib
import pdb
import sys
import torch
from nle import nethack
from nle.nethack.actions import ACTIONS as A
from torch import nn
from torch.nn import functional as F
from .transformer_lstm import (
generate_square_subsequent_mask,
PositionalEncoding
)
from chaotic_dwarf import (
TopLineEncoder,
BottomLinesEncoder,
ScreenEncoder,
conv_outdim
) | 1,449 |
base_path = pathlib.Path().resolve()
sys.path.insert(0, os.path.join(base_path, '..', 'dungeonsdata-neurips2022/experiment_code/hackrl/models'))
class FlatTransformer(nn.Module):
def __init__(self, shape, action_space, flags, device):
super(FlatTransformer, self).__init__()
self.flags = flags
self.num_actions = len(action_space)
self.use_prev_action = flags.use_prev_action
self.topline_encoder = TopLineEncoder()
self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())
pixel_size = flags.pixel_size
if flags.crop_dim == 0:
screen_shape = (24 * pixel_size, 80 * pixel_size)
else:
screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)
self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))
self.prev_actions_dim = 128 if self.use_prev_action else 0
self.h_dim = sum(
[
self.topline_encoder.hidden_dim,
self.bottomline_encoder.hidden_dim,
self.screen_encoder.hidden_dim,
self.prev_actions_dim,
]
)
self.num_attention_heads = flags.num_attention_heads
self.num_transformer_encoder_layers = flags.num_transformer_layers
core_layer = nn.TransformerEncoderLayer(d_model=self.h_dim, nhead=self.num_attention_heads)
self.core = nn.TransformerEncoder(core_layer, num_layers=self.num_transformer_encoder_layers)
self.positional_encoder = PositionalEncoding(self.h_dim)
self.policy_hidden_dim = 1024
self.policy = nn.Sequential(nn.Linear(self.h_dim, self.policy_hidden_dim),
nn.ELU(),
nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim),
nn.ELU(),
nn.Linear(self.policy_hidden_dim, self.num_actions)
)
self.baseline = nn.Linear(self.h_dim, 1)
self.version = 0
self.inference_unroll_length = 1
def initial_state(self, batch_size=1):
return (
torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length),
torch.rand(self.inference_unroll_length, batch_size, self.h_dim)
)
def forward(self, inputs, core_state=None):
T, B, C, H, W = inputs["screen_image"].shape
topline = inputs["tty_chars"][..., 0, :]
bottom_line = inputs["tty_chars"][..., -2:, :]
st = [
self.topline_encoder(
topline.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.bottomline_encoder(
bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.screen_encoder(
inputs["screen_image"]
.float(memory_format=torch.contiguous_format)
.view(T * B, C, H, W)
),
]
if self.use_prev_action:
st.append(torch.nn.functional.one_hot(inputs["prev_action"], self.prev_actions_dim).view(T * B, -1))
st = torch.cat(st, dim=1)
core_input = st.reshape(T, B, -1)
notdone = (~inputs["done"]).float()
if not self.training:
prev_mask, prev_encodings = core_state
prev_mask = prev_mask.squeeze(0)
core_input = torch.cat([prev_encodings[1:], core_input], axis=0)
core_mask = torch.stack(
[torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(core_input.device)], axis=1) for i in range(B)]
)
core_mask[:, -1, -1] = 1
core_state = (core_mask.detach().clone().unsqueeze(0),
core_input.detach().clone()
)
for i in range(B):
core_mask[i].fill_diagonal_(1)
core_mask = (core_mask.float().masked_fill(core_mask == 0, float("-inf")).masked_fill(core_mask == 1, float(0.0))).to(device=core_input.device)
core_mask = torch.repeat_interleave(core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)
T = core_input.shape[0]
else:
|
base_path = pathlib.Path().resolve()
sys.path.insert(0, os.path.join(base_path, '..', 'dungeonsdata-neurips2022/experiment_code/hackrl/models'))
class FlatTransformer(nn.Module):
def __init__(self, shape, action_space, flags, device):
super(FlatTransformer, self).__init__()
self.flags = flags
self.num_actions = len(action_space)
self.use_prev_action = flags.use_prev_action
self.topline_encoder = TopLineEncoder()
self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())
pixel_size = flags.pixel_size
if flags.crop_dim == 0:
screen_shape = (24 * pixel_size, 80 * pixel_size)
else:
screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)
self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))
self.prev_actions_dim = 128 if self.use_prev_action else 0
self.h_dim = sum(
[
self.topline_encoder.hidden_dim,
self.bottomline_encoder.hidden_dim,
self.screen_encoder.hidden_dim,
self.prev_actions_dim,
]
)
self.num_attention_heads = flags.num_attention_heads
self.num_transformer_encoder_layers = flags.num_transformer_layers
core_layer = nn.TransformerEncoderLayer(d_model=self.h_dim, nhead=self.num_attention_heads)
self.core = nn.TransformerEncoder(core_layer, num_layers=self.num_transformer_encoder_layers)
self.positional_encoder = PositionalEncoding(self.h_dim)
self.policy_hidden_dim = 1024
self.policy = nn.Sequential(nn.Linear(self.h_dim, self.policy_hidden_dim),
nn.ELU(),
nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim),
nn.ELU(),
nn.Linear(self.policy_hidden_dim, self.num_actions)
)
self.baseline = nn.Linear(self.h_dim, 1)
self.version = 0
self.inference_unroll_length = 1
def initial_state(self, batch_size=1):
return (
torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length),
torch.rand(self.inference_unroll_length, batch_size, self.h_dim)
)
def forward(self, inputs, core_state=None):
T, B, C, H, W = inputs["screen_image"].shape
topline = inputs["tty_chars"][..., 0, :]
bottom_line = inputs["tty_chars"][..., -2:, :]
st = [
self.topline_encoder(
topline.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.bottomline_encoder(
bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)
),
self.screen_encoder(
inputs["screen_image"]
.float(memory_format=torch.contiguous_format)
.view(T * B, C, H, W)
),
]
if self.use_prev_action:
st.append(torch.nn.functional.one_hot(inputs["prev_action"], self.prev_actions_dim).view(T * B, -1))
st = torch.cat(st, dim=1)
core_input = st.reshape(T, B, -1)
notdone = (~inputs["done"]).float()
if not self.training:
prev_mask, prev_encodings = core_state
prev_mask = prev_mask.squeeze(0)
core_input = torch.cat([prev_encodings[1:], core_input], axis=0)
core_mask = torch.stack(
[torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(core_input.device)], axis=1) for i in range(B)]
)
core_mask[:, -1, -1] = 1
core_state = (core_mask.detach().clone().unsqueeze(0),
core_input.detach().clone()
)
for i in range(B):
core_mask[i].fill_diagonal_(1)
core_mask = (core_mask.float().masked_fill(core_mask == 0, float("-inf")).masked_fill(core_mask == 1, float(0.0))).to(device=core_input.device)
core_mask = torch.repeat_interleave(core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)
T = core_input.shape[0]
else: | core_mask = generate_square_subsequent_mask(T, core_input.device) | 0 | 2023-10-23 15:44:32+00:00 | 2k |
kulkansecurity/gitverify | gitverify.py | [
{
"identifier": "gh_api",
"path": "include/gh_api.py",
"snippet": "GITHUB_API_URL = \"https://api.github.com/repos/\"\nGITHUB_TOKEN = os.environ.get(\"GH_ACCESS_TOKEN\", None)\ndef github_request_json(url):\ndef fetch_domains_from_code(repository):\ndef fetch_repository(github_url):\ndef fetch_contribut... | import os, sys
from include import gh_api, output, arg_parser
from modules import verify_metadata
from modules import verify_contributors
from modules import verify_domains
from modules import verify_issues_prs | 1,180 | #!/usr/bin/env python3
if __name__ == "__main__":
args = arg_parser.parse_arguments()
output_obj = output.Output(verbose=args.verbose, outfile=args.outfile, outformat=args.format)
print("""
░██████╗░██╗████████╗██╗░░░██╗███████╗██████╗░██╗███████╗██╗░░░██╗
██╔════╝░██║╚══██╔══╝██║░░░██║██╔════╝██╔══██╗██║██╔════╝╚██╗░██╔╝
██║░░██╗░██║░░░██║░░░╚██╗░██╔╝█████╗░░██████╔╝██║█████╗░░░╚████╔╝░
██║░░╚██╗██║░░░██║░░░░╚████╔╝░██╔══╝░░██╔══██╗██║██╔══╝░░░░╚██╔╝░░
╚██████╔╝██║░░░██║░░░░░╚██╔╝░░███████╗██║░░██║██║██║░░░░░░░░██║░░░
░╚═════╝░╚═╝░░░╚═╝░░░░░░╚═╝░░░╚══════╝╚═╝░░╚═╝╚═╝╚═╝░░░░░░░░╚═╝░░░
GitVerify: Is the repo trustworthy? Make an informed decision.
v1.0 - https://www.kulkan.com
######################################################################################""")
# Let's warn the user that unauth RateLimits are pretty low
if os.environ.get("GH_ACCESS_TOKEN", None) == None:
output_obj.warn("GH_ACCESS_TOKEN environment variable not set, using GitHub RateLimits for anonymous queries")
output_obj.warn("Unauthenticated requests to the Github API will enforce a very low and strict RateLimit")
print("For information on how to create a GitHub API Access Token refer to: ")
print("https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens")
if os.environ.get("VT_API_KEY", None) == None:
output_obj.warn("VT_API_KEY environment variable not set, disabling VirusTotal checks.")
print("For information on how to create a VirusTotal API Key refer to: ")
print("https://www.virustotal.com/en/documentation/public-api/")
args.disable_vt = True
if not args.repositories_file:
args.repositories_file = [args.repository]
for repo in args.repositories_file:
try:
repository = gh_api.fetch_repository(repo)
print("######################################################################################")
print("Now verifying repository: {}".format(repository.get('full_name')))
except Exception as ex:
print("Unable to pull data for the repository that was provided. Is it a valid repo URL?")
if args.verbose:
print(ex)
sys.exit()
output_obj.initialize_repo_output(repository.get('full_name'))
verify_metadata.run(repository, output_obj)
# We store the result from contributors() to prevent calling it again for I+PRS
| #!/usr/bin/env python3
if __name__ == "__main__":
args = arg_parser.parse_arguments()
output_obj = output.Output(verbose=args.verbose, outfile=args.outfile, outformat=args.format)
print("""
░██████╗░██╗████████╗██╗░░░██╗███████╗██████╗░██╗███████╗██╗░░░██╗
██╔════╝░██║╚══██╔══╝██║░░░██║██╔════╝██╔══██╗██║██╔════╝╚██╗░██╔╝
██║░░██╗░██║░░░██║░░░╚██╗░██╔╝█████╗░░██████╔╝██║█████╗░░░╚████╔╝░
██║░░╚██╗██║░░░██║░░░░╚████╔╝░██╔══╝░░██╔══██╗██║██╔══╝░░░░╚██╔╝░░
╚██████╔╝██║░░░██║░░░░░╚██╔╝░░███████╗██║░░██║██║██║░░░░░░░░██║░░░
░╚═════╝░╚═╝░░░╚═╝░░░░░░╚═╝░░░╚══════╝╚═╝░░╚═╝╚═╝╚═╝░░░░░░░░╚═╝░░░
GitVerify: Is the repo trustworthy? Make an informed decision.
v1.0 - https://www.kulkan.com
######################################################################################""")
# Let's warn the user that unauth RateLimits are pretty low
if os.environ.get("GH_ACCESS_TOKEN", None) == None:
output_obj.warn("GH_ACCESS_TOKEN environment variable not set, using GitHub RateLimits for anonymous queries")
output_obj.warn("Unauthenticated requests to the Github API will enforce a very low and strict RateLimit")
print("For information on how to create a GitHub API Access Token refer to: ")
print("https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens")
if os.environ.get("VT_API_KEY", None) == None:
output_obj.warn("VT_API_KEY environment variable not set, disabling VirusTotal checks.")
print("For information on how to create a VirusTotal API Key refer to: ")
print("https://www.virustotal.com/en/documentation/public-api/")
args.disable_vt = True
if not args.repositories_file:
args.repositories_file = [args.repository]
for repo in args.repositories_file:
try:
repository = gh_api.fetch_repository(repo)
print("######################################################################################")
print("Now verifying repository: {}".format(repository.get('full_name')))
except Exception as ex:
print("Unable to pull data for the repository that was provided. Is it a valid repo URL?")
if args.verbose:
print(ex)
sys.exit()
output_obj.initialize_repo_output(repository.get('full_name'))
verify_metadata.run(repository, output_obj)
# We store the result from contributors() to prevent calling it again for I+PRS | contributors = verify_contributors.run(repository, output_obj) | 4 | 2023-10-24 15:39:55+00:00 | 2k |
nmathey/finasync | finasync/realt.py | [
{
"identifier": "GNOSIS_API_TOKENLIST_URI",
"path": "finasync/constants.py",
"snippet": "GNOSIS_API_TOKENLIST_URI = (\n \"https://blockscout.com/xdai/mainnet/api?module=account&action=tokenlist&address=\"\n)"
},
{
"identifier": "REALT_API_TOKENLIST_URI",
"path": "finasync/constants.py",
... | import requests
import re
import json
import time
import os
import logging
from pathlib import Path
from datetime import datetime, timedelta
from json.decoder import JSONDecodeError
from finary_uapi.user_real_estates import (
get_user_real_estates,
delete_user_real_estates,
update_user_real_estates,
add_user_real_estates,
add_user_real_estates_with_currency,
)
from finary_uapi.user_me import get_display_currency_code
from .constants import (
GNOSIS_API_TOKENLIST_URI,
REALT_API_TOKENLIST_URI,
REALT_OFFLINE_TOKENS_LIST,
)
from .utils import convert_currency | 881 |
def get_realt_token_details(realt_token_contractAdress):
Now_Time = datetime.today()
RealT_OfflineTokensList_Path = Path(REALT_OFFLINE_TOKENS_LIST)
RealT_OfflineTokensList_Path.touch(exist_ok=True)
with open(RealT_OfflineTokensList_Path) as json_file:
try:
RealT_OfflineTokensList = json.load(json_file)
except JSONDecodeError:
RealT_OfflineTokensList = {
"info": {
"last_sync": str(datetime.timestamp(Now_Time - timedelta(weeks=2)))
},
"data": {},
}
# Update offlineTokensList from RealT API only if more than 1 week old
if float(RealT_OfflineTokensList["info"]["last_sync"]) < datetime.timestamp(
Now_Time - timedelta(weeks=1)
):
MyRealT_API_Header = {
"Accept": "*/*",
"X-AUTH-REALT-TOKEN": os.environ["MYREALT_API_KEY"],
}
TokensListReq = requests.get(
|
def get_realt_token_details(realt_token_contractAdress):
Now_Time = datetime.today()
RealT_OfflineTokensList_Path = Path(REALT_OFFLINE_TOKENS_LIST)
RealT_OfflineTokensList_Path.touch(exist_ok=True)
with open(RealT_OfflineTokensList_Path) as json_file:
try:
RealT_OfflineTokensList = json.load(json_file)
except JSONDecodeError:
RealT_OfflineTokensList = {
"info": {
"last_sync": str(datetime.timestamp(Now_Time - timedelta(weeks=2)))
},
"data": {},
}
# Update offlineTokensList from RealT API only if more than 1 week old
if float(RealT_OfflineTokensList["info"]["last_sync"]) < datetime.timestamp(
Now_Time - timedelta(weeks=1)
):
MyRealT_API_Header = {
"Accept": "*/*",
"X-AUTH-REALT-TOKEN": os.environ["MYREALT_API_KEY"],
}
TokensListReq = requests.get( | REALT_API_TOKENLIST_URI, headers=MyRealT_API_Header | 1 | 2023-10-24 00:32:05+00:00 | 2k |
biggzlar/plausible-uncertainties | evidential_regression/networks.py | [
{
"identifier": "DenseInverseGamma",
"path": "evidential_regression/layers.py",
"snippet": "class DenseInverseGamma(torch.nn.Module):\n \"\"\" Based on: https://github.com/aamini/evidential-deep-learning.\n \"\"\"\n def __init__(self, in_features, units=1):\n super(DenseInverseGamma, sel... | import torch
import torch.nn as nn
import numpy as np
from .layers import DenseInverseGamma, DenseInverseWishart | 897 |
class UnivariateDerNet(nn.Module):
def __init__(self):
super(UnivariateDerNet, self).__init__()
self.hidden = nn.Sequential(
nn.Linear(in_features=1, out_features=128),
# nn.ReLU6(),
# nn.Tanh(),
nn.Mish(),
nn.Linear(in_features=128, out_features=128),
# nn.ReLU6(),
# nn.Tanh(),
nn.Mish(),
nn.Linear(in_features=128, out_features=128),
# nn.ReLU6(),
# nn.Tanh(),
nn.Mish(),
nn.Linear(in_features=128, out_features=128),
|
class UnivariateDerNet(nn.Module):
def __init__(self):
super(UnivariateDerNet, self).__init__()
self.hidden = nn.Sequential(
nn.Linear(in_features=1, out_features=128),
# nn.ReLU6(),
# nn.Tanh(),
nn.Mish(),
nn.Linear(in_features=128, out_features=128),
# nn.ReLU6(),
# nn.Tanh(),
nn.Mish(),
nn.Linear(in_features=128, out_features=128),
# nn.ReLU6(),
# nn.Tanh(),
nn.Mish(),
nn.Linear(in_features=128, out_features=128), | DenseInverseGamma(in_features=128, units=1) | 0 | 2023-10-19 08:44:08+00:00 | 2k |
t-ega/whatsapp-cloud-sdk | whatsapp_cloud_sdk/_formaters/message_formatter.py | [
{
"identifier": "JSONDict",
"path": "whatsapp_cloud_sdk/_utils/types.py",
"snippet": "class MessageTypes(Enum):\n IMAGE = \"image\"\n AUDIO = \"audio\"\n TEXT = \"text\"\n REACTION = \"reaction\"\n STICKER = \"sticker\"\n LOCATION = \"location\"\n UNKNOWN = \"unknown\""
},
{
... | from enum import Enum
from typing import List, Optional
from unicodedata import decimal
from whatsapp_cloud_sdk._utils.types import JSONDict
from whatsapp_cloud_sdk._validators.messages import ButtonContents | 884 | """This module contains custom formatting class and aliases for internal use within the library.
Warning:
Contents of this module are intended to be used internally by the library and *not* by the
user. Changes to this module are not considered breaking changes and may not be documented in
the changelog.
"""
class LinkTypes(Enum):
"""
Constants representing different types of links.
Attributes:
AUDIO (str): A link type for audio content.
IMAGE (str): A link type for image content.
VIDEO (str): A link type for video content.
"""
AUDIO = "audio"
IMAGE = "image"
VIDEO = "video"
class MessageFormatter:
"""
Provides methods for formatting messages and data for interaction with the WhatsApp API.
Methods:
- format_text_message(body: str, to: str, preview_url: bool = False,
message_id: str = None) -> JSONDict:
- format_button_message(to: str, text: str, buttons: List[ButtonContents],
message_id: Optional[str])
-> JSONDict:
- format_reply_with_reaction(to: str, emoji, message_id: Optional[str]) -> JSONDict:
- format_link_message(to: str, link: str, m_type: LinkTypes, caption: str = "",
message_id: str =None
-> JSONDict:
- format_send_document_by_url(to: str, document_link: str, caption: str,
is_reply: bool = False,
message_id: str = None) -> JSONDict:
- format_location_message(to: str, latitude: decimal, longitude: int, name: str,
address: str,
message_id: Optional[str])
-> JSONDict:
- format_contact_message(contact: list, to: str, message_id: Optional[str]) -> JSONDict:
- format_sticker_message_by_url(link: str, to: str, message_id: Optional[str]) -> JSONDict:
- mark_message_as_read(message_id: str) -> JSONDict:
"""
@staticmethod
def format_text_message(
body: str, to: str, preview_url: bool = False, message_id: str = None
) -> JSONDict:
"""
Formats a text message for WhatsApp.
Args:
- body (str): The text message body.
- to (str): The recipient's WhatsApp number.
- preview_url (bool, optional): Whether to preview URLs in the message.
- message_id (str, optional): The ID of the message being replied to.
Returns:
- JSONDict: The formatted text message.
"""
body = {
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": to,
"type": "text",
"text": {"preview_url": preview_url, "body": body},
}
if message_id:
body["context"] = {"message_id": message_id}
return body
@staticmethod
def format_button_message(
to: str,
text: str,
| """This module contains custom formatting class and aliases for internal use within the library.
Warning:
Contents of this module are intended to be used internally by the library and *not* by the
user. Changes to this module are not considered breaking changes and may not be documented in
the changelog.
"""
class LinkTypes(Enum):
"""
Constants representing different types of links.
Attributes:
AUDIO (str): A link type for audio content.
IMAGE (str): A link type for image content.
VIDEO (str): A link type for video content.
"""
AUDIO = "audio"
IMAGE = "image"
VIDEO = "video"
class MessageFormatter:
"""
Provides methods for formatting messages and data for interaction with the WhatsApp API.
Methods:
- format_text_message(body: str, to: str, preview_url: bool = False,
message_id: str = None) -> JSONDict:
- format_button_message(to: str, text: str, buttons: List[ButtonContents],
message_id: Optional[str])
-> JSONDict:
- format_reply_with_reaction(to: str, emoji, message_id: Optional[str]) -> JSONDict:
- format_link_message(to: str, link: str, m_type: LinkTypes, caption: str = "",
message_id: str =None
-> JSONDict:
- format_send_document_by_url(to: str, document_link: str, caption: str,
is_reply: bool = False,
message_id: str = None) -> JSONDict:
- format_location_message(to: str, latitude: decimal, longitude: int, name: str,
address: str,
message_id: Optional[str])
-> JSONDict:
- format_contact_message(contact: list, to: str, message_id: Optional[str]) -> JSONDict:
- format_sticker_message_by_url(link: str, to: str, message_id: Optional[str]) -> JSONDict:
- mark_message_as_read(message_id: str) -> JSONDict:
"""
@staticmethod
def format_text_message(
body: str, to: str, preview_url: bool = False, message_id: str = None
) -> JSONDict:
"""
Formats a text message for WhatsApp.
Args:
- body (str): The text message body.
- to (str): The recipient's WhatsApp number.
- preview_url (bool, optional): Whether to preview URLs in the message.
- message_id (str, optional): The ID of the message being replied to.
Returns:
- JSONDict: The formatted text message.
"""
body = {
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": to,
"type": "text",
"text": {"preview_url": preview_url, "body": body},
}
if message_id:
body["context"] = {"message_id": message_id}
return body
@staticmethod
def format_button_message(
to: str,
text: str, | buttons: List[ButtonContents], | 1 | 2023-10-15 21:12:45+00:00 | 2k |
DTennant/GPC | data/imagenet.py | [
{
"identifier": "subsample_instances",
"path": "data/data_utils.py",
"snippet": "def subsample_instances(dataset, prop_indices_to_subsample=0.8):\n\n np.random.seed(0)\n subsample_indices = np.random.choice(range(len(dataset)), replace=False,\n size=(int(pro... | import torchvision
import numpy as np
import os
from copy import deepcopy
from data.data_utils import subsample_instances
from config import imagenet_root | 1,514 |
class ImageNetBase(torchvision.datasets.ImageFolder):
def __init__(self, root, transform):
super(ImageNetBase, self).__init__(root, transform)
self.uq_idxs = np.array(range(len(self)))
def __getitem__(self, item):
img, label = super().__getitem__(item)
uq_idx = self.uq_idxs[item]
return img, label, uq_idx
def subsample_dataset(dataset, idxs):
imgs_ = []
for i in idxs:
imgs_.append(dataset.imgs[i])
dataset.imgs = imgs_
samples_ = []
for i in idxs:
samples_.append(dataset.samples[i])
dataset.samples = samples_
# dataset.imgs = [x for i, x in enumerate(dataset.imgs) if i in idxs]
# dataset.samples = [x for i, x in enumerate(dataset.samples) if i in idxs]
dataset.targets = np.array(dataset.targets)[idxs].tolist()
dataset.uq_idxs = dataset.uq_idxs[idxs]
return dataset
def subsample_classes(dataset, include_classes=list(range(1000))):
cls_idxs = [x for x, t in enumerate(dataset.targets) if t in include_classes]
target_xform_dict = {}
for i, k in enumerate(include_classes):
target_xform_dict[k] = i
dataset = subsample_dataset(dataset, cls_idxs)
dataset.target_transform = lambda x: target_xform_dict[x]
return dataset
def get_train_val_indices(train_dataset, val_split=0.2):
train_classes = list(set(train_dataset.targets))
# Get train/test indices
train_idxs = []
val_idxs = []
for cls in train_classes:
cls_idxs = np.where(np.array(train_dataset.targets) == cls)[0]
v_ = np.random.choice(cls_idxs, replace=False, size=((int(val_split * len(cls_idxs))),))
t_ = [x for x in cls_idxs if x not in v_]
train_idxs.extend(t_)
val_idxs.extend(v_)
return train_idxs, val_idxs
def get_equal_len_datasets(dataset1, dataset2):
"""
Make two datasets the same length
"""
if len(dataset1) > len(dataset2):
rand_idxs = np.random.choice(range(len(dataset1)), size=(len(dataset2, )))
subsample_dataset(dataset1, rand_idxs)
elif len(dataset2) > len(dataset1):
rand_idxs = np.random.choice(range(len(dataset2)), size=(len(dataset1, )))
subsample_dataset(dataset2, rand_idxs)
return dataset1, dataset2
def get_imagenet_100_datasets(train_transform, test_transform, train_classes=range(80),
prop_train_labels=0.8, split_train_val=False, seed=0):
np.random.seed(seed)
# Subsample imagenet dataset initially to include 100 classes
subsampled_100_classes = np.random.choice(range(1000), size=(100,), replace=False)
subsampled_100_classes = np.sort(subsampled_100_classes)
print(f'Constructing ImageNet-100 dataset from the following classes: {subsampled_100_classes.tolist()}')
cls_map = {i: j for i, j in zip(subsampled_100_classes, range(100))}
# Init entire training set
|
class ImageNetBase(torchvision.datasets.ImageFolder):
def __init__(self, root, transform):
super(ImageNetBase, self).__init__(root, transform)
self.uq_idxs = np.array(range(len(self)))
def __getitem__(self, item):
img, label = super().__getitem__(item)
uq_idx = self.uq_idxs[item]
return img, label, uq_idx
def subsample_dataset(dataset, idxs):
imgs_ = []
for i in idxs:
imgs_.append(dataset.imgs[i])
dataset.imgs = imgs_
samples_ = []
for i in idxs:
samples_.append(dataset.samples[i])
dataset.samples = samples_
# dataset.imgs = [x for i, x in enumerate(dataset.imgs) if i in idxs]
# dataset.samples = [x for i, x in enumerate(dataset.samples) if i in idxs]
dataset.targets = np.array(dataset.targets)[idxs].tolist()
dataset.uq_idxs = dataset.uq_idxs[idxs]
return dataset
def subsample_classes(dataset, include_classes=list(range(1000))):
cls_idxs = [x for x, t in enumerate(dataset.targets) if t in include_classes]
target_xform_dict = {}
for i, k in enumerate(include_classes):
target_xform_dict[k] = i
dataset = subsample_dataset(dataset, cls_idxs)
dataset.target_transform = lambda x: target_xform_dict[x]
return dataset
def get_train_val_indices(train_dataset, val_split=0.2):
train_classes = list(set(train_dataset.targets))
# Get train/test indices
train_idxs = []
val_idxs = []
for cls in train_classes:
cls_idxs = np.where(np.array(train_dataset.targets) == cls)[0]
v_ = np.random.choice(cls_idxs, replace=False, size=((int(val_split * len(cls_idxs))),))
t_ = [x for x in cls_idxs if x not in v_]
train_idxs.extend(t_)
val_idxs.extend(v_)
return train_idxs, val_idxs
def get_equal_len_datasets(dataset1, dataset2):
"""
Make two datasets the same length
"""
if len(dataset1) > len(dataset2):
rand_idxs = np.random.choice(range(len(dataset1)), size=(len(dataset2, )))
subsample_dataset(dataset1, rand_idxs)
elif len(dataset2) > len(dataset1):
rand_idxs = np.random.choice(range(len(dataset2)), size=(len(dataset1, )))
subsample_dataset(dataset2, rand_idxs)
return dataset1, dataset2
def get_imagenet_100_datasets(train_transform, test_transform, train_classes=range(80),
prop_train_labels=0.8, split_train_val=False, seed=0):
np.random.seed(seed)
# Subsample imagenet dataset initially to include 100 classes
subsampled_100_classes = np.random.choice(range(1000), size=(100,), replace=False)
subsampled_100_classes = np.sort(subsampled_100_classes)
print(f'Constructing ImageNet-100 dataset from the following classes: {subsampled_100_classes.tolist()}')
cls_map = {i: j for i, j in zip(subsampled_100_classes, range(100))}
# Init entire training set | imagenet_training_set = ImageNetBase(root=os.path.join(imagenet_root, 'train'), transform=train_transform) | 1 | 2023-10-23 18:23:22+00:00 | 2k |
camenduru/MiniGPT-v2-hf | minigpt4/models/base_model.py | [
{
"identifier": "download_cached_file",
"path": "minigpt4/common/dist_utils.py",
"snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only... | import os
import logging
import contextlib
import numpy as np
import torch
import torch.nn as nn
from omegaconf import OmegaConf
from transformers import BertTokenizer, LlamaTokenizer
from transformers.models.llama.modeling_llama import LlamaForCausalLM
from peft import (
LoraConfig,
get_peft_model,
prepare_model_for_int8_training,
)
from minigpt4.common.dist_utils import download_cached_file, is_dist_avail_and_initialized
from minigpt4.common.utils import get_abs_path, is_url
from minigpt4.models.eva_vit import create_eva_vit_g | 1,202 | """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class BaseModel(nn.Module):
"""Base class for models."""
def __init__(self):
super().__init__()
@property
def device(self):
return list(self.parameters())[-1].device
def load_checkpoint(self, url_or_filename):
"""
Load from a finetuned checkpoint.
This should expect no mismatch in the model keys and the checkpoint keys.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint.keys():
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
@classmethod
def from_pretrained(cls, model_type):
"""
Build a pretrained model from default configuration file, specified by model_type.
Args:
- model_type (str): model type, specifying architecture and checkpoints.
Returns:
- model (nn.Module): pretrained or finetuned model, depending on the configuration.
"""
model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model
model = cls.from_config(model_cfg)
return model
@classmethod
def default_config_path(cls, model_type):
assert (
model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
), "Unknown model type {}".format(model_type)
| """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
class BaseModel(nn.Module):
"""Base class for models."""
def __init__(self):
super().__init__()
@property
def device(self):
return list(self.parameters())[-1].device
def load_checkpoint(self, url_or_filename):
"""
Load from a finetuned checkpoint.
This should expect no mismatch in the model keys and the checkpoint keys.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location="cpu")
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location="cpu")
else:
raise RuntimeError("checkpoint url or path is invalid")
if "model" in checkpoint.keys():
state_dict = checkpoint["model"]
else:
state_dict = checkpoint
msg = self.load_state_dict(state_dict, strict=False)
logging.info("Missing keys {}".format(msg.missing_keys))
logging.info("load checkpoint from %s" % url_or_filename)
return msg
@classmethod
def from_pretrained(cls, model_type):
"""
Build a pretrained model from default configuration file, specified by model_type.
Args:
- model_type (str): model type, specifying architecture and checkpoints.
Returns:
- model (nn.Module): pretrained or finetuned model, depending on the configuration.
"""
model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model
model = cls.from_config(model_cfg)
return model
@classmethod
def default_config_path(cls, model_type):
assert (
model_type in cls.PRETRAINED_MODEL_CONFIG_DICT
), "Unknown model type {}".format(model_type) | return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type]) | 2 | 2023-10-15 19:54:22+00:00 | 2k |
deepghs/sdeval | sdeval/corrupt/aicorrupt.py | [
{
"identifier": "load_images",
"path": "sdeval/utils/images.py",
"snippet": "def _yield_images(images: ImagesTyping) -> Iterator[Image.Image]:\ndef load_images(images: ImagesTyping) -> List[Image.Image]:"
},
{
"identifier": "tqdm",
"path": "sdeval/utils/tqdm_.py",
"snippet": "def tqdm(*a... | import json
import numpy as np
from functools import lru_cache
from typing import Tuple, Optional, Mapping
from PIL import Image
from huggingface_hub import hf_hub_download
from imgutils.data import rgb_encode, ImageTyping, load_image
from imgutils.utils import open_onnx_model
from ..utils import ImagesTyping, load_images, tqdm | 1,561 |
@lru_cache()
def _open_anime_aicop_meta(model_name: str):
"""
Open the meta information of the AI image corrupted detection model.
This function downloads and opens the meta information of the AI image corrupted detection model specified by the given model name using Hugging Face Hub.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The opened meta information of the AI image corrupted detection model.
:rtype: dict
"""
with open(hf_hub_download(
f'deepghs/ai_image_corrupted',
f'{model_name}/meta.json',
), 'r', encoding='utf-8') as f:
return json.load(f)
@lru_cache()
def _open_anime_aicop_labels(model_name: str):
"""
Open the labels of the AI image corrupted detection model.
This function opens the labels of the AI image corrupted detection model specified by the given model name.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The labels of the AI image corrupted detection model.
:rtype: List[str]
"""
return _open_anime_aicop_meta(model_name)['labels']
def _img_encode(image: Image.Image, size: Tuple[int, int] = (384, 384),
normalize: Optional[Tuple[float, float]] = (0.5, 0.5)):
"""
Encode the image for AI image corrupted detection.
This function resizes and encodes the image for AI image corrupted detection.
:param image: The input image.
:type image: Image.Image
:param size: The target size for encoding. Default is (384, 384).
:type size: Tuple[int, int]
:param normalize: The normalization parameters. Default is (0.5, 0.5).
:type normalize: Optional[Tuple[float, float]]
:return: The encoded image data.
:rtype: np.ndarray
"""
image = image.resize(size, Image.BILINEAR)
data = rgb_encode(image, order_='CHW')
if normalize is not None:
mean_, std_ = normalize
mean = np.asarray([mean_]).reshape((-1, 1, 1))
std = np.asarray([std_]).reshape((-1, 1, 1))
data = (data - mean) / std
return data.astype(np.float32)
def get_ai_corrupted(image: ImageTyping, model_name: str = _DEFAULT_MODEL_NAME) -> Mapping[str, float]:
"""
Get AI image corrupted detection scores for an image.
This function calculates AI image corrupted detection scores for a given image using the specified model.
:param image: The input image.
:type image: ImageTyping
:param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
:type model_name: str
:return: A dictionary containing the corrupted score.
:rtype: Mapping[str, float]
"""
image = load_image(image, force_background='white', mode='RGB')
input_ = _img_encode(image)[None, ...]
output, = _open_anime_aicop_model(model_name).run(['output'], {'input': input_})
return dict(zip(_open_anime_aicop_labels(model_name), output[0].tolist()))
class AICorruptMetrics:
"""
Class for calculating an AI image corruptness score.
The `AICorruptMetrics` class allows you to calculate an AI image corruptness score using the AI image corrupted detection model.
:param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
:type model_name: str
:param silent: If True, suppresses progress bars and additional output during calculation.
:type silent: bool
:param tqdm_desc: Description for the tqdm progress bar during calculation.
:type tqdm_desc: str
"""
def __init__(self, model_name: str = _DEFAULT_MODEL_NAME,
silent: bool = False, tqdm_desc: str = None):
self._model_name = model_name
self.silent = silent
self.tqdm_desc = tqdm_desc or self.__class__.__name__
def score(self, images: ImagesTyping, silent: bool = None):
"""
Calculate the AI image corruptness score for a set of images.
This method calculates the AI image corruptness score for a set of input images using the AI image corrupted detection model.
:param images: The set of input images for calculating the AI image corruptness score.
:type images: ImagesTyping
:param silent: If True, suppresses progress bars and additional output during calculation.
:type silent: bool
:return: The AI image corruptness score.
:rtype: float
"""
| """
Overview:
AI image corrupt evaluation metrics.
"""
_DEFAULT_MODEL_NAME = 'caformer_s36_v0_focal'
@lru_cache()
def _open_anime_aicop_model(model_name: str):
"""
Open the AI image corrupted detection model.
This function downloads and opens the AI image corrupted detection model specified by the given model name using Hugging Face Hub.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The opened AI image corrupted detection model.
:rtype: Model
"""
return open_onnx_model(hf_hub_download(
f'deepghs/ai_image_corrupted',
f'{model_name}/model.onnx',
))
@lru_cache()
def _open_anime_aicop_meta(model_name: str):
"""
Open the meta information of the AI image corrupted detection model.
This function downloads and opens the meta information of the AI image corrupted detection model specified by the given model name using Hugging Face Hub.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The opened meta information of the AI image corrupted detection model.
:rtype: dict
"""
with open(hf_hub_download(
f'deepghs/ai_image_corrupted',
f'{model_name}/meta.json',
), 'r', encoding='utf-8') as f:
return json.load(f)
@lru_cache()
def _open_anime_aicop_labels(model_name: str):
"""
Open the labels of the AI image corrupted detection model.
This function opens the labels of the AI image corrupted detection model specified by the given model name.
:param model_name: The name of the AI image corrupted detection model.
:type model_name: str
:return: The labels of the AI image corrupted detection model.
:rtype: List[str]
"""
return _open_anime_aicop_meta(model_name)['labels']
def _img_encode(image: Image.Image, size: Tuple[int, int] = (384, 384),
normalize: Optional[Tuple[float, float]] = (0.5, 0.5)):
"""
Encode the image for AI image corrupted detection.
This function resizes and encodes the image for AI image corrupted detection.
:param image: The input image.
:type image: Image.Image
:param size: The target size for encoding. Default is (384, 384).
:type size: Tuple[int, int]
:param normalize: The normalization parameters. Default is (0.5, 0.5).
:type normalize: Optional[Tuple[float, float]]
:return: The encoded image data.
:rtype: np.ndarray
"""
image = image.resize(size, Image.BILINEAR)
data = rgb_encode(image, order_='CHW')
if normalize is not None:
mean_, std_ = normalize
mean = np.asarray([mean_]).reshape((-1, 1, 1))
std = np.asarray([std_]).reshape((-1, 1, 1))
data = (data - mean) / std
return data.astype(np.float32)
def get_ai_corrupted(image: ImageTyping, model_name: str = _DEFAULT_MODEL_NAME) -> Mapping[str, float]:
"""
Get AI image corrupted detection scores for an image.
This function calculates AI image corrupted detection scores for a given image using the specified model.
:param image: The input image.
:type image: ImageTyping
:param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
:type model_name: str
:return: A dictionary containing the corrupted score.
:rtype: Mapping[str, float]
"""
image = load_image(image, force_background='white', mode='RGB')
input_ = _img_encode(image)[None, ...]
output, = _open_anime_aicop_model(model_name).run(['output'], {'input': input_})
return dict(zip(_open_anime_aicop_labels(model_name), output[0].tolist()))
class AICorruptMetrics:
"""
Class for calculating an AI image corruptness score.
The `AICorruptMetrics` class allows you to calculate an AI image corruptness score using the AI image corrupted detection model.
:param model_name: The name of the AI image corrupted detection model. Default is 'caformer_s36_v0_focal'.
:type model_name: str
:param silent: If True, suppresses progress bars and additional output during calculation.
:type silent: bool
:param tqdm_desc: Description for the tqdm progress bar during calculation.
:type tqdm_desc: str
"""
def __init__(self, model_name: str = _DEFAULT_MODEL_NAME,
silent: bool = False, tqdm_desc: str = None):
self._model_name = model_name
self.silent = silent
self.tqdm_desc = tqdm_desc or self.__class__.__name__
def score(self, images: ImagesTyping, silent: bool = None):
"""
Calculate the AI image corruptness score for a set of images.
This method calculates the AI image corruptness score for a set of input images using the AI image corrupted detection model.
:param images: The set of input images for calculating the AI image corruptness score.
:type images: ImagesTyping
:param silent: If True, suppresses progress bars and additional output during calculation.
:type silent: bool
:return: The AI image corruptness score.
:rtype: float
""" | image_list = load_images(images) | 0 | 2023-10-18 03:35:52+00:00 | 2k |
WHUlwb/Assisted_learning | hrnet/hrnet.py | [
{
"identifier": "BN_MOMENTUM",
"path": "hrnet/backbone.py",
"snippet": "BN_MOMENTUM = 0.1\r"
},
{
"identifier": "hrnet_classification",
"path": "hrnet/backbone.py",
"snippet": "def hrnet_classification(backbone='hrnetv2_w18'):\r\n model = HighResolutionNet_Classification(num_classes=1... | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .backbone import BN_MOMENTUM, hrnet_classification
| 677 |
class HRnet_Backbone(nn.Module):
def __init__(self, in_channel, backbone = 'hrnetv2_w18'):
super(HRnet_Backbone, self).__init__()
self.model = hrnet_classification(backbone = backbone)
del self.model.incre_modules
del self.model.downsamp_modules
del self.model.final_layer
del self.model.classifier
self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=2, padding=1, bias=False)
def forward(self, x):
# x = self.model.conv1(x) # 原来的
x = self.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.conv2(x)
x = self.model.bn2(x)
x = self.model.relu(x)
x = self.model.layer1(x)
x_list = []
for i in range(2):
if self.model.transition1[i] is not None:
x_list.append(self.model.transition1[i](x))
else:
x_list.append(x)
y_list = self.model.stage2(x_list)
x_list = []
for i in range(3):
if self.model.transition2[i] is not None:
if i < 2:
x_list.append(self.model.transition2[i](y_list[i]))
else:
x_list.append(self.model.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.model.stage3(x_list)
x_list = []
for i in range(4):
if self.model.transition3[i] is not None:
if i < 3:
x_list.append(self.model.transition3[i](y_list[i]))
else:
x_list.append(self.model.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.model.stage4(x_list)
return y_list
class HRnet(nn.Module):
def __init__(self, in_channel, num_classes = 21, backbone = 'hrnetv2_w18', pretrained = True):
super(HRnet, self).__init__()
self.backbone = HRnet_Backbone(in_channel, backbone = backbone, pretrained = pretrained)
last_inp_channels = np.int(np.sum(self.backbone.model.pre_stage_channels))
self.last_layer = nn.Sequential(
nn.Conv2d(in_channels=last_inp_channels, out_channels=last_inp_channels, kernel_size=1, stride=1, padding=0),
|
class HRnet_Backbone(nn.Module):
def __init__(self, in_channel, backbone = 'hrnetv2_w18'):
super(HRnet_Backbone, self).__init__()
self.model = hrnet_classification(backbone = backbone)
del self.model.incre_modules
del self.model.downsamp_modules
del self.model.final_layer
del self.model.classifier
self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=2, padding=1, bias=False)
def forward(self, x):
# x = self.model.conv1(x) # 原来的
x = self.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.conv2(x)
x = self.model.bn2(x)
x = self.model.relu(x)
x = self.model.layer1(x)
x_list = []
for i in range(2):
if self.model.transition1[i] is not None:
x_list.append(self.model.transition1[i](x))
else:
x_list.append(x)
y_list = self.model.stage2(x_list)
x_list = []
for i in range(3):
if self.model.transition2[i] is not None:
if i < 2:
x_list.append(self.model.transition2[i](y_list[i]))
else:
x_list.append(self.model.transition2[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.model.stage3(x_list)
x_list = []
for i in range(4):
if self.model.transition3[i] is not None:
if i < 3:
x_list.append(self.model.transition3[i](y_list[i]))
else:
x_list.append(self.model.transition3[i](y_list[-1]))
else:
x_list.append(y_list[i])
y_list = self.model.stage4(x_list)
return y_list
class HRnet(nn.Module):
def __init__(self, in_channel, num_classes = 21, backbone = 'hrnetv2_w18', pretrained = True):
super(HRnet, self).__init__()
self.backbone = HRnet_Backbone(in_channel, backbone = backbone, pretrained = pretrained)
last_inp_channels = np.int(np.sum(self.backbone.model.pre_stage_channels))
self.last_layer = nn.Sequential(
nn.Conv2d(in_channels=last_inp_channels, out_channels=last_inp_channels, kernel_size=1, stride=1, padding=0),
| nn.BatchNorm2d(last_inp_channels, momentum=BN_MOMENTUM),
| 0 | 2023-10-17 06:19:02+00:00 | 2k |
dagedarr/telegram-budget | handlers/change_info_handler.py | [
{
"identifier": "get_by_id",
"path": "core/crud.py",
"snippet": "async def get_by_id(\n model: ModelType,\n obj_id: int,\n session: AsyncSession\n) -> ModelType:\n \"\"\"\n Получение объекта по ID.\n\n Parameters:\n - model (ModelType): Тип модели SQLAlchemy.\n - obj_id (int): Ид... | from aiogram import F, Router
from aiogram.fsm.context import FSMContext
from aiogram.types import CallbackQuery, Message
from sqlalchemy.ext.asyncio import AsyncSession
from core.crud import get_by_id, update
from filters import IsEndOnboardingFilter
from forms import RegistrationForm
from keyboards import set_info_keyboard, universal_keyboard
from models import User
from utils.user_actions import callback_message | 1,474 |
router = Router(name='change_info_router')
@router.callback_query(F.data == 'change_info')
async def change_info(callback: CallbackQuery):
"""Выводит Категории и Статистику и осльной функционал."""
await callback_message(
target=callback,
text='Изменить данные о себе',
|
router = Router(name='change_info_router')
@router.callback_query(F.data == 'change_info')
async def change_info(callback: CallbackQuery):
"""Выводит Категории и Статистику и осльной функционал."""
await callback_message(
target=callback,
text='Изменить данные о себе', | reply_markup=set_info_keyboard(), | 4 | 2023-10-23 17:30:24+00:00 | 2k |
nchen909/Pass-Tuning | evaluator/CodeBLEU/parser/DFG.py | [
{
"identifier": "remove_comments_and_docstrings",
"path": "evaluator/CodeBLEU/parser/utils.py",
"snippet": "def remove_comments_and_docstrings(source, lang):\n if lang in ['python']:\n \"\"\"\n Returns 'source' minus comments and docstrings.\n \"\"\"\n io_obj = StringIO(so... | from tree_sitter import Language, Parser
from .utils import (remove_comments_and_docstrings,
tree_to_token_index,
index_to_code_token,
tree_to_variable_index) | 1,245 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
def DFG_python(root_node,index_to_code,states):
assignment=['assignment','augmented_assignment','for_in_clause']
if_statement=['if_statement']
for_statement=['for_statement']
while_statement=['while_statement']
do_first_statement=['for_in_clause']
def_statement=['default_parameter']
states=states.copy()
if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':
idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
if root_node.type==code:
return [],states
elif code in states:
return [(code,idx,'comesFrom',[code],states[code].copy())],states
else:
if root_node.type=='identifier':
states[code]=[idx]
return [(code,idx,'comesFrom',[],[])],states
elif root_node.type in def_statement:
name=root_node.child_by_field_name('name')
value=root_node.child_by_field_name('value')
DFG=[]
if value is None:
| # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
def DFG_python(root_node,index_to_code,states):
assignment=['assignment','augmented_assignment','for_in_clause']
if_statement=['if_statement']
for_statement=['for_statement']
while_statement=['while_statement']
do_first_statement=['for_in_clause']
def_statement=['default_parameter']
states=states.copy()
if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':
idx,code=index_to_code[(root_node.start_point,root_node.end_point)]
if root_node.type==code:
return [],states
elif code in states:
return [(code,idx,'comesFrom',[code],states[code].copy())],states
else:
if root_node.type=='identifier':
states[code]=[idx]
return [(code,idx,'comesFrom',[],[])],states
elif root_node.type in def_statement:
name=root_node.child_by_field_name('name')
value=root_node.child_by_field_name('value')
DFG=[]
if value is None: | indexs=tree_to_variable_index(name,index_to_code) | 3 | 2023-10-20 09:24:44+00:00 | 2k |
kavisha725/MBNSF | trajectory_estimation/mbnt.py | [
{
"identifier": "extract_clusters_dbscan",
"path": "utils/o3d_uitls.py",
"snippet": "def extract_clusters_dbscan(cloud, eps = 0.9, min_points=10, return_clusters= False, return_colored_pcd=False):\n pcl = copy.deepcopy(cloud)\n pcl = make_open3d_point_cloud(pcl)\n labels = np.array(\n ... | import os, glob
import argparse
import logging
import csv
import numpy as np
import torch
import sys
import pytorch3d.loss as p3dloss
from utils.general_utils import *
from utils.ntp_utils import *
from utils.o3d_uitls import extract_clusters_dbscan
from utils.sc_utils import spatial_consistency_loss | 1,338 | # Long-term trajectory estimation with MBNT.
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
logger = logging.getLogger(__name__)
def total_sc_loss(labels_t, label_ids, pc, pc_defored, d_thresh=0.03, max_points=3000):
loss_sc = None
for id in label_ids:
cluster = pc[labels_t == id]
cluster_deformed = pc_defored[labels_t == id]
assert cluster.shape == cluster_deformed.shape
cluster_cs_loss = spatial_consistency_loss(cluster.unsqueeze(0), cluster_deformed.unsqueeze(0), d_thre=d_thresh, max_points=max_points)
if not loss_sc:
loss_sc = cluster_cs_loss
else: loss_sc += cluster_cs_loss
loss_sc /= len(label_ids)
return loss_sc.squeeze()
def fit_trajectory_field(
exp_dir,
pc_list,
options,
flow_gt_list = None,
traj_gt = None,
traj_val_mask = None
):
csv_file = open(f"{exp_dir}/metrics.csv", 'w')
metric_labels = ['train_loss', 'train_chamfer_loss', 'train_sc_loss', 'train_consist_loss', 'traj_consist', 'epe', 'acc_strict', 'acc_relax', 'angle_error', 'outlier']
csv_writer = csv.DictWriter(csv_file, ['itr'] + metric_labels + ['traj_metric'])
csv_writer.writeheader()
n_lidar_sweeps = len(pc_list)
if traj_gt is not None and traj_val_mask is not None:
traj_gt = torch.from_numpy(traj_gt).cuda()
traj_val_mask = torch.from_numpy(traj_val_mask).cuda()
# ANCHOR: Initialize the trajectory field
net = NeuralTrajField(traj_len=n_lidar_sweeps,
filter_size=options.hidden_units,
act_fn=options.act_fn, traj_type=options.traj_type, st_embed_type=options.st_embed_type)
net.to(options.device)
optimizer = torch.optim.Adam(net.parameters(), lr=options.lr, weight_decay=options.weight_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.5)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200,400,600,800], gamma=0.5)
# Pre-compute clusters:
labels_database, label_ids_database = [], []
for fid in range(n_lidar_sweeps):
| # Long-term trajectory estimation with MBNT.
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
logger = logging.getLogger(__name__)
def total_sc_loss(labels_t, label_ids, pc, pc_defored, d_thresh=0.03, max_points=3000):
loss_sc = None
for id in label_ids:
cluster = pc[labels_t == id]
cluster_deformed = pc_defored[labels_t == id]
assert cluster.shape == cluster_deformed.shape
cluster_cs_loss = spatial_consistency_loss(cluster.unsqueeze(0), cluster_deformed.unsqueeze(0), d_thre=d_thresh, max_points=max_points)
if not loss_sc:
loss_sc = cluster_cs_loss
else: loss_sc += cluster_cs_loss
loss_sc /= len(label_ids)
return loss_sc.squeeze()
def fit_trajectory_field(
exp_dir,
pc_list,
options,
flow_gt_list = None,
traj_gt = None,
traj_val_mask = None
):
csv_file = open(f"{exp_dir}/metrics.csv", 'w')
metric_labels = ['train_loss', 'train_chamfer_loss', 'train_sc_loss', 'train_consist_loss', 'traj_consist', 'epe', 'acc_strict', 'acc_relax', 'angle_error', 'outlier']
csv_writer = csv.DictWriter(csv_file, ['itr'] + metric_labels + ['traj_metric'])
csv_writer.writeheader()
n_lidar_sweeps = len(pc_list)
if traj_gt is not None and traj_val_mask is not None:
traj_gt = torch.from_numpy(traj_gt).cuda()
traj_val_mask = torch.from_numpy(traj_val_mask).cuda()
# ANCHOR: Initialize the trajectory field
net = NeuralTrajField(traj_len=n_lidar_sweeps,
filter_size=options.hidden_units,
act_fn=options.act_fn, traj_type=options.traj_type, st_embed_type=options.st_embed_type)
net.to(options.device)
optimizer = torch.optim.Adam(net.parameters(), lr=options.lr, weight_decay=options.weight_decay)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=500, gamma=0.5)
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200,400,600,800], gamma=0.5)
# Pre-compute clusters:
labels_database, label_ids_database = [], []
for fid in range(n_lidar_sweeps): | labels = extract_clusters_dbscan(pc_list[fid], eps = options.sc_cluster_eps, min_points=options.sc_cluster_min_points, return_clusters= False, return_colored_pcd=False) | 0 | 2023-10-16 07:21:12+00:00 | 2k |
cool-dev-guy/tkmoderngl | main.py | [
{
"identifier": "FramebufferImage",
"path": "tkmoderngl/framebuffer.py",
"snippet": "class FramebufferImage(ImageTk.PhotoImage):\n def __init__(self, master, ctx, size):\n super(FramebufferImage, self).__init__(Image.new('RGB', size, (0, 0, 0)))\n self.ctx = ctx\n self.fbo = self... | import tkinter as tk
import moderngl
import numpy as np
from tkmoderngl.framebuffer import FramebufferImage
from tkmoderngl.renderer import Canvas, PanTool | 1,014 | """
code from moderngl/examples
modified by : cool-dev-guy
"""
# the moderngl widget
class GlWidget(tk.Label):
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self.parent = args[0]
self._ctx = moderngl.create_standalone_context()
self._tkfbo = FramebufferImage(args[0],self._ctx,(500,500))
| """
code from moderngl/examples
modified by : cool-dev-guy
"""
# the moderngl widget
class GlWidget(tk.Label):
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
self.parent = args[0]
self._ctx = moderngl.create_standalone_context()
self._tkfbo = FramebufferImage(args[0],self._ctx,(500,500)) | self._canvas = Canvas(self._ctx) | 1 | 2023-10-15 07:58:13+00:00 | 2k |
G3VV/Yank | index.py | [
{
"identifier": "start_token_thread",
"path": "util/spotify.py",
"snippet": "def start_token_thread():\n \n client_id = spotify_id\n client_secret = spotify_secret\n \n get_access_token(client_id, client_secret)"
},
{
"identifier": "start",
"path": "util/download.py",
"sni... | from quart import Quart, send_file
from util.spotify import start_token_thread
from util.download import start, start_playlist
from dotenv import load_dotenv
import threading
import re
import os
import json | 850 |
app = Quart(__name__)
load_dotenv()
port = os.environ.get("port")
@app.route('/track/<string:id>')
async def serve_audio(id):
filename = await start(id)
return await send_file(filename, mimetype='audio/mpeg')
@app.route('/')
async def serve_index():
return "online"
@app.route('/playlist/<string:id>')
async def serve_playlist(id):
|
app = Quart(__name__)
load_dotenv()
port = os.environ.get("port")
@app.route('/track/<string:id>')
async def serve_audio(id):
filename = await start(id)
return await send_file(filename, mimetype='audio/mpeg')
@app.route('/')
async def serve_index():
return "online"
@app.route('/playlist/<string:id>')
async def serve_playlist(id): | filename = await start_playlist(id) | 2 | 2023-10-15 04:35:56+00:00 | 2k |
openfoodfacts/open-prices | app/models.py | [
{
"identifier": "Base",
"path": "app/db.py",
"snippet": ""
},
{
"identifier": "CurrencyEnum",
"path": "app/enums.py",
"snippet": "CURRENCIES = [(currency, currency) for currency in list_currencies()]\n NODE = \"NODE\"\n WAY = \"WAY\"\n RELATION = \"RELATION\"\n PRICE_TAG = \"... | from openfoodfacts import Flavor
from sqlalchemy import (
JSON,
BigInteger,
Boolean,
Column,
Date,
DateTime,
ForeignKey,
Integer,
Numeric,
String,
)
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import Mapped, mapped_column, relationship
from sqlalchemy.sql import func
from sqlalchemy_utils import force_auto_coercion
from sqlalchemy_utils.types.choice import ChoiceType
from app.db import Base
from app.enums import CurrencyEnum, LocationOSMEnum, PricePerEnum, ProofTypeEnum | 734 |
force_auto_coercion()
JSONVariant = JSON().with_variant(JSONB(), "postgresql")
class User(Base):
user_id = Column(String, primary_key=True, index=True)
token = Column(String, unique=True, index=True)
last_used = Column(DateTime(timezone=True))
price_count = Column(Integer, nullable=False, server_default="0", index=True)
created = Column(DateTime(timezone=True), server_default=func.now())
__tablename__ = "users"
class Product(Base):
id = Column(Integer, primary_key=True, index=True)
code = Column(String, unique=True, index=True)
source = Column(ChoiceType(Flavor))
product_name = Column(String)
product_quantity = Column(Integer)
brands = Column(String)
image_url = Column(String)
unique_scans_n = Column(Integer, nullable=False, server_default="0")
prices: Mapped[list["Price"]] = relationship(back_populates="product")
price_count = Column(Integer, nullable=False, server_default="0", index=True)
created = Column(DateTime(timezone=True), server_default=func.now())
updated = Column(DateTime(timezone=True), onupdate=func.now())
__tablename__ = "products"
class Location(Base):
id = Column(Integer, primary_key=True, index=True)
osm_id = Column(BigInteger)
osm_type = Column(ChoiceType(LocationOSMEnum))
osm_name = Column(String)
osm_display_name = Column(String)
osm_address_postcode = Column(String)
osm_address_city = Column(String)
osm_address_country = Column(String)
osm_lat = Column(Numeric(precision=11, scale=7))
osm_lon = Column(Numeric(precision=11, scale=7))
prices: Mapped[list["Price"]] = relationship(back_populates="location")
price_count = Column(Integer, nullable=False, server_default="0", index=True)
created = Column(DateTime(timezone=True), server_default=func.now())
updated = Column(DateTime(timezone=True), onupdate=func.now())
__tablename__ = "locations"
class Proof(Base):
id = Column(Integer, primary_key=True, index=True)
file_path = Column(String, nullable=False)
mimetype = Column(String, index=True)
|
force_auto_coercion()
JSONVariant = JSON().with_variant(JSONB(), "postgresql")
class User(Base):
user_id = Column(String, primary_key=True, index=True)
token = Column(String, unique=True, index=True)
last_used = Column(DateTime(timezone=True))
price_count = Column(Integer, nullable=False, server_default="0", index=True)
created = Column(DateTime(timezone=True), server_default=func.now())
__tablename__ = "users"
class Product(Base):
id = Column(Integer, primary_key=True, index=True)
code = Column(String, unique=True, index=True)
source = Column(ChoiceType(Flavor))
product_name = Column(String)
product_quantity = Column(Integer)
brands = Column(String)
image_url = Column(String)
unique_scans_n = Column(Integer, nullable=False, server_default="0")
prices: Mapped[list["Price"]] = relationship(back_populates="product")
price_count = Column(Integer, nullable=False, server_default="0", index=True)
created = Column(DateTime(timezone=True), server_default=func.now())
updated = Column(DateTime(timezone=True), onupdate=func.now())
__tablename__ = "products"
class Location(Base):
id = Column(Integer, primary_key=True, index=True)
osm_id = Column(BigInteger)
osm_type = Column(ChoiceType(LocationOSMEnum))
osm_name = Column(String)
osm_display_name = Column(String)
osm_address_postcode = Column(String)
osm_address_city = Column(String)
osm_address_country = Column(String)
osm_lat = Column(Numeric(precision=11, scale=7))
osm_lon = Column(Numeric(precision=11, scale=7))
prices: Mapped[list["Price"]] = relationship(back_populates="location")
price_count = Column(Integer, nullable=False, server_default="0", index=True)
created = Column(DateTime(timezone=True), server_default=func.now())
updated = Column(DateTime(timezone=True), onupdate=func.now())
__tablename__ = "locations"
class Proof(Base):
id = Column(Integer, primary_key=True, index=True)
file_path = Column(String, nullable=False)
mimetype = Column(String, index=True)
| type = Column(ChoiceType(ProofTypeEnum)) | 1 | 2023-10-21 14:02:15+00:00 | 2k |
krasnoukhov/homeassistant-smart-maic | custom_components/smart_maic/config_flow.py | [
{
"identifier": "DEVICE_NAME",
"path": "custom_components/smart_maic/const.py",
"snippet": "DEVICE_NAME = \"device_name\""
},
{
"identifier": "DEVICE_ID",
"path": "custom_components/smart_maic/const.py",
"snippet": "DEVICE_ID = \"devid\""
},
{
"identifier": "DEVICE_TYPE",
"pa... | import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from typing import Any
from homeassistant import config_entries
from homeassistant.components import mqtt
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import AbortFlow
from .const import (
DEVICE_NAME,
DEVICE_ID,
DEVICE_TYPE,
DOMAIN,
IP_ADDRESS,
PIN,
)
from .smart_maic import SmartMaic
from .coordinator import SmartMaicCoordinator | 1,530 | """Config flow for Smart MAIC integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
USER_SCHEMA = vol.Schema(
{
vol.Required(IP_ADDRESS): cv.string,
vol.Required(PIN): cv.string,
vol.Required(DEVICE_NAME, default="Energy"): cv.string,
}
)
async def validate_input(hass: HomeAssistant, data: dict) -> dict[str, Any]:
"""Validate the user input allows us to connect.
Data has the keys from USER_SCHEMA with values provided by the user.
"""
if not await mqtt.async_wait_for_mqtt_client(hass):
raise AbortFlow("mqtt_unavailable")
smart_maic = SmartMaic(data)
| """Config flow for Smart MAIC integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
USER_SCHEMA = vol.Schema(
{
vol.Required(IP_ADDRESS): cv.string,
vol.Required(PIN): cv.string,
vol.Required(DEVICE_NAME, default="Energy"): cv.string,
}
)
async def validate_input(hass: HomeAssistant, data: dict) -> dict[str, Any]:
"""Validate the user input allows us to connect.
Data has the keys from USER_SCHEMA with values provided by the user.
"""
if not await mqtt.async_wait_for_mqtt_client(hass):
raise AbortFlow("mqtt_unavailable")
smart_maic = SmartMaic(data) | coordinator = SmartMaicCoordinator(smart_maic, hass) | 7 | 2023-10-16 17:24:45+00:00 | 2k |
JoaoPedro9674/django-ledger | django_ledger/contrib/django_ledger_graphene/api.py | [
{
"identifier": "ChartOfAccountsModelType",
"path": "django_ledger/contrib/django_ledger_graphene/coa/schema.py",
"snippet": "class ChartOfAccountsModelType(DjangoObjectType):\n class Meta:\n model = ChartOfAccountModel\n fields = [\n 'uuid',\n 'slug',\n ... | import graphene
from django_ledger.contrib.django_ledger_graphene.coa.schema import ChartOfAccountsModelType
from django_ledger.contrib.django_ledger_graphene.entity.schema import EntityModelQuery, EntityModelType | 945 |
class Query(
EntityModelQuery,
# ChartOfAccountsModelQuery
# CustomerQuery,
# Bill_list_Query,
# Accountlist_Query,
# Bank_account_Query ,
# ChartOfAccountsQuery,
# UnitOfMeasureQuery,
# VendorsQuery,
# EntityUnitQuery,
# LedgerQuery,
# TransactionsQuery,
# JournalEntryQuery,
# PurchaseOrderQuery,
# QueryUser,
):
pass
# class Mutation(
# # CustomerMutations,
# # BankAccountMutations,
# # AuthMutation,
# ):
# pass
schema = graphene.Schema(
types=[
EntityModelType,
|
class Query(
EntityModelQuery,
# ChartOfAccountsModelQuery
# CustomerQuery,
# Bill_list_Query,
# Accountlist_Query,
# Bank_account_Query ,
# ChartOfAccountsQuery,
# UnitOfMeasureQuery,
# VendorsQuery,
# EntityUnitQuery,
# LedgerQuery,
# TransactionsQuery,
# JournalEntryQuery,
# PurchaseOrderQuery,
# QueryUser,
):
pass
# class Mutation(
# # CustomerMutations,
# # BankAccountMutations,
# # AuthMutation,
# ):
# pass
schema = graphene.Schema(
types=[
EntityModelType, | ChartOfAccountsModelType | 0 | 2023-10-20 01:07:20+00:00 | 2k |
HLTCHKUST/InstructAlign | main_nlu_prompt.py | [
{
"identifier": "get_prompt",
"path": "nlu_prompt.py",
"snippet": "def get_prompt(prompt_lang):\n if prompt_lang == 'EN':\n return DATA_TO_EN_PROMPT\n elif prompt_lang == 'EN2':\n return DATA_TO_EN2_PROMPT\n elif prompt_lang == 'EN3':\n return DATA_TO_EN3_PROMPT\n elif p... | import os, sys
import csv
import pandas as pd
import torch
import torch.nn.functional as F
from os.path import exists
from numpy import argmax
from tqdm import tqdm
from sklearn.metrics import f1_score, accuracy_score
from nlu_prompt import get_prompt
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
from nusacrowd import NusantaraConfigHelper
from data_utils import load_xnli_dataset, load_nusa_menulis_dataset, load_nlu_tasks | 1,378 | """nusacrowd zero-shot prompt.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Ru8DyS2ALWfRdkjOPHj-KNjw6Pfa44Nd
"""
#!pip install git+https://github.com/IndoNLP/nusa-crowd.git@release_exp
#!pip install transformers
#!pip install sentencepiece
DEBUG=False
def to_prompt(input, prompt, labels, prompt_lang):
# single label
if 'text' in input:
prompt = prompt.replace('[INPUT]', input['text'])
else:
prompt = prompt.replace('[INPUT_A]', input['text_1'])
prompt = prompt.replace('[INPUT_B]', input['text_2'])
# replace [OPTIONS] to A, B, or C
if "[OPTIONS]" in prompt:
new_labels = [f'{l}' for l in labels]
new_labels[-1] = ("or " if 'EN' in prompt_lang else "atau ") + new_labels[-1]
if len(new_labels) > 2:
prompt = prompt.replace('[OPTIONS]', ', '.join(new_labels))
else:
prompt = prompt.replace('[OPTIONS]', ' '.join(new_labels))
return prompt
@torch.no_grad()
def get_logprobs(model, tokenizer, prompt, label_ids=None, label_attn=None):
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024).to('cuda')
input_ids, output_ids = inputs["input_ids"], inputs["input_ids"][:, 1:]
outputs = model(**inputs, labels=input_ids)
logits = outputs.logits
if model.config.is_encoder_decoder:
logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, label_ids.unsqueeze(2)) * label_attn.unsqueeze(2)
return logprobs.sum() / label_attn.sum()
else:
logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, output_ids.unsqueeze(2))
return logprobs.mean()
def predict_classification(model, tokenizer, prompt, labels):
if model.config.is_encoder_decoder:
labels_encoded = tokenizer(labels, add_special_tokens=False, padding=True, return_tensors='pt')
list_label_ids =labels_encoded['input_ids'].to('cuda')
list_label_attn =labels_encoded['attention_mask'].to('cuda')
probs = [
get_logprobs(model, tokenizer, prompt.replace('[LABELS_CHOICE]', ''), label_ids.view(1,-1), label_attn.view(1,-1))
for (label_ids, label_attn) in zip(list_label_ids, list_label_attn)
]
else:
probs = [get_logprobs(model, tokenizer, prompt.replace('[LABELS_CHOICE]', label)) for label in labels]
return probs
if __name__ == '__main__':
if len(sys.argv) < 3:
raise ValueError('main_nlu_prompt.py <prompt_lang> <model_path_or_name> <optional_output_name>')
prompt_lang = sys.argv[1]
MODEL = sys.argv[2]
output_name = None
if len(sys.argv) == 4:
output_name = sys.argv[3]
os.makedirs('./outputs', exist_ok=True)
# Load Prompt
DATA_TO_PROMPT = get_prompt(prompt_lang)
# Load Dataset
print('Load NLU Datasets...')
| """nusacrowd zero-shot prompt.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Ru8DyS2ALWfRdkjOPHj-KNjw6Pfa44Nd
"""
#!pip install git+https://github.com/IndoNLP/nusa-crowd.git@release_exp
#!pip install transformers
#!pip install sentencepiece
DEBUG=False
def to_prompt(input, prompt, labels, prompt_lang):
# single label
if 'text' in input:
prompt = prompt.replace('[INPUT]', input['text'])
else:
prompt = prompt.replace('[INPUT_A]', input['text_1'])
prompt = prompt.replace('[INPUT_B]', input['text_2'])
# replace [OPTIONS] to A, B, or C
if "[OPTIONS]" in prompt:
new_labels = [f'{l}' for l in labels]
new_labels[-1] = ("or " if 'EN' in prompt_lang else "atau ") + new_labels[-1]
if len(new_labels) > 2:
prompt = prompt.replace('[OPTIONS]', ', '.join(new_labels))
else:
prompt = prompt.replace('[OPTIONS]', ' '.join(new_labels))
return prompt
@torch.no_grad()
def get_logprobs(model, tokenizer, prompt, label_ids=None, label_attn=None):
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024).to('cuda')
input_ids, output_ids = inputs["input_ids"], inputs["input_ids"][:, 1:]
outputs = model(**inputs, labels=input_ids)
logits = outputs.logits
if model.config.is_encoder_decoder:
logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, label_ids.unsqueeze(2)) * label_attn.unsqueeze(2)
return logprobs.sum() / label_attn.sum()
else:
logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, output_ids.unsqueeze(2))
return logprobs.mean()
def predict_classification(model, tokenizer, prompt, labels):
if model.config.is_encoder_decoder:
labels_encoded = tokenizer(labels, add_special_tokens=False, padding=True, return_tensors='pt')
list_label_ids =labels_encoded['input_ids'].to('cuda')
list_label_attn =labels_encoded['attention_mask'].to('cuda')
probs = [
get_logprobs(model, tokenizer, prompt.replace('[LABELS_CHOICE]', ''), label_ids.view(1,-1), label_attn.view(1,-1))
for (label_ids, label_attn) in zip(list_label_ids, list_label_attn)
]
else:
probs = [get_logprobs(model, tokenizer, prompt.replace('[LABELS_CHOICE]', label)) for label in labels]
return probs
if __name__ == '__main__':
if len(sys.argv) < 3:
raise ValueError('main_nlu_prompt.py <prompt_lang> <model_path_or_name> <optional_output_name>')
prompt_lang = sys.argv[1]
MODEL = sys.argv[2]
output_name = None
if len(sys.argv) == 4:
output_name = sys.argv[3]
os.makedirs('./outputs', exist_ok=True)
# Load Prompt
DATA_TO_PROMPT = get_prompt(prompt_lang)
# Load Dataset
print('Load NLU Datasets...') | nlu_datasets = load_nlu_tasks() | 3 | 2023-10-24 07:46:05+00:00 | 2k |
ambient-innovation/django-migration-zero | tests/services/test_deployment.py | [
{
"identifier": "InvalidMigrationTreeError",
"path": "django_migration_zero/exceptions.py",
"snippet": "class InvalidMigrationTreeError(RuntimeError):\n pass"
},
{
"identifier": "MigrationZeroConfigurationManager",
"path": "django_migration_zero/managers.py",
"snippet": "class Migrati... | from logging import Logger
from unittest import mock
from django.test import TestCase
from django.utils import timezone
from freezegun import freeze_time
from django_migration_zero.exceptions import InvalidMigrationTreeError
from django_migration_zero.managers import MigrationZeroConfigurationManager
from django_migration_zero.models import MigrationZeroConfiguration
from django_migration_zero.services.deployment import DatabasePreparationService | 1,158 |
@freeze_time("2023-06-26")
class DatabasePreparationServiceTest(TestCase):
config: MigrationZeroConfiguration
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.service = DatabasePreparationService()
cls.config, _ = MigrationZeroConfiguration.objects.get_or_create()
def test_init_logger_set(self):
self.assertIsInstance(self.service.logger, Logger)
def test_process_regular(self):
# Setup
self.config.migration_imminent = True
self.config.migration_date = timezone.now().date()
self.config.save()
# Assertions
self.assertIsNone(self.service.process())
self.config.refresh_from_db()
self.assertFalse(self.config.migration_imminent)
@mock.patch.object(MigrationZeroConfiguration, "is_migration_applicable", return_value=False)
|
@freeze_time("2023-06-26")
class DatabasePreparationServiceTest(TestCase):
config: MigrationZeroConfiguration
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.service = DatabasePreparationService()
cls.config, _ = MigrationZeroConfiguration.objects.get_or_create()
def test_init_logger_set(self):
self.assertIsInstance(self.service.logger, Logger)
def test_process_regular(self):
# Setup
self.config.migration_imminent = True
self.config.migration_date = timezone.now().date()
self.config.save()
# Assertions
self.assertIsNone(self.service.process())
self.config.refresh_from_db()
self.assertFalse(self.config.migration_imminent)
@mock.patch.object(MigrationZeroConfiguration, "is_migration_applicable", return_value=False) | @mock.patch.object(MigrationZeroConfigurationManager, "fetch_singleton", return_value=None) | 1 | 2023-10-18 12:51:36+00:00 | 2k |
Lucchetto/model_converter | src/api.py | [
{
"identifier": "setup_pub_key",
"path": "src/licensing.py",
"snippet": "def setup_pub_key() -> (rsa.RSAPublicKey | None):\n str = os.environ.get('LICENSING_PUB_KEY')\n if str:\n logging.info(\"LICENSING_PUB_KEY defined, Play Store licensing validation will be performed\")\n key = se... | from enum import Enum
from flask import Flask, Response, jsonify, request, send_file
from src.licensing import setup_pub_key, validate_license
from .converter import UnsupportedModelArch, convert_pth_to_onnx
import logging
import os
import uuid | 972 |
class ApiErrorReason(Enum):
UNSUPPORTED_ARCH = "UNSUPPORTED_ARCH"
INVALID_LICENSE = 'INVALID_LICENSE'
UNSUPPORTED_FORMAT = 'UNSUPPORTED_FORMAT'
UNKNOWN = 'UNKNOWN'
def api_error(reason: ApiErrorReason):
if reason == ApiErrorReason.INVALID_LICENSE:
status_code = 401
else:
status_code = 400
return jsonify({"reason": reason.value}), status_code
def create_app():
logging.basicConfig(level=logging.NOTSET)
app = Flask(__name__)
# Ensure the directory exists
os.makedirs("tmp", exist_ok=True)
|
class ApiErrorReason(Enum):
UNSUPPORTED_ARCH = "UNSUPPORTED_ARCH"
INVALID_LICENSE = 'INVALID_LICENSE'
UNSUPPORTED_FORMAT = 'UNSUPPORTED_FORMAT'
UNKNOWN = 'UNKNOWN'
def api_error(reason: ApiErrorReason):
if reason == ApiErrorReason.INVALID_LICENSE:
status_code = 401
else:
status_code = 400
return jsonify({"reason": reason.value}), status_code
def create_app():
logging.basicConfig(level=logging.NOTSET)
app = Flask(__name__)
# Ensure the directory exists
os.makedirs("tmp", exist_ok=True)
| pub_key = setup_pub_key() | 0 | 2023-10-18 18:18:55+00:00 | 2k |
hpsaturn/pilauncher | main.py | [
{
"identifier": "GuiManager",
"path": "gui.py",
"snippet": "class GuiManager():\n def __init__(self):\n self.am = AppManager()\n self.wlevel = 0\n self.showApp()\n\n def showApp(self):\n if self.wlevel == 0:\n print(self.am.getCurrentApp().name)\n ... | import time
import subprocess
import threading
import RPi.GPIO as GPIO
from gui import GuiManager
from display import Display | 1,214 |
BTNLFT = 23
BTNRGT = 6
onAppStatusTask = False
onSystemStatsTask = False
isBtnRgtPresed = False
isBtnLftPresed = False
onStats = False
# GUI Apps Manager
gui = GuiManager()
cfg = gui.getConfig()
|
BTNLFT = 23
BTNRGT = 6
onAppStatusTask = False
onSystemStatsTask = False
isBtnRgtPresed = False
isBtnLftPresed = False
onStats = False
# GUI Apps Manager
gui = GuiManager()
cfg = gui.getConfig() | dsp = Display() | 1 | 2023-10-23 20:21:51+00:00 | 2k |
CAMeL-Lab/camel_parser | src/initialize_disambiguator/disambiguator_interface.py | [
{
"identifier": "log",
"path": "src/logger.py",
"snippet": "def log(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n\n start_time = time.time()\n result = func(*args, **kwargs)\n end_time = time.time()\n \n with ... | from typing import Union
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.analyzer import Analyzer
from camel_tools.disambig.bert import BERTUnfactoredDisambiguator
from src.logger import log
from src.initialize_disambiguator.bert_disambiguator import create_bert_disambiguator
from src.initialize_disambiguator.mle_disambiguator import MLEDisambiguatorAdapter | 693 |
def set_up_analyzer(morphology_db: str) -> Analyzer:
# used to initialize an Analyzer with ADD_PROP backoff
# db = MorphologyDB.builtin_db('calima-msa-s31')
db_type = None if morphology_db == 'r13' else morphology_db
db = MorphologyDB.builtin_db(db_name=db_type)
return Analyzer(db=db, backoff='ADD_PROP', cache_size=100000)
@log
def get_disambiguator(model_name: str, morphology_db: str) -> Union[MLEDisambiguatorAdapter, BERTUnfactoredDisambiguator]:
analyzer = set_up_analyzer(morphology_db)
if model_name == 'mle':
model = MLEDisambiguatorAdapter(analyzer)
elif model_name == 'bert':
|
def set_up_analyzer(morphology_db: str) -> Analyzer:
# used to initialize an Analyzer with ADD_PROP backoff
# db = MorphologyDB.builtin_db('calima-msa-s31')
db_type = None if morphology_db == 'r13' else morphology_db
db = MorphologyDB.builtin_db(db_name=db_type)
return Analyzer(db=db, backoff='ADD_PROP', cache_size=100000)
@log
def get_disambiguator(model_name: str, morphology_db: str) -> Union[MLEDisambiguatorAdapter, BERTUnfactoredDisambiguator]:
analyzer = set_up_analyzer(morphology_db)
if model_name == 'mle':
model = MLEDisambiguatorAdapter(analyzer)
elif model_name == 'bert': | model = create_bert_disambiguator(analyzer) | 1 | 2023-10-21 10:39:28+00:00 | 2k |
JerBouma/FinancePortfolio | financeportfolio/portfolio_controller.py | [
{
"identifier": "excel_model",
"path": "financeportfolio/excel_model.py",
"snippet": "def create_portfolio_performance_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_transactions_performance_excel_report(\n writer: pd.ExcelWrit... | import pandas as pd
from financetoolkit import Toolkit
from financeportfolio import excel_model, helpers, portfolio_model
| 1,298 | """Portfolio Module"""
# pylint: disable=too-many-instance-attributes,abstract-class-instantiated,
# pylint: disable=too-few-public-methods,protected-access,too-many-lines
class Portfolio:
"""
A class for managing and analyzing your portfolio.
This class provides functionality for loading, preprocessing, categorizing, and analyzing
cash flow data based on a specified configuration file. It offers methods to read and format
the dataset, apply cost or income indicators, categorize transactions, and create periodical
cash flow overviews.
Parameters:
configuration_file (str): The file path to the configuration file in YAML format. The
configuration file should define various settings and columns used in cash flow
analysis.
Attributes:
_configuration_file (str): The file path to the configuration file.
_cash_flow_dataset (pd.DataFrame): The cash flow dataset as a pandas DataFrame.
Note:
- The configuration file should be in YAML format and contain settings for date columns,
description columns, amount columns, and optionally cost/income columns.
- Initialize an instance of this class to begin cash flow analysis.
"""
def __init__(
self,
configuration_file: str | None = None,
portfolio_dataset: pd.DataFrame = pd.DataFrame(),
example: bool = False,
):
"""
Initialize a Cashflow instance with the provided configuration file.
This constructor sets up the Cashflow instance by loading the configuration file, defining
default attributes, and initializing the cash flow dataset as an empty DataFrame.
Parameters:
configuration_file (str): The file path to the configuration file in YAML format.
Raises:
ValueError: If the provided configuration file does not have a '.yaml' extension.
Only '.yaml' configuration files are supported.
"""
if example:
| """Portfolio Module"""
# pylint: disable=too-many-instance-attributes,abstract-class-instantiated,
# pylint: disable=too-few-public-methods,protected-access,too-many-lines
class Portfolio:
"""
A class for managing and analyzing your portfolio.
This class provides functionality for loading, preprocessing, categorizing, and analyzing
cash flow data based on a specified configuration file. It offers methods to read and format
the dataset, apply cost or income indicators, categorize transactions, and create periodical
cash flow overviews.
Parameters:
configuration_file (str): The file path to the configuration file in YAML format. The
configuration file should define various settings and columns used in cash flow
analysis.
Attributes:
_configuration_file (str): The file path to the configuration file.
_cash_flow_dataset (pd.DataFrame): The cash flow dataset as a pandas DataFrame.
Note:
- The configuration file should be in YAML format and contain settings for date columns,
description columns, amount columns, and optionally cost/income columns.
- Initialize an instance of this class to begin cash flow analysis.
"""
def __init__(
self,
configuration_file: str | None = None,
portfolio_dataset: pd.DataFrame = pd.DataFrame(),
example: bool = False,
):
"""
Initialize a Cashflow instance with the provided configuration file.
This constructor sets up the Cashflow instance by loading the configuration file, defining
default attributes, and initializing the cash flow dataset as an empty DataFrame.
Parameters:
configuration_file (str): The file path to the configuration file in YAML format.
Raises:
ValueError: If the provided configuration file does not have a '.yaml' extension.
Only '.yaml' configuration files are supported.
"""
if example:
| configuration_file = helpers.download_yaml_configuration(example=True)
| 1 | 2023-10-15 09:16:04+00:00 | 2k |
S2-group/UPISAS | UPISAS/tests/upisas/test_exemplar.py | [
{
"identifier": "DockerImageNotFoundOnDockerHub",
"path": "UPISAS/exceptions.py",
"snippet": "class DockerImageNotFoundOnDockerHub(UPISASException):\n pass"
},
{
"identifier": "Exemplar",
"path": "UPISAS/exemplar.py",
"snippet": "class Exemplar(ABC):\n \"\"\"\n A class which enc... | import unittest
from UPISAS.exceptions import DockerImageNotFoundOnDockerHub
from UPISAS.exemplar import Exemplar
from UPISAS.exemplars.demo_exemplar import DemoExemplar | 1,392 |
class TestExemplar(unittest.TestCase):
"""
Test cases for the Exemplar class using the DemoExemplar.
"""
def setUp(self):
self.exemplar = None
def tearDown(self):
if self.exemplar and self.exemplar.exemplar_container:
self.exemplar.stop_container()
def test_init_successfully_wihout_auto_start(self):
|
class TestExemplar(unittest.TestCase):
"""
Test cases for the Exemplar class using the DemoExemplar.
"""
def setUp(self):
self.exemplar = None
def tearDown(self):
if self.exemplar and self.exemplar.exemplar_container:
self.exemplar.stop_container()
def test_init_successfully_wihout_auto_start(self): | self.exemplar = DemoExemplar(auto_start=False) | 2 | 2023-10-15 12:46:54+00:00 | 2k |
developerlin/excelchat-streamlit | Home.py | [
{
"identifier": "CustomChartsMiddleware",
"path": "middleware/base.py",
"snippet": "class CustomChartsMiddleware(ChartsMiddleware):\n def run(self, code: str) -> str:\n # code = super().run(code)\n\n processed = []\n for line in code.split(\"\\n\"):\n if line.find(\"pl... | import io
import logging
import uuid
import matplotlib
import pandas as pd
import streamlit as st
from pathlib import Path
from typing import Dict
from pandasai import SmartDataframe, Agent, Config
from pandasai.callbacks import StdoutCallback
from pandasai.helpers import Logger
from middleware.base import CustomChartsMiddleware
from parser.response_parser import CustomResponseParser
from util import get_open_ai_model, get_ollama_model, get_baidu_as_model, get_prompt_template, get_baidu_qianfan_model | 1,300 |
logger = Logger()
matplotlib.rc_file("./.matplotlib/.matplotlibrc");
# page settings
st.set_page_config(page_title="Excel Chat", layout="wide")
st.header("What ExcelChat can do?")
st.text("ExcelChat is a lightweight data analysis app powered by LLM, showcasing how LLM can revolutionize the future"
"of data analysis.")
st.markdown("""List of todos
- [x] Add memory
- [x] Support non-latin text in chart
- [ ] Sub questions support
""")
class AgentWrapper:
id: str
agent: Agent
def __init__(self) -> None:
self.agent = None
self.id = str(uuid.uuid4())
def get_llm(self):
op = st.session_state.last_option
llm = None
if op == "Ollama":
llm = get_ollama_model(st.session_state.ollama_model, st.session_state.ollama_base_url)
elif op == "OpenAI":
if st.session_state.api_token != "":
llm = get_open_ai_model(st.session_state.api_token)
elif op == "Baidu/AIStudio-Ernie-Bot":
if st.session_state.access_token != "":
llm = get_baidu_as_model(st.session_state.access_token)
elif op == "Baidu/Qianfan-Ernie-Bot":
if st.session_state.client_id != "" and st.session_state.client_secret != "":
llm = get_baidu_qianfan_model(st.session_state.client_id, st.session_state.client_secret)
if llm is None:
st.toast("LLM initialization failed, check LLM configuration", icon="🫤")
return llm
def set_file_data(self, df):
llm = self.get_llm()
if llm is not None:
print("llm.type", llm.type)
config = Config(
llm=llm,
callback=StdoutCallback(),
# middlewares=[CustomChartsMiddleware()],
response_parser=CustomResponseParser,
custom_prompts={
|
logger = Logger()
matplotlib.rc_file("./.matplotlib/.matplotlibrc");
# page settings
st.set_page_config(page_title="Excel Chat", layout="wide")
st.header("What ExcelChat can do?")
st.text("ExcelChat is a lightweight data analysis app powered by LLM, showcasing how LLM can revolutionize the future"
"of data analysis.")
st.markdown("""List of todos
- [x] Add memory
- [x] Support non-latin text in chart
- [ ] Sub questions support
""")
class AgentWrapper:
id: str
agent: Agent
def __init__(self) -> None:
self.agent = None
self.id = str(uuid.uuid4())
def get_llm(self):
op = st.session_state.last_option
llm = None
if op == "Ollama":
llm = get_ollama_model(st.session_state.ollama_model, st.session_state.ollama_base_url)
elif op == "OpenAI":
if st.session_state.api_token != "":
llm = get_open_ai_model(st.session_state.api_token)
elif op == "Baidu/AIStudio-Ernie-Bot":
if st.session_state.access_token != "":
llm = get_baidu_as_model(st.session_state.access_token)
elif op == "Baidu/Qianfan-Ernie-Bot":
if st.session_state.client_id != "" and st.session_state.client_secret != "":
llm = get_baidu_qianfan_model(st.session_state.client_id, st.session_state.client_secret)
if llm is None:
st.toast("LLM initialization failed, check LLM configuration", icon="🫤")
return llm
def set_file_data(self, df):
llm = self.get_llm()
if llm is not None:
print("llm.type", llm.type)
config = Config(
llm=llm,
callback=StdoutCallback(),
# middlewares=[CustomChartsMiddleware()],
response_parser=CustomResponseParser,
custom_prompts={ | "generate_python_code": get_prompt_template() | 5 | 2023-10-20 00:58:45+00:00 | 2k |
ZiaWang/jqtrade | jqtrade/account/portfolio.py | [
{
"identifier": "OrderSide",
"path": "jqtrade/account/order.py",
"snippet": "class OrderSide(Enum):\n # 多仓\n long = \"long\"\n\n # 空仓\n short = \"short\"\n\n @classmethod\n def is_valid_side(cls, side):\n return side in cls.__members__\n\n @classmethod\n def get_side(cls, ... | from .order import OrderSide
from .api import UserPosition, UserPositionDict | 729 | # -*- coding: utf-8 -*-
class Portfolio(object):
""" 账户资金/持仓信息聚合类 """
def __init__(self, account):
self.__account = account
@property
def long_positions(self):
| # -*- coding: utf-8 -*-
class Portfolio(object):
""" 账户资金/持仓信息聚合类 """
def __init__(self, account):
self.__account = account
@property
def long_positions(self): | positions = UserPositionDict(OrderSide.long) | 0 | 2023-10-24 01:34:27+00:00 | 2k |
Glasgow-AI4BioMed/GenKIE | data/mm_data/vqa_gen_dataset.py | [
{
"identifier": "data_utils",
"path": "data/data_utils.py",
"snippet": "def infer_language_pair(path):\ndef collate_tokens(\n values,\n pad_idx,\n eos_idx=None,\n left_pad=False,\n move_eos_to_beginning=False,\n pad_to_length=None,\n pad_to_multiple=1,\n pad_to_bsz=None,\n):\n ... | from io import BytesIO
from torchvision import transforms
from PIL import Image, ImageFile
from data import data_utils
from data.ofa_dataset import OFADataset
import logging
import warnings
import numpy as np
import torch
import base64 | 1,291 | # Copyright 2022 The OFA-Sys Team.
# All rights reserved.
# This source code is licensed under the Apache 2.0 license
# found in the LICENSE file in the root directory.
ImageFile.LOAD_TRUNCATED_IMAGES = True
ImageFile.MAX_IMAGE_PIXELS = None
Image.MAX_IMAGE_PIXELS = None
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
def collate(samples, pad_idx, eos_idx):
if len(samples) == 0:
return {}
def merge(key):
| # Copyright 2022 The OFA-Sys Team.
# All rights reserved.
# This source code is licensed under the Apache 2.0 license
# found in the LICENSE file in the root directory.
ImageFile.LOAD_TRUNCATED_IMAGES = True
ImageFile.MAX_IMAGE_PIXELS = None
Image.MAX_IMAGE_PIXELS = None
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore", "(Possibly )?corrupt EXIF data", UserWarning)
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
def collate(samples, pad_idx, eos_idx):
if len(samples) == 0:
return {}
def merge(key): | return data_utils.collate_tokens( | 0 | 2023-10-20 20:01:42+00:00 | 2k |
ArnaudParant/sel | tests/test_sel.py | [
{
"identifier": "elastic",
"path": "scripts/elastic.py",
"snippet": "def options():\ndef create_index(filepath, schema_filepath, index, overwrite=False):\ndef _delete_index(elastic, index):\ndef loads_ndjson(fd):\ndef insert(elastic, index, data):\ndef _create_index(elastic, index, schema_filepath):\nde... | import pytest
import json
import test_utils
from scripts import elastic
from sel import utils | 750 |
TEST_INDEX_FILE = "/tests/data/sample_2017.json"
TEST_SCHEMA_FILE = "/scripts/schema.json"
TEST_INDEX = "test_index"
class TestSEL:
@pytest.fixture(scope="function", autouse=True)
def init(self):
elastic.create_index(TEST_INDEX_FILE, TEST_SCHEMA_FILE, TEST_INDEX, overwrite=True)
def __cleaner(self, obj):
if "_score" in obj:
del obj["_score"]
return obj
@pytest.mark.parametrize(["query"], [
[{}],
[{"meta": {"size": 100}}],
[{"meta": {"size": 5}}],
])
def test_scroll(self, sel, query):
with open(TEST_INDEX_FILE, "r") as f:
expected_lines = {d["id"]: d for d in load_ndjson(f)}
documents = []
scroll_id = None
while True:
res = sel.scroll(TEST_INDEX, query, "1m", scroll_id=scroll_id)
documents += res["documents"]
scroll_id = res["scroll_id"]
if not len(res["documents"]):
break
sel.clear_scroll(res["scroll_id"])
found = {}
for line in documents:
j = self.__cleaner(line)
found[j["id"]] = j
for j2 in expected_lines.values():
j = found.get(j2["id"])
j2["_index"] = TEST_INDEX
assert test_utils.dict_equals(j, j2), f"Got: {j}\nExpected: {j2}"
size = len(found)
file_size = len(expected_lines)
assert size == file_size, f"Download line {size} != {file_size}"
@pytest.mark.parametrize(["query"], [
[{"aggregations": {"labels": {"field": "label"}}}],
[{"aggregations": {"ids": {"field": ".id"}}}],
])
def test_download_aggreg(self, sel, query):
def sort_aggreg(aggreg):
aggreg = sorted(aggreg, key=lambda o: o["key"])
return sorted(aggreg, key=lambda o: o["doc_count"], reverse=True)
aggreg_key = list(query["aggregations"].keys())[0]
query["aggregations"][aggreg_key]["size"] = 0
base_aggreg = {"field": "date", "interval": "week"}
res = sel.search(TEST_INDEX, query)
|
TEST_INDEX_FILE = "/tests/data/sample_2017.json"
TEST_SCHEMA_FILE = "/scripts/schema.json"
TEST_INDEX = "test_index"
class TestSEL:
@pytest.fixture(scope="function", autouse=True)
def init(self):
elastic.create_index(TEST_INDEX_FILE, TEST_SCHEMA_FILE, TEST_INDEX, overwrite=True)
def __cleaner(self, obj):
if "_score" in obj:
del obj["_score"]
return obj
@pytest.mark.parametrize(["query"], [
[{}],
[{"meta": {"size": 100}}],
[{"meta": {"size": 5}}],
])
def test_scroll(self, sel, query):
with open(TEST_INDEX_FILE, "r") as f:
expected_lines = {d["id"]: d for d in load_ndjson(f)}
documents = []
scroll_id = None
while True:
res = sel.scroll(TEST_INDEX, query, "1m", scroll_id=scroll_id)
documents += res["documents"]
scroll_id = res["scroll_id"]
if not len(res["documents"]):
break
sel.clear_scroll(res["scroll_id"])
found = {}
for line in documents:
j = self.__cleaner(line)
found[j["id"]] = j
for j2 in expected_lines.values():
j = found.get(j2["id"])
j2["_index"] = TEST_INDEX
assert test_utils.dict_equals(j, j2), f"Got: {j}\nExpected: {j2}"
size = len(found)
file_size = len(expected_lines)
assert size == file_size, f"Download line {size} != {file_size}"
@pytest.mark.parametrize(["query"], [
[{"aggregations": {"labels": {"field": "label"}}}],
[{"aggregations": {"ids": {"field": ".id"}}}],
])
def test_download_aggreg(self, sel, query):
def sort_aggreg(aggreg):
aggreg = sorted(aggreg, key=lambda o: o["key"])
return sorted(aggreg, key=lambda o: o["doc_count"], reverse=True)
aggreg_key = list(query["aggregations"].keys())[0]
query["aggregations"][aggreg_key]["size"] = 0
base_aggreg = {"field": "date", "interval": "week"}
res = sel.search(TEST_INDEX, query) | expected = utils.get_lastest_sub_data(res["results"]["aggregations"][aggreg_key])["buckets"] | 1 | 2023-10-16 09:03:13+00:00 | 2k |
Qualcomm-AI-research/outlier-free-transformers | quantization/quantizers/uniform_quantizers.py | [
{
"identifier": "QuantizerBase",
"path": "quantization/quantizers/base_quantizers.py",
"snippet": "class QuantizerBase(nn.Module):\n def __init__(self, n_bits, *args, per_channel=False, act_quant=False, **kwargs):\n super().__init__(*args, **kwargs)\n self.n_bits = n_bits\n self.... | import torch
from quantization.quantizers.base_quantizers import QuantizerBase
from quantization.quantizers.quantizer_utils import (
QuantizerNotInitializedError,
round_ste_func,
scale_grad_func,
) | 918 | # Copyright (c) 2023 Qualcomm Technologies, Inc.
# All Rights Reserved.
class AsymmetricUniformQuantizer(QuantizerBase):
"""
PyTorch Module that implements Asymmetric Uniform Quantization using STE.
Quantizes its argument in the forward pass, passes the gradient 'straight
through' on the backward pass, ignoring the quantization that occurred.
Parameters
----------
n_bits: int
Number of bits for quantization.
scale_domain: str ('log', 'linear) with default='linear'
Domain of scale factor
per_channel: bool
If True: allows for per-channel quantization
"""
def __init__(self, n_bits, scale_domain="linear", grad_scaling=False, eps=1e-8, **kwargs):
super().__init__(n_bits=n_bits, **kwargs)
assert scale_domain in ("linear", "log")
self.register_buffer("_delta", None)
self.register_buffer("_zero_float", None)
self.scale_domain = scale_domain
self.grad_scaling = grad_scaling
self.eps = eps
# A few useful properties
@property
def delta(self):
if self._delta is not None:
return self._delta
else:
raise QuantizerNotInitializedError()
@property
def zero_float(self):
if self._zero_float is not None:
return self._zero_float
else:
raise QuantizerNotInitializedError()
@property
def is_initialized(self):
return self._delta is not None
@property
def symmetric(self):
return False
@property
def int_min(self):
# integer grid minimum
return 0.0
@property
def int_max(self):
# integer grid maximum
return 2.0**self.n_bits - 1
@property
def scale(self):
if self.scale_domain == "linear":
return torch.clamp(self.delta, min=self.eps)
elif self.scale_domain == "log":
return torch.exp(self.delta)
@property
def zero_point(self):
| # Copyright (c) 2023 Qualcomm Technologies, Inc.
# All Rights Reserved.
class AsymmetricUniformQuantizer(QuantizerBase):
"""
PyTorch Module that implements Asymmetric Uniform Quantization using STE.
Quantizes its argument in the forward pass, passes the gradient 'straight
through' on the backward pass, ignoring the quantization that occurred.
Parameters
----------
n_bits: int
Number of bits for quantization.
scale_domain: str ('log', 'linear) with default='linear'
Domain of scale factor
per_channel: bool
If True: allows for per-channel quantization
"""
def __init__(self, n_bits, scale_domain="linear", grad_scaling=False, eps=1e-8, **kwargs):
super().__init__(n_bits=n_bits, **kwargs)
assert scale_domain in ("linear", "log")
self.register_buffer("_delta", None)
self.register_buffer("_zero_float", None)
self.scale_domain = scale_domain
self.grad_scaling = grad_scaling
self.eps = eps
# A few useful properties
@property
def delta(self):
if self._delta is not None:
return self._delta
else:
raise QuantizerNotInitializedError()
@property
def zero_float(self):
if self._zero_float is not None:
return self._zero_float
else:
raise QuantizerNotInitializedError()
@property
def is_initialized(self):
return self._delta is not None
@property
def symmetric(self):
return False
@property
def int_min(self):
# integer grid minimum
return 0.0
@property
def int_max(self):
# integer grid maximum
return 2.0**self.n_bits - 1
@property
def scale(self):
if self.scale_domain == "linear":
return torch.clamp(self.delta, min=self.eps)
elif self.scale_domain == "log":
return torch.exp(self.delta)
@property
def zero_point(self): | zero_point = round_ste_func(self.zero_float) | 1 | 2023-10-23 15:59:50+00:00 | 2k |
QgZhan/ESVAE | main_ann_ae.py | [
{
"identifier": "AverageMeter",
"path": "utils.py",
"snippet": "class AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value\"\"\"\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.... | import os
import os.path
import numpy as np
import logging
import argparse
import pycuda.driver as cuda
import torch
import torchvision
import models.ann_ae as ann_ae
from torch.nn.utils import clip_grad_norm_
from torch.nn.utils import clip_grad_value_
from torch.utils.tensorboard import SummaryWriter
from utils import AverageMeter
from utils import aboutCudaDevices
from datasets import load_dataset_ann
| 663 |
max_accuracy = 0
min_loss = 1000
def train(network, trainloader, opti, epoch):
|
max_accuracy = 0
min_loss = 1000
def train(network, trainloader, opti, epoch):
| loss_meter = AverageMeter()
| 0 | 2023-10-23 07:33:27+00:00 | 2k |
iesl/softmax_CPR_recommend | recbole/model/sequential_recommender/sasrec.py | [
{
"identifier": "SequentialRecommender",
"path": "recbole/model/abstract_recommender.py",
"snippet": "class SequentialRecommender(AbstractRecommender):\n \"\"\"\n This is a abstract sequential recommender. All the sequential model should implement This class.\n \"\"\"\n type = ModelType.SEQU... | import sys
import torch
import torch.nn.functional as F
import math
from torch import nn
from recbole.model.abstract_recommender import SequentialRecommender
from recbole.model.layers import TransformerEncoder
from recbole.model.loss import BPRLoss | 1,321 | # -*- coding: utf-8 -*-
# @Time : 2020/9/18 11:33
# @Author : Hui Wang
# @Email : hui.wang@ruc.edu.cn
"""
SASRec
################################################
Reference:
Wang-Cheng Kang et al. "Self-Attentive Sequential Recommendation." in ICDM 2018.
Reference:
https://github.com/kang205/SASRec
"""
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
| # -*- coding: utf-8 -*-
# @Time : 2020/9/18 11:33
# @Author : Hui Wang
# @Email : hui.wang@ruc.edu.cn
"""
SASRec
################################################
Reference:
Wang-Cheng Kang et al. "Self-Attentive Sequential Recommendation." in ICDM 2018.
Reference:
https://github.com/kang205/SASRec
"""
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
| class SASRec(SequentialRecommender): | 0 | 2023-10-21 16:31:44+00:00 | 2k |
timapage/pyqt6-yolov8 | src/models/detection/yolov8_detector_onnx.py | [
{
"identifier": "DetectorBase",
"path": "src/models/detection/detector_base.py",
"snippet": "class DetectorBase(YoloPredictorBase):\n def draw_results(image, model_results):\n FONT_SCALE = 1e-3 \n THICKNESS_SCALE = 6e-4 "
},
{
"identifier": "ModelError",
"path": "src... | import numpy as np
import cv2 as cv
from onnxruntime import InferenceSession
from src.models.detection.detector_base import DetectorBase, Model
from src.models.base.yolov8_base import ModelError
from src.utils.boxes import xywh2xyxy, multiclass_nms_class_agnostic
from src.utils.general import get_classes | 648 |
class YoloDetector(DetectorBase):
def __init__(self):
self._model = None
def init(self, model_path, class_txt_path, confidence_threshold=0.3, iou_threshold=0.45):
|
class YoloDetector(DetectorBase):
def __init__(self):
self._model = None
def init(self, model_path, class_txt_path, confidence_threshold=0.3, iou_threshold=0.45): | _class_names = get_classes(class_txt_path) | 4 | 2023-10-18 09:21:01+00:00 | 2k |
OthersideAI/self-operating-computer | operate/main.py | [
{
"identifier": "ANSI_BRIGHT_MAGENTA",
"path": "operate/utils/style.py",
"snippet": "ANSI_BRIGHT_MAGENTA = \"\\033[95m\" if supports_ansi() else \"\" # Bright magenta text"
},
{
"identifier": "main",
"path": "operate/dialog.py",
"snippet": "def main(model, terminal_prompt, voice_mode=Fa... | import argparse
from operate.utils.style import ANSI_BRIGHT_MAGENTA
from operate.dialog import main | 1,350 | """
Self-Operating Computer
"""
def main_entry():
parser = argparse.ArgumentParser(
description="Run the self-operating-computer with a specified model."
)
parser.add_argument(
"-m",
"--model",
help="Specify the model to use",
required=False,
default="gpt-4",
)
# Add a voice flag
parser.add_argument(
"--voice",
help="Use voice input mode",
action="store_true",
)
# Allow for direct input of prompt
parser.add_argument(
"--prompt",
help="Directly input the objective prompt",
type=str,
required=False,
)
try:
args = parser.parse_args()
| """
Self-Operating Computer
"""
def main_entry():
parser = argparse.ArgumentParser(
description="Run the self-operating-computer with a specified model."
)
parser.add_argument(
"-m",
"--model",
help="Specify the model to use",
required=False,
default="gpt-4",
)
# Add a voice flag
parser.add_argument(
"--voice",
help="Use voice input mode",
action="store_true",
)
# Allow for direct input of prompt
parser.add_argument(
"--prompt",
help="Directly input the objective prompt",
type=str,
required=False,
)
try:
args = parser.parse_args() | main( | 1 | 2023-11-04 03:13:45+00:00 | 2k |
netease-youdao/EmotiVoice | frontend.py | [
{
"identifier": "g2p_cn",
"path": "frontend_cn.py",
"snippet": "def split_py(py):\ndef has_chinese_punctuation(text):\ndef has_english_punctuation(text):\ndef number_to_chinese(number):\ndef tn_chinese(text):\ndef g2p_cn(text):"
},
{
"identifier": "ROOT_DIR",
"path": "frontend_en.py",
"s... | import re
import sys
from frontend_cn import g2p_cn, re_digits, tn_chinese
from frontend_en import ROOT_DIR, read_lexicon, G2p, get_eng_phoneme
from os.path import isfile | 865 | # Copyright 2023, YOUDAO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Thanks to GuGCoCo and PatroxGaurab for identifying the issue:
# the results differ between frontend.py and frontend_en.py. Here's a quick fix.
#re_english_word = re.compile('([a-z\-\.\'\s,;\:\!\?]+|\d+[\d\.]*)', re.I)
re_english_word = re.compile('([^\u4e00-\u9fa5]+|[ \u3002\uff0c\uff1f\uff01\uff1b\uff1a\u201c\u201d\u2018\u2019\u300a\u300b\u3008\u3009\u3010\u3011\u300e\u300f\u2014\u2026\u3001\uff08\uff09\u4e00-\u9fa5]+)', re.I)
def g2p_cn_en(text, g2p, lexicon):
# Our policy dictates that if the text contains Chinese, digits are to be converted into Chinese.
text=tn_chinese(text)
parts = re_english_word.split(text)
parts=list(filter(None, parts))
tts_text = ["<sos/eos>"]
chartype = ''
text_contains_chinese = contains_chinese(text)
for part in parts:
if part == ' ' or part == '': continue
if re_digits.match(part) and (text_contains_chinese or chartype == '') or contains_chinese(part):
if chartype == 'en':
tts_text.append('eng_cn_sp')
phoneme = g2p_cn(part).split()[1:-1]
chartype = 'cn'
elif re_english_word.match(part):
if chartype == 'cn':
if "sp" in tts_text[-1]:
""
else:
tts_text.append('cn_eng_sp')
phoneme = get_eng_phoneme(part, g2p, lexicon, False).split()
if not phoneme :
# tts_text.pop()
continue
else:
chartype = 'en'
else:
continue
tts_text.extend( phoneme )
tts_text=" ".join(tts_text).split()
if "sp" in tts_text[-1]:
tts_text.pop()
tts_text.append("<sos/eos>")
return " ".join(tts_text)
def contains_chinese(text):
pattern = re.compile(r'[\u4e00-\u9fa5]')
match = re.search(pattern, text)
return match is not None
if __name__ == "__main__":
lexicon = read_lexicon(f"{ROOT_DIR}/lexicon/librispeech-lexicon.txt")
| # Copyright 2023, YOUDAO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Thanks to GuGCoCo and PatroxGaurab for identifying the issue:
# the results differ between frontend.py and frontend_en.py. Here's a quick fix.
#re_english_word = re.compile('([a-z\-\.\'\s,;\:\!\?]+|\d+[\d\.]*)', re.I)
re_english_word = re.compile('([^\u4e00-\u9fa5]+|[ \u3002\uff0c\uff1f\uff01\uff1b\uff1a\u201c\u201d\u2018\u2019\u300a\u300b\u3008\u3009\u3010\u3011\u300e\u300f\u2014\u2026\u3001\uff08\uff09\u4e00-\u9fa5]+)', re.I)
def g2p_cn_en(text, g2p, lexicon):
# Our policy dictates that if the text contains Chinese, digits are to be converted into Chinese.
text=tn_chinese(text)
parts = re_english_word.split(text)
parts=list(filter(None, parts))
tts_text = ["<sos/eos>"]
chartype = ''
text_contains_chinese = contains_chinese(text)
for part in parts:
if part == ' ' or part == '': continue
if re_digits.match(part) and (text_contains_chinese or chartype == '') or contains_chinese(part):
if chartype == 'en':
tts_text.append('eng_cn_sp')
phoneme = g2p_cn(part).split()[1:-1]
chartype = 'cn'
elif re_english_word.match(part):
if chartype == 'cn':
if "sp" in tts_text[-1]:
""
else:
tts_text.append('cn_eng_sp')
phoneme = get_eng_phoneme(part, g2p, lexicon, False).split()
if not phoneme :
# tts_text.pop()
continue
else:
chartype = 'en'
else:
continue
tts_text.extend( phoneme )
tts_text=" ".join(tts_text).split()
if "sp" in tts_text[-1]:
tts_text.pop()
tts_text.append("<sos/eos>")
return " ".join(tts_text)
def contains_chinese(text):
pattern = re.compile(r'[\u4e00-\u9fa5]')
match = re.search(pattern, text)
return match is not None
if __name__ == "__main__":
lexicon = read_lexicon(f"{ROOT_DIR}/lexicon/librispeech-lexicon.txt")
| g2p = G2p() | 1 | 2023-11-08 10:15:27+00:00 | 2k |
daveshap/OpenAI_Agent_Swarm | agents/tool_maker/tool_user.py | [
{
"identifier": "chat",
"path": "shared/utils.py",
"snippet": "def chat(client, thread, assistant, functions):\n while True:\n user_message = input(\"You: \")\n\n # add user message to thread\n thread_message = client.beta.threads.messages.create(\n thread.id,\n ... | import os
import json
from shared.utils import chat as chat_loop
from shared.openai_config import get_openai_client | 1,171 | """
Create an assistant using the tools from tool_creator using the assistant creation API
"""
client = get_openai_client()
def create_tool_user(assistant_details):
# create the assistant
tool_user = client.beta.assistants.create(**assistant_details["build_params"])
print(f"Created assistant {tool_user.id} to use tools\n\n" + 90*"-" + "\n\n", flush=True)
# save the assistant info to a json file
info_to_export = {
"assistant_id": tool_user.id,
"assistant_details": assistant_details,
}
os.makedirs('assistants', exist_ok=True)
with open('assistants/tool_user.json', 'w') as f:
json.dump(info_to_export, f, indent=4)
return tool_user
def talk_to_tool_user(assistant_details):
"""
talk to the assistant to use the tools
"""
# check if json file exists
try:
os.makedirs('assistants', exist_ok=True)
with open('assistants/tool_user.json') as f:
create_new = input(f'Assistant details found in tool_user.json. Create a new assistant? [y/N]')
if create_new == 'y':
raise Exception("User wants a new assistant")
assistant_from_json = json.load(f)
tool_user = client.beta.assistants.retrieve(assistant_from_json['assistant_id'])
print(f"Loaded assistant details from tool_user.json\n\n" + 90*"-" + "\n\n", flush=True)
print(f'Assistant {tool_user.id}:\n')
assistant_details = assistant_from_json["assistant_details"]
except:
# create the assistant first
tool_user = create_tool_user(assistant_details)
# exec the functions from the py files
os.makedirs('tools', exist_ok=True)
functions = assistant_details["functions"]
for func in functions:
print(f"Loading function {func} into execution environment", flush=True)
with open('tools/' + func + '.py') as f:
exec(f.read(), globals())
functions.update({func: eval(func)})
# Create thread
thread = client.beta.threads.create()
# chat with the assistant
| """
Create an assistant using the tools from tool_creator using the assistant creation API
"""
client = get_openai_client()
def create_tool_user(assistant_details):
# create the assistant
tool_user = client.beta.assistants.create(**assistant_details["build_params"])
print(f"Created assistant {tool_user.id} to use tools\n\n" + 90*"-" + "\n\n", flush=True)
# save the assistant info to a json file
info_to_export = {
"assistant_id": tool_user.id,
"assistant_details": assistant_details,
}
os.makedirs('assistants', exist_ok=True)
with open('assistants/tool_user.json', 'w') as f:
json.dump(info_to_export, f, indent=4)
return tool_user
def talk_to_tool_user(assistant_details):
"""
talk to the assistant to use the tools
"""
# check if json file exists
try:
os.makedirs('assistants', exist_ok=True)
with open('assistants/tool_user.json') as f:
create_new = input(f'Assistant details found in tool_user.json. Create a new assistant? [y/N]')
if create_new == 'y':
raise Exception("User wants a new assistant")
assistant_from_json = json.load(f)
tool_user = client.beta.assistants.retrieve(assistant_from_json['assistant_id'])
print(f"Loaded assistant details from tool_user.json\n\n" + 90*"-" + "\n\n", flush=True)
print(f'Assistant {tool_user.id}:\n')
assistant_details = assistant_from_json["assistant_details"]
except:
# create the assistant first
tool_user = create_tool_user(assistant_details)
# exec the functions from the py files
os.makedirs('tools', exist_ok=True)
functions = assistant_details["functions"]
for func in functions:
print(f"Loading function {func} into execution environment", flush=True)
with open('tools/' + func + '.py') as f:
exec(f.read(), globals())
functions.update({func: eval(func)})
# Create thread
thread = client.beta.threads.create()
# chat with the assistant | chat_loop(client, thread, tool_user, functions) | 1 | 2023-11-07 23:12:05+00:00 | 2k |
S-LoRA/S-LoRA | slora/common/basemodel/layer_infer/base_layer_infer.py | [
{
"identifier": "mark_cost_time",
"path": "slora/utils/infer_utils.py",
"snippet": "def mark_cost_time(func_name):\n def inner_func(func):\n def time_func(*args, **kwargs):\n if dist.get_rank() in [0, 1] and is_show_cost_time:\n torch.cuda.synchronize()\n ... | from slora.utils.infer_utils import mark_cost_time
from slora.common.basemodel.infer_struct import InferStateInfo
from slora.common.basemodel.layer_weights.base_layer_weight import BaseLayerWeight | 707 |
class BaseLayerInfer:
def __init__(self) -> None:
pass
|
class BaseLayerInfer:
def __init__(self) -> None:
pass
| @mark_cost_time("pre context forward") # dont to remove this, will make performence down, did not know why | 0 | 2023-11-05 04:08:36+00:00 | 2k |
disler/multi-agent-postgres-data-analytics | postgres_da_ai_agent/modules/orchestrator.py | [
{
"identifier": "AgentInstruments",
"path": "postgres_da_ai_agent/agents/instruments.py",
"snippet": "class AgentInstruments:\n \"\"\"\n Base class for multli-agent instruments that are tools, state, and functions that an agent can use across the lifecycle of conversations\n \"\"\"\n\n def _... | import dataclasses
import json
import autogen
from typing import List, Optional, Tuple
from postgres_da_ai_agent.agents.instruments import AgentInstruments
from postgres_da_ai_agent.modules import llm
from postgres_da_ai_agent.types import Chat, ConversationResult | 705 |
class Orchestrator:
"""
Orchestrators manage conversations between multi-agent teams.
"""
def __init__(
self,
name: str,
agents: List[autogen.ConversableAgent],
|
class Orchestrator:
"""
Orchestrators manage conversations between multi-agent teams.
"""
def __init__(
self,
name: str,
agents: List[autogen.ConversableAgent], | instruments: AgentInstruments, | 0 | 2023-11-04 20:15:46+00:00 | 2k |
fleet-ai/context | utils/ai.py | [
{
"identifier": "OPENAI_MODELS",
"path": "constants/cli.py",
"snippet": "OPENAI_MODELS = [\n \"gpt-4-1106-preview\",\n \"gpt-4\",\n \"gpt-3.5-turbo\",\n \"gpt-3.5-turbo-16k\",\n]"
},
{
"identifier": "SYSTEM_PROMPT",
"path": "constants/ai.py",
"snippet": "SYSTEM_PROMPT = \"\"\... | import os
import json
import tiktoken
import openai
import requests
from openai import OpenAI
from constants.cli import OPENAI_MODELS
from constants.ai import SYSTEM_PROMPT, PROMPT, API_URL | 874 | # pylint: disable=W0707
# pylint: disable=W0719
def retrieve(query, k=10, filters=None):
"""Retrieves and returns dict.
Args:
query (str): User query to pass in
k (int, optional): number of results passed back. Defaults to 10.
filters (dict, optional): Filters to apply to the query. You can filter based off
any piece of metadata by passing in a dict of the format {metadata_name: filter_value}
ie {"library_id": "1234"}.
See the README for more details:
https://github.com/fleet-ai/context/tree/main#using-fleet-contexts-rich-metadata
Returns:
list: List of queried results
"""
| # pylint: disable=W0707
# pylint: disable=W0719
def retrieve(query, k=10, filters=None):
"""Retrieves and returns dict.
Args:
query (str): User query to pass in
k (int, optional): number of results passed back. Defaults to 10.
filters (dict, optional): Filters to apply to the query. You can filter based off
any piece of metadata by passing in a dict of the format {metadata_name: filter_value}
ie {"library_id": "1234"}.
See the README for more details:
https://github.com/fleet-ai/context/tree/main#using-fleet-contexts-rich-metadata
Returns:
list: List of queried results
"""
| url = f"{API_URL}/query" | 3 | 2023-11-02 07:07:13+00:00 | 2k |
OpenBMB/ProAgent | ProAgent/agent/gpt4_function.py | [
{
"identifier": "logger",
"path": "ProAgent/loggers/logs.py",
"snippet": "class JsonFileHandler(logging.FileHandler):\nclass JsonFormatter(logging.Formatter):\nclass Logger(metaclass=Singleton):\nclass TypingConsoleHandler(logging.StreamHandler):\nclass ConsoleHandler(logging.StreamHandler):\nclass Auto... | import logging
import json
from typing import List, Dict
from colorama import Fore, Style
from ProAgent.loggers.logs import logger
from ProAgent.agent.utils import _chat_completion_request | 849 |
class OpenAIFunction():
def __init__(self):
pass
def parse(self, **args):
"""
Parses the given arguments by making a chat completion request.
Args:
**args: The keyword arguments to be passed to the chat completion request.
Returns:
Tuple: A tuple containing the parsed content, function name, function arguments, and the original message.
Raises:
None.
"""
retry_time = 1
max_time = 3
for i in range(max_time):
output = _chat_completion_request(**args)
if isinstance(output, Dict):
usage = output["usage"]
message = output["choices"][0]["message"]
print(usage)
if "function_call" in message.keys():
break
else:
args['messages'].append({"role": "assistant", "content": message['content']})
args['messages'].append({"role": 'user', "content": "No Function call here! You should always use a function call as your response."})
retry_time += 1
|
class OpenAIFunction():
def __init__(self):
pass
def parse(self, **args):
"""
Parses the given arguments by making a chat completion request.
Args:
**args: The keyword arguments to be passed to the chat completion request.
Returns:
Tuple: A tuple containing the parsed content, function name, function arguments, and the original message.
Raises:
None.
"""
retry_time = 1
max_time = 3
for i in range(max_time):
output = _chat_completion_request(**args)
if isinstance(output, Dict):
usage = output["usage"]
message = output["choices"][0]["message"]
print(usage)
if "function_call" in message.keys():
break
else:
args['messages'].append({"role": "assistant", "content": message['content']})
args['messages'].append({"role": 'user', "content": "No Function call here! You should always use a function call as your response."})
retry_time += 1 | logger._log(f"{Fore.RED} Retry for the {retry_time}'th time{Style.RESET_ALL}") | 0 | 2023-11-03 01:20:14+00:00 | 2k |
LLaVA-VL/LLaVA-Plus-Codebase | serve/blip2grounding_worker.py | [
{
"identifier": "WORKER_HEART_BEAT_INTERVAL",
"path": "serve/constants.py",
"snippet": "WORKER_HEART_BEAT_INTERVAL = int(os.getenv(\"FASTCHAT_WORKER_HEART_BEAT_INTERVAL\", 45))"
},
{
"identifier": "ErrorCode",
"path": "serve/constants.py",
"snippet": "class ErrorCode(IntEnum):\n \"\"\... | import sys, os
import argparse
import asyncio
import dataclasses
import logging
import json
import os
import sys
import time
import threading
import uuid
import base64
import numpy as np
import requests
import groundingdino.datasets.transforms as T
import pycocotools.mask as mask_util
import torch
import torch.nn.functional as F
import uvicorn
from groundingdino.util import box_ops
from segment_anything import build_sam
from segment_anything.predictor import SamPredictor
from typing import List, Tuple, Union
from io import BytesIO
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
from PIL import Image
from demo.inference_on_a_image import get_grounding_output
from groundingdino.util.inference import load_model, predict
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LlamaTokenizer,
AutoModel,
)
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LLaMATokenizer,
AutoModel,
)
from serve.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from serve.utils import build_logger, pretty_print_semaphore | 1,147 | """
A model worker executes the model.
"""
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
try:
except ImportError:
GB = 1 << 30
now_file_name = os.__file__
logdir = "logs/workers/"
os.makedirs(logdir, exist_ok=True)
logfile = os.path.join(logdir, f"{now_file_name}.log")
worker_id = str(uuid.uuid4())[:6]
| """
A model worker executes the model.
"""
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
try:
except ImportError:
GB = 1 << 30
now_file_name = os.__file__
logdir = "logs/workers/"
os.makedirs(logdir, exist_ok=True)
logfile = os.path.join(logdir, f"{now_file_name}.log")
worker_id = str(uuid.uuid4())[:6] | logger = build_logger(now_file_name, logfile) | 3 | 2023-11-07 13:06:02+00:00 | 2k |
opendilab/LLMRiddles | llmriddles/questions/level2.py | [
{
"identifier": "register_question",
"path": "llmriddles/questions/question.py",
"snippet": "def register_question(text: Union[Mapping[str, str], str],\n checkers: Union[Mapping[str, SingleLangCheckerTyping], MultiLangCheckerTyping],\n name=Union[Mapping[str, st... | import re
import sympy
from typing import Optional, Tuple
from .question import register_question
from .math_tools import get_all_numbers | 679 |
CN_TEXT_1 = """
第二章第一题(质数长度),你需要提出一个字数是质数的问题,使回答的长度刚好是它的下一个质数。
"""
EN_TEXT_1 = """
For the first question in chapter 2, You need to come up with a question that has a prime number of words, so the answer's length is exactly the next prime number.
"""
def _is_prime(v):
return sympy.isprime(v)
def _next_prime(v):
while v:
v += 1
if _is_prime(v):
return v
def _cn_checker_1(question_text: str, user_text: str, answer_text: str) -> Tuple[bool, Optional[str]]:
qs_length = len(user_text.strip())
if not _is_prime(qs_length):
return False, f'问题长度为{qs_length},非质数'
answer_value = len(answer_text)
next_prime = _next_prime(qs_length)
if answer_value != next_prime:
return False, f'下一个质数为{next_prime},但回答长度为{answer_value}'
return True, None
def _en_words(text: str):
return len(re.findall(r'\w+', text))
def _en_checker_1(question_text: str, user_text: str, answer_text: str) -> Tuple[bool, Optional[str]]:
qs_length = _en_words(user_text.strip())
if not _is_prime(qs_length):
return False, f'The question has a length of {qs_length}, which is not a prime number'
answer_value = _en_words(answer_text)
next_prime = _next_prime(qs_length)
if answer_value != next_prime:
return False, f'The next prime number is {next_prime}, but the answer\'s length is {answer_value}'
return True, None
|
CN_TEXT_1 = """
第二章第一题(质数长度),你需要提出一个字数是质数的问题,使回答的长度刚好是它的下一个质数。
"""
EN_TEXT_1 = """
For the first question in chapter 2, You need to come up with a question that has a prime number of words, so the answer's length is exactly the next prime number.
"""
def _is_prime(v):
return sympy.isprime(v)
def _next_prime(v):
while v:
v += 1
if _is_prime(v):
return v
def _cn_checker_1(question_text: str, user_text: str, answer_text: str) -> Tuple[bool, Optional[str]]:
qs_length = len(user_text.strip())
if not _is_prime(qs_length):
return False, f'问题长度为{qs_length},非质数'
answer_value = len(answer_text)
next_prime = _next_prime(qs_length)
if answer_value != next_prime:
return False, f'下一个质数为{next_prime},但回答长度为{answer_value}'
return True, None
def _en_words(text: str):
return len(re.findall(r'\w+', text))
def _en_checker_1(question_text: str, user_text: str, answer_text: str) -> Tuple[bool, Optional[str]]:
qs_length = _en_words(user_text.strip())
if not _is_prime(qs_length):
return False, f'The question has a length of {qs_length}, which is not a prime number'
answer_value = _en_words(answer_text)
next_prime = _next_prime(qs_length)
if answer_value != next_prime:
return False, f'The next prime number is {next_prime}, but the answer\'s length is {answer_value}'
return True, None
| register_question( | 0 | 2023-11-07 03:09:55+00:00 | 2k |
codefuse-ai/CodeFuse-ModelCache | modelcache/manager/vector_data/manager.py | [
{
"identifier": "NotFoundError",
"path": "modelcache/utils/error.py",
"snippet": "class NotFoundError(CacheError):\n \"\"\"Raise when getting an unsupported store.\"\"\"\n def __init__(self, store_type, current_type_name):\n super().__init__(f\"Unsupported ${store_type}: {current_type_name}... | from modelcache.utils.error import NotFoundError, ParamError
from modelcache.manager.vector_data.milvus import Milvus
from modelcache.manager.vector_data.faiss import Faiss
from modelcache.manager.vector_data.chroma import Chromadb
from modelcache.manager.vector_data.hnswlib_store import Hnswlib | 924 | # -*- coding: utf-8 -*-
TOP_K = 1
FAISS_INDEX_PATH = "faiss.index"
DIMENSION = 0
MILVUS_HOST = "localhost"
MILVUS_PORT = 19530
MILVUS_USER = ""
MILVUS_PSW = ""
MILVUS_SECURE = False
MILVUS_INDEX_PARAMS = {
"metric_type": "L2",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
COLLECTION_NAME = "modelcache"
class VectorBase:
"""
VectorBase to manager the vector base.
"""
def __init__(self):
raise EnvironmentError(
"VectorBase is designed to be instantiated, please using the `VectorBase.get(name)`."
)
@staticmethod
def check_dimension(dimension):
if dimension <= 0:
raise ParamError(
f"the dimension should be greater than zero, current value: {dimension}."
)
@staticmethod
def get(name, **kwargs):
top_k = kwargs.get("top_k", TOP_K)
if name == "milvus":
dimension = kwargs.get("dimension", DIMENSION)
milvus_config = kwargs.get("milvus_config")
VectorBase.check_dimension(dimension)
host = milvus_config.get('milvus', 'host')
port = milvus_config.get('milvus', 'port')
user = milvus_config.get('milvus', 'user')
password = milvus_config.get('milvus', 'password')
secure = kwargs.get("secure", MILVUS_SECURE)
collection_name = kwargs.get("collection_name", COLLECTION_NAME)
index_params = kwargs.get("index_params", MILVUS_INDEX_PARAMS)
search_params = kwargs.get("search_params", None)
local_mode = kwargs.get("local_mode", False)
local_data = kwargs.get("local_data", "./milvus_data")
vector_base = Milvus(
host=host,
port=port,
user=user,
password=password,
secure=secure,
collection_name=collection_name,
dimension=dimension,
top_k=top_k,
index_params=index_params,
search_params=search_params,
local_mode=local_mode,
local_data=local_data
)
elif name == "faiss":
dimension = kwargs.get("dimension", DIMENSION)
index_path = kwargs.pop("index_path", FAISS_INDEX_PATH)
VectorBase.check_dimension(dimension)
vector_base = Faiss(
index_file_path=index_path, dimension=dimension, top_k=top_k
)
elif name == "chromadb":
client_settings = kwargs.get("client_settings", None)
persist_directory = kwargs.get("persist_directory", None)
collection_name = kwargs.get("collection_name", COLLECTION_NAME)
vector_base = Chromadb(
client_settings=client_settings,
persist_directory=persist_directory,
collection_name=collection_name,
top_k=top_k,
)
elif name == "hnswlib":
dimension = kwargs.get("dimension", DIMENSION)
index_path = kwargs.pop("index_path", "./hnswlib_index.bin")
max_elements = kwargs.pop("max_elements", 100000)
VectorBase.check_dimension(dimension)
vector_base = Hnswlib(
index_file_path=index_path, dimension=dimension,
top_k=top_k, max_elements=max_elements
)
else:
| # -*- coding: utf-8 -*-
TOP_K = 1
FAISS_INDEX_PATH = "faiss.index"
DIMENSION = 0
MILVUS_HOST = "localhost"
MILVUS_PORT = 19530
MILVUS_USER = ""
MILVUS_PSW = ""
MILVUS_SECURE = False
MILVUS_INDEX_PARAMS = {
"metric_type": "L2",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
COLLECTION_NAME = "modelcache"
class VectorBase:
"""
VectorBase to manager the vector base.
"""
def __init__(self):
raise EnvironmentError(
"VectorBase is designed to be instantiated, please using the `VectorBase.get(name)`."
)
@staticmethod
def check_dimension(dimension):
if dimension <= 0:
raise ParamError(
f"the dimension should be greater than zero, current value: {dimension}."
)
@staticmethod
def get(name, **kwargs):
top_k = kwargs.get("top_k", TOP_K)
if name == "milvus":
dimension = kwargs.get("dimension", DIMENSION)
milvus_config = kwargs.get("milvus_config")
VectorBase.check_dimension(dimension)
host = milvus_config.get('milvus', 'host')
port = milvus_config.get('milvus', 'port')
user = milvus_config.get('milvus', 'user')
password = milvus_config.get('milvus', 'password')
secure = kwargs.get("secure", MILVUS_SECURE)
collection_name = kwargs.get("collection_name", COLLECTION_NAME)
index_params = kwargs.get("index_params", MILVUS_INDEX_PARAMS)
search_params = kwargs.get("search_params", None)
local_mode = kwargs.get("local_mode", False)
local_data = kwargs.get("local_data", "./milvus_data")
vector_base = Milvus(
host=host,
port=port,
user=user,
password=password,
secure=secure,
collection_name=collection_name,
dimension=dimension,
top_k=top_k,
index_params=index_params,
search_params=search_params,
local_mode=local_mode,
local_data=local_data
)
elif name == "faiss":
dimension = kwargs.get("dimension", DIMENSION)
index_path = kwargs.pop("index_path", FAISS_INDEX_PATH)
VectorBase.check_dimension(dimension)
vector_base = Faiss(
index_file_path=index_path, dimension=dimension, top_k=top_k
)
elif name == "chromadb":
client_settings = kwargs.get("client_settings", None)
persist_directory = kwargs.get("persist_directory", None)
collection_name = kwargs.get("collection_name", COLLECTION_NAME)
vector_base = Chromadb(
client_settings=client_settings,
persist_directory=persist_directory,
collection_name=collection_name,
top_k=top_k,
)
elif name == "hnswlib":
dimension = kwargs.get("dimension", DIMENSION)
index_path = kwargs.pop("index_path", "./hnswlib_index.bin")
max_elements = kwargs.pop("max_elements", 100000)
VectorBase.check_dimension(dimension)
vector_base = Hnswlib(
index_file_path=index_path, dimension=dimension,
top_k=top_k, max_elements=max_elements
)
else: | raise NotFoundError("vector store", name) | 0 | 2023-11-01 01:56:10+00:00 | 2k |
ForceFledgling/proxyhub | tests/test_utils.py | [
{
"identifier": "BadStatusLine",
"path": "proxyhub/errors.py",
"snippet": "class BadStatusLine(Exception):\n errmsg = 'bad_status_line'"
},
{
"identifier": "get_all_ip",
"path": "proxyhub/utils.py",
"snippet": "def get_all_ip(page):\n # TODO: add IPv6 support\n return set(IPPatt... | import pytest
from proxyhub.errors import BadStatusLine
from proxyhub.utils import (
get_all_ip,
get_status_code,
parse_headers,
parse_status_line,
) | 747 |
def test_get_all_ip():
page = "abc127.0.0.1:80abc127.0.0.1xx127.0.0.2:8080h"
assert get_all_ip(page) == {'127.0.0.1', '127.0.0.2'}
def test_get_status_code():
assert get_status_code('HTTP/1.1 200 OK\r\n') == 200
assert get_status_code('<html>123</html>\r\n') == 400
assert get_status_code(b'HTTP/1.1 403 Forbidden\r\n') == 403
assert get_status_code(b'HTTP/1.1 400 Bad Request\r\n') == 400
def test_parse_status_line():
|
def test_get_all_ip():
page = "abc127.0.0.1:80abc127.0.0.1xx127.0.0.2:8080h"
assert get_all_ip(page) == {'127.0.0.1', '127.0.0.2'}
def test_get_status_code():
assert get_status_code('HTTP/1.1 200 OK\r\n') == 200
assert get_status_code('<html>123</html>\r\n') == 400
assert get_status_code(b'HTTP/1.1 403 Forbidden\r\n') == 403
assert get_status_code(b'HTTP/1.1 400 Bad Request\r\n') == 400
def test_parse_status_line(): | assert parse_status_line('HTTP/1.1 200 OK') == { | 4 | 2023-11-05 13:28:57+00:00 | 2k |
WithSecureLabs/IceKube | icekube/cli.py | [
{
"identifier": "config",
"path": "icekube/config.py",
"snippet": "class Neo4j(TypedDict):\nclass Config(TypedDict):"
},
{
"identifier": "create_indices",
"path": "icekube/icekube.py",
"snippet": "def create_indices():\n for resource in api_resources():\n if \"list\" not in res... | import json
import logging
import typer
from pathlib import Path
from typing import Iterator, List, Optional, cast
from icekube.config import config
from icekube.icekube import (
create_indices,
enumerate_resource_kind,
generate_relationships,
purge_neo4j,
remove_attack_paths,
setup_attack_paths,
)
from icekube.kube import (
APIResource,
Resource,
all_resources,
metadata_download,
)
from icekube.log_config import build_logger
from tqdm import tqdm
from icekube import kube
from icekube import icekube | 1,369 |
app = typer.Typer()
IGNORE_DEFAULT = "events,componentstatuses"
@app.command()
def run(
ignore: str = typer.Option(
IGNORE_DEFAULT,
help="Names of resource types to ignore",
),
):
enumerate(ignore)
attack_path()
@app.command()
def enumerate(
ignore: str = typer.Option(
IGNORE_DEFAULT,
help="Names of resource types to ignore",
),
):
create_indices()
enumerate_resource_kind(ignore.split(","))
generate_relationships()
@app.command()
def relationships():
generate_relationships()
@app.command()
def attack_path():
|
app = typer.Typer()
IGNORE_DEFAULT = "events,componentstatuses"
@app.command()
def run(
ignore: str = typer.Option(
IGNORE_DEFAULT,
help="Names of resource types to ignore",
),
):
enumerate(ignore)
attack_path()
@app.command()
def enumerate(
ignore: str = typer.Option(
IGNORE_DEFAULT,
help="Names of resource types to ignore",
),
):
create_indices()
enumerate_resource_kind(ignore.split(","))
generate_relationships()
@app.command()
def relationships():
generate_relationships()
@app.command()
def attack_path(): | remove_attack_paths() | 5 | 2023-11-02 13:54:21+00:00 | 2k |
IAAR-Shanghai/UHGEval | tests/llm/test_api.py | [
{
"identifier": "Baichuan2_53B_Chat",
"path": "uhgeval/llm/api.py",
"snippet": "class Baichuan2_53B_Chat(BaseLLM):\n def request(self, query) -> str:\n import time\n url = conf.Baichuan2_53B_url\n api_key = conf.Baichuan2_53B_api_key\n secret_key = conf.Baichuan2_53B_secre... | import unittest
from uhgeval.llm.api import (
Baichuan2_53B_Chat,
GPT,
) | 831 | # @Author : Shichao Song
# @Email : song.shichao@outlook.com
class TestBaichuan253BChat(unittest.TestCase):
def setUp(self):
self.model = Baichuan2_53B_Chat(temperature=0.1)
def test_request(self):
query = "How are you?"
response = self.model.request(query)
self.assertIsInstance(response, str)
self.assertGreater(len(response), 0)
def test_continue_writing(self):
obj = {"headLine": "Story", "broadcastDate": "2023-11-15", "newsBeginning": "Once upon a time, there is a"}
result = self.model.continue_writing(obj)
self.assertIsInstance(result, str)
self.assertGreater(len(result), 0)
class TestGPT(unittest.TestCase):
def setUp(self):
| # @Author : Shichao Song
# @Email : song.shichao@outlook.com
class TestBaichuan253BChat(unittest.TestCase):
def setUp(self):
self.model = Baichuan2_53B_Chat(temperature=0.1)
def test_request(self):
query = "How are you?"
response = self.model.request(query)
self.assertIsInstance(response, str)
self.assertGreater(len(response), 0)
def test_continue_writing(self):
obj = {"headLine": "Story", "broadcastDate": "2023-11-15", "newsBeginning": "Once upon a time, there is a"}
result = self.model.continue_writing(obj)
self.assertIsInstance(result, str)
self.assertGreater(len(result), 0)
class TestGPT(unittest.TestCase):
def setUp(self): | self.gpt35 = GPT(model_name='gpt-3.5-turbo', temperature=0.1) | 1 | 2023-11-06 11:46:22+00:00 | 2k |
mobiusml/hqq | examples/lora/train_hqq_lora_example.py | [
{
"identifier": "HQQModelForCausalLM",
"path": "hqq/engine/hf.py",
"snippet": "_HQQ_REGISTRY = {}\n\t_HQQ_REGISTRY = _HQQ_REGISTRY\nclass HQQModelForCausalLM(_Parent, HQQWrapper):\n\tdef __init__(self, *args, **kwargs):\n\tdef _make_quantizable(cls, model, quantized):\n\tdef _validate_params(cls, params... | from hqq.engine.hf import HQQModelForCausalLM, AutoTokenizer
from hqq.core.quantize import *
from hqq.core.peft import PeftUtils
from hqq.core.quantize import *
from datasets import load_dataset, Dataset
from tqdm import tqdm
from trl import SFTTrainer
import transformers
import numpy as np
import random | 1,458 | #Settings
######################################################################################
hf_auth = None #HuggingFace token
cache_path = '' #cache directory to store data
#Chose a model
model_id = "meta-llama/Llama-2-7b-hf"
#model_id = "meta-llama/Llama-2-13b-hf"
#model_id = "meta-llama/Llama-2-70b-hf"
#HQQ Quantize
######################################################################################
model = HQQModelForCausalLM.from_pretrained(model_id, use_auth_token=hf_auth, cache_dir=cache_path)
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=hf_auth, cache_dir=cache_path)
#Quantize the model
quant_config = BaseQuantizeConfig(nbits=4, group_size=64, quant_scale=False, quant_zero=False)
model.quantize_model(quant_config=quant_config)
#Add Peft
######################################################################################
train_dtype = torch.bfloat16 #torch.float32 / torch.bfloat16
base_lora_params = {'lora_type':'default', 'r':32, 'lora_alpha':64, 'dropout':0.05, 'train_dtype':train_dtype}
lora_params = {'self_attn.q_proj': base_lora_params,
'self_attn.k_proj': base_lora_params,
'self_attn.v_proj': base_lora_params,
'self_attn.o_proj': base_lora_params,
'mlp.gate_proj' : None,
'mlp.up_proj' : None,
'mlp.down_proj' : None}
#Apply LoRA
| #Settings
######################################################################################
hf_auth = None #HuggingFace token
cache_path = '' #cache directory to store data
#Chose a model
model_id = "meta-llama/Llama-2-7b-hf"
#model_id = "meta-llama/Llama-2-13b-hf"
#model_id = "meta-llama/Llama-2-70b-hf"
#HQQ Quantize
######################################################################################
model = HQQModelForCausalLM.from_pretrained(model_id, use_auth_token=hf_auth, cache_dir=cache_path)
tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=hf_auth, cache_dir=cache_path)
#Quantize the model
quant_config = BaseQuantizeConfig(nbits=4, group_size=64, quant_scale=False, quant_zero=False)
model.quantize_model(quant_config=quant_config)
#Add Peft
######################################################################################
train_dtype = torch.bfloat16 #torch.float32 / torch.bfloat16
base_lora_params = {'lora_type':'default', 'r':32, 'lora_alpha':64, 'dropout':0.05, 'train_dtype':train_dtype}
lora_params = {'self_attn.q_proj': base_lora_params,
'self_attn.k_proj': base_lora_params,
'self_attn.v_proj': base_lora_params,
'self_attn.o_proj': base_lora_params,
'mlp.gate_proj' : None,
'mlp.up_proj' : None,
'mlp.down_proj' : None}
#Apply LoRA | PeftUtils.add_lora(model, lora_params) | 1 | 2023-11-07 20:15:00+00:00 | 2k |
TheFunny/ArisuAutoSweeper | gui.py | [
{
"identifier": "logger",
"path": "module/logger/logger.py",
"snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecor... | import threading
import argparse
import asyncio
import sys
import uvicorn
from multiprocessing import Event, Process
from module.logger import logger
from module.webui.setting import State
from module.logger.logger import console_hdlr | 750 |
def func(ev: threading.Event):
if sys.platform.startswith("win"):
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
|
def func(ev: threading.Event):
if sys.platform.startswith("win"):
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
| State.restart_event = ev | 1 | 2023-11-01 07:09:45+00:00 | 2k |
liuzhao1225/YouDub | youdub/tts_paddle.py | [
{
"identifier": "save_wav",
"path": "youdub/utils.py",
"snippet": "def save_wav(wav: np.ndarray, path: str, sample_rate: int = 24000) -> None:\n \"\"\"Save float waveform to a file using Scipy.\n\n Args:\n wav (np.ndarray): Waveform with float values in range [-1, 1] to save.\n path ... | import os, sys
import numpy as np
import json
import logging
from paddlespeech.cli.tts import TTSExecutor
from youdub.utils import save_wav, adjust_audio_length | 758 |
sys.path.append(os.getcwd())
class TTS_Clone:
def __init__(self, model_path="fastspeech2_male", voc='pwgan_male',device='gpu:0', language='mix'):
logging.info(f'Loading TTS model {model_path}...')
self.am = model_path
self.voc = voc
self.tts = TTSExecutor()
self.language = language
logging.info('Model TTS loaded.')
def inference(self, text, output) -> np.ndarray:
self.tts(
text=text,
am=self.am,
voc=self.voc,
lang=self.language,
output=output,
use_onnx=True)
print(f'{output}: {text}')
return self.tts._outputs['wav']
def process_folder(folder, tts: TTS_Clone):
logging.info(f'TTS processing folder {folder}...')
with open(os.path.join(folder, 'zh.json'), 'r', encoding='utf-8') as f:
transcript = json.load(f)
full_wav = []
if not os.path.exists(os.path.join(folder, 'temp')):
os.makedirs(os.path.join(folder, 'temp'))
previous_end = 0
for i, line in enumerate(transcript):
text = line['text']
start = line['start']
end = line['end']
wav = tts.inference(text, os.path.join(folder, 'temp', f'zh_{i}.wav'))
|
sys.path.append(os.getcwd())
class TTS_Clone:
def __init__(self, model_path="fastspeech2_male", voc='pwgan_male',device='gpu:0', language='mix'):
logging.info(f'Loading TTS model {model_path}...')
self.am = model_path
self.voc = voc
self.tts = TTSExecutor()
self.language = language
logging.info('Model TTS loaded.')
def inference(self, text, output) -> np.ndarray:
self.tts(
text=text,
am=self.am,
voc=self.voc,
lang=self.language,
output=output,
use_onnx=True)
print(f'{output}: {text}')
return self.tts._outputs['wav']
def process_folder(folder, tts: TTS_Clone):
logging.info(f'TTS processing folder {folder}...')
with open(os.path.join(folder, 'zh.json'), 'r', encoding='utf-8') as f:
transcript = json.load(f)
full_wav = []
if not os.path.exists(os.path.join(folder, 'temp')):
os.makedirs(os.path.join(folder, 'temp'))
previous_end = 0
for i, line in enumerate(transcript):
text = line['text']
start = line['start']
end = line['end']
wav = tts.inference(text, os.path.join(folder, 'temp', f'zh_{i}.wav')) | wav_adjusted = adjust_audio_length(wav, os.path.join(folder, 'temp', f'zh_{i}.wav'), os.path.join( | 1 | 2023-11-02 08:21:31+00:00 | 2k |
dtiesling/flask-muck | tests/test.py | [
{
"identifier": "GuardianModel",
"path": "tests/app.py",
"snippet": "class GuardianModel(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String, nullable=False, unique=True)\n age = db.Column(db.Integer, nullable=True)\n family_id = db.Colu... | import json
import pytest
from unittest.mock import patch
from pydantic import BaseModel, ConfigDict
from flask_muck.exceptions import MuckImplementationError
from flask_muck.utils import (
get_url_rule,
get_fk_column,
get_query_filters_from_request_path,
get_join_models_from_parent_views,
)
from tests.app import (
GuardianModel,
ToyApiView,
ChildModel,
ToyModel,
BaseApiView,
PreCallback,
PostCallback,
GuardianApiView,
) | 1,082 |
class TestBasicCrud:
def test_create(self, post, user):
response = post("/guardians/", json={"name": "Jill"})
parent = GuardianModel.query.one()
assert response == {"name": parent.name}
# Verify integrity errors are handled.
post("/guardians/", json={"name": "Jill"}, expected_status_code=409)
def test_read(self, get, user, guardian, child):
assert get(f"/guardians/") == [{"name": guardian.name}]
assert get(f"/guardians/{guardian.id}/") == {
"name": "Samantha",
"children": [{"name": "Tamara"}],
}
def test_update(self, put, patch, guardian):
assert put(f"/guardians/{guardian.id}/", json={"name": "updated"}) == {
"name": "updated"
}
assert patch(f"/guardians/{guardian.id}/", json={"name": "patched"}) == {
"name": "patched"
}
def test_delete(self, client, guardian):
client.delete(f"/guardians/{guardian.id}/")
assert GuardianModel.query.count() == 0
class TestAllowedMethods:
def test_get_only(self, client, monkeypatch):
|
class TestBasicCrud:
def test_create(self, post, user):
response = post("/guardians/", json={"name": "Jill"})
parent = GuardianModel.query.one()
assert response == {"name": parent.name}
# Verify integrity errors are handled.
post("/guardians/", json={"name": "Jill"}, expected_status_code=409)
def test_read(self, get, user, guardian, child):
assert get(f"/guardians/") == [{"name": guardian.name}]
assert get(f"/guardians/{guardian.id}/") == {
"name": "Samantha",
"children": [{"name": "Tamara"}],
}
def test_update(self, put, patch, guardian):
assert put(f"/guardians/{guardian.id}/", json={"name": "updated"}) == {
"name": "updated"
}
assert patch(f"/guardians/{guardian.id}/", json={"name": "patched"}) == {
"name": "patched"
}
def test_delete(self, client, guardian):
client.delete(f"/guardians/{guardian.id}/")
assert GuardianModel.query.count() == 0
class TestAllowedMethods:
def test_get_only(self, client, monkeypatch): | monkeypatch.setattr(BaseApiView, "allowed_methods", {"GET"}) | 4 | 2023-11-07 03:44:49+00:00 | 2k |
BrianPugh/cyclopts | cyclopts/parameter.py | [
{
"identifier": "AnnotatedType",
"path": "cyclopts/_convert.py",
"snippet": "def _bool(s: str) -> bool:\ndef _int(s: str) -> int:\ndef _bytes(s: str) -> bytes:\ndef _bytearray(s: str) -> bytearray:\ndef _convert(type_, element, converter=None):\ndef get_origin_and_validate(type_: Type):\ndef resolve(typ... | import inspect
import attrs
from typing import Any, Callable, Optional, Tuple, Type, Union, cast, get_args, get_origin
from attrs import field, frozen
from cyclopts._convert import (
AnnotatedType,
convert,
get_origin_and_validate,
optional_to_tuple_converter,
resolve,
resolve_optional,
to_tuple_converter,
)
from cyclopts.group import Group
from cyclopts.utils import record_init | 1,282 |
def _double_hyphen_validator(instance, attribute, values):
if not values:
return
for value in values:
if value is not None and not value.startswith("--"):
raise ValueError(f'{attribute.alias} value must start with "--".')
def _negative_converter(default: Tuple[str, ...]):
    """Build an attrs converter that substitutes ``default`` when given ``None``."""
    def converter(value) -> Tuple[str, ...]:
        # None means "not provided": fall back to the caller-supplied default tuple.
        return default if value is None else to_tuple_converter(value)
    return converter
@record_init("_provided_args")
@frozen
class Parameter:
"""Cyclopts configuration for individual function parameters."""
# All documentation has been moved to ``docs/api.rst`` for greater control with attrs.
name: Tuple[str, ...] = field(
default=None,
converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),
)
|
def _double_hyphen_validator(instance, attribute, values):
    """attrs validator: every non-None entry in ``values`` must begin with "--"."""
    # A falsy ``values`` (None or empty) is vacuously valid.
    if not values:
        return
    for value in values:
        if value is not None and not value.startswith("--"):
            raise ValueError(f'{attribute.alias} value must start with "--".')
def _negative_converter(default: Tuple[str, ...]):
    """Return an attrs converter that maps ``None`` to ``default``."""
    def converter(value) -> Tuple[str, ...]:
        if value is None:
            # Not provided: use the caller-supplied default tuple.
            return default
        else:
            return to_tuple_converter(value)
    return converter
@record_init("_provided_args")
@frozen
class Parameter:
"""Cyclopts configuration for individual function parameters."""
# All documentation has been moved to ``docs/api.rst`` for greater control with attrs.
name: Tuple[str, ...] = field(
default=None,
converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),
)
| converter: Callable = field(default=None, converter=attrs.converters.default_if_none(convert)) | 0 | 2023-11-03 02:24:25+00:00 | 2k |
RoboFlamingo/RoboFlamingo | open_flamingo/open_flamingo/src/flamingo_lm.py | [
{
"identifier": "GatedCrossAttentionBlock",
"path": "open_flamingo/open_flamingo/src/helpers.py",
"snippet": "class GatedCrossAttentionBlock(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_visual,\n dim_head=64,\n heads=8,\n ff_mult=4,\n o... | import torch.nn as nn
import copy
from .helpers import GatedCrossAttentionBlock
from .utils import getattr_recursive, setattr_recursive | 1,188 |
class FlamingoLayer(nn.Module):
    """
    FlamingoLayer is a wrapper around the GatedCrossAttentionBlock and DecoderLayer.

    Visual features and media positions are injected through the ``condition_*``
    setters before ``forward`` runs; ``forward`` then applies gated cross
    attention (when present) followed by the wrapped language decoder layer.
    """
    def __init__(
        self, gated_cross_attn_layer, decoder_layer, gradient_checkpointing=False, residual=False
    ):
        super().__init__()
        self.gated_cross_attn_layer = gated_cross_attn_layer
        self.decoder_layer = decoder_layer
        # Conditioned inputs; must be set via the condition_* setters before forward().
        self.vis_x = None
        self.media_locations = None
        self.residual = residual
        if self.gated_cross_attn_layer is not None:
            self.gated_cross_attn_layer._use_gradient_checkpointing = (
                gradient_checkpointing
            )
        self.decoder_layer._use_gradient_checkpointing = gradient_checkpointing
    def clone_parameters(self):
        # Keep a frozen deep copy of the cross-attention layer for the residual path.
        self.res_layer = copy.deepcopy(self.gated_cross_attn_layer)
        if self.res_layer is not None:
            self.res_layer.requires_grad_(False)
    def is_conditioned(self) -> bool:
        """Check whether the layer is conditioned."""
        return self.vis_x is not None and self.media_locations is not None
    # Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/)
    def condition_vis_x(self, vis_x):
        # Stash the visual features consumed by the next forward pass.
        self.vis_x = vis_x
    def condition_media_locations(self, media_locations):
        self.media_locations = media_locations
    def condition_use_cached_media(self, use_cached_media):
        # NOTE(review): ``use_cached_media`` is never initialized in __init__, so
        # forward() raises AttributeError if this setter was never called — confirm callers.
        self.use_cached_media = use_cached_media
    def forward(
        self,
        lang_x,
        attention_mask=None,
        **decoder_layer_kwargs,
    ):
        # Cross attention
        if self.gated_cross_attn_layer is not None:
            if self.vis_x is None:
                raise ValueError("vis_x must be conditioned before forward pass")
            if self.media_locations is None:
                raise ValueError(
                    "media_locations must be conditioned before forward pass"
                )
            lang_x = self.gated_cross_attn_layer(
                lang_x,
                self.vis_x,
                media_locations=self.media_locations,
                use_cached_media=self.use_cached_media,
            )
        # Residual
        # This branch requires clone_parameters() to have defined ``res_layer``.
        if self.residual and self.res_layer is not None:
            # NOTE(review): ``self.attend_previous`` is never assigned in this class;
            # taking this path would raise AttributeError unless set externally — verify.
            lang_x_res = self.res_layer(
                lang_x,
                self.vis_x,
                media_locations=self.media_locations,
                attend_previous=self.attend_previous,
            )
            # Average the two cross-attention outputs.
            lang_x = (lang_x + lang_x_res) / 2.0
        # Normal decoder layer
        lang_x = self.decoder_layer(
            lang_x, attention_mask=attention_mask, **decoder_layer_kwargs
        )
        return lang_x
class FlamingoLMMixin(nn.Module):
"""
Mixin to add cross-attention layers to a language model.
"""
def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
self.decoder_layers_attr_name = decoder_layers_attr_name
def _get_decoder_layers(self):
|
class FlamingoLayer(nn.Module):
    """
    FlamingoLayer is a wrapper around the GatedCrossAttentionBlock and DecoderLayer.

    Visual features and media positions are injected through the ``condition_*``
    setters before ``forward`` runs; ``forward`` then applies gated cross
    attention (when present) followed by the wrapped language decoder layer.
    """
    def __init__(
        self, gated_cross_attn_layer, decoder_layer, gradient_checkpointing=False, residual=False
    ):
        super().__init__()
        self.gated_cross_attn_layer = gated_cross_attn_layer
        self.decoder_layer = decoder_layer
        # Conditioned inputs; must be set via the condition_* setters before forward().
        self.vis_x = None
        self.media_locations = None
        self.residual = residual
        if self.gated_cross_attn_layer is not None:
            self.gated_cross_attn_layer._use_gradient_checkpointing = (
                gradient_checkpointing
            )
        self.decoder_layer._use_gradient_checkpointing = gradient_checkpointing
    def clone_parameters(self):
        # Keep a frozen deep copy of the cross-attention layer for the residual path.
        self.res_layer = copy.deepcopy(self.gated_cross_attn_layer)
        if self.res_layer is not None:
            self.res_layer.requires_grad_(False)
    def is_conditioned(self) -> bool:
        """Check whether the layer is conditioned."""
        return self.vis_x is not None and self.media_locations is not None
    # Used this great idea from this implementation of Flamingo (https://github.com/dhansmair/flamingo-mini/)
    def condition_vis_x(self, vis_x):
        # Stash the visual features consumed by the next forward pass.
        self.vis_x = vis_x
    def condition_media_locations(self, media_locations):
        self.media_locations = media_locations
    def condition_use_cached_media(self, use_cached_media):
        # NOTE(review): ``use_cached_media`` is never initialized in __init__, so
        # forward() raises AttributeError if this setter was never called — confirm callers.
        self.use_cached_media = use_cached_media
    def forward(
        self,
        lang_x,
        attention_mask=None,
        **decoder_layer_kwargs,
    ):
        # Cross attention
        if self.gated_cross_attn_layer is not None:
            if self.vis_x is None:
                raise ValueError("vis_x must be conditioned before forward pass")
            if self.media_locations is None:
                raise ValueError(
                    "media_locations must be conditioned before forward pass"
                )
            lang_x = self.gated_cross_attn_layer(
                lang_x,
                self.vis_x,
                media_locations=self.media_locations,
                use_cached_media=self.use_cached_media,
            )
        # Residual
        # This branch requires clone_parameters() to have defined ``res_layer``.
        if self.residual and self.res_layer is not None:
            # NOTE(review): ``self.attend_previous`` is never assigned in this class;
            # taking this path would raise AttributeError unless set externally — verify.
            lang_x_res = self.res_layer(
                lang_x,
                self.vis_x,
                media_locations=self.media_locations,
                attend_previous=self.attend_previous,
            )
            # Average the two cross-attention outputs.
            lang_x = (lang_x + lang_x_res) / 2.0
        # Normal decoder layer
        lang_x = self.decoder_layer(
            lang_x, attention_mask=attention_mask, **decoder_layer_kwargs
        )
        return lang_x
class FlamingoLMMixin(nn.Module):
"""
Mixin to add cross-attention layers to a language model.
"""
def set_decoder_layers_attr_name(self, decoder_layers_attr_name):
self.decoder_layers_attr_name = decoder_layers_attr_name
def _get_decoder_layers(self): | return getattr_recursive(self, self.decoder_layers_attr_name) | 1 | 2023-11-02 01:36:23+00:00 | 2k |
XinyuanLiao/ComplexNN | complexNN/nn.py | [
{
"identifier": "complexRelu",
"path": "complexNN/functional.py",
"snippet": "def complexRelu(inp):\n return torch.complex(relu(inp.real), relu(inp.imag))"
},
{
"identifier": "complexGelu",
"path": "complexNN/functional.py",
"snippet": "def complexGelu(inp):\n return torch.complex(... | import numpy as np
import torch
import torch.nn as nn
from complexNN.functional import complexRelu, complexGelu, complexTanh, complexSigmoid, complexMaxPool2d, \
complexAvgPool2d, complexAvgPool1d, complexDropout, complexDropout2d, complexElu, complexLeakyRelu, complexSoftmax | 1,087 |
class cRelu(nn.Module):
    """Complex ReLU: ReLU applied independently to real and imaginary parts."""
    @staticmethod
    def forward(inp):
        # complexRelu(inp) == torch.complex(relu(inp.real), relu(inp.imag))
        return complexRelu(inp)
class cElu(nn.Module):
    """Complex ELU activation; delegates to ``complexNN.functional.complexElu``."""
    @staticmethod
    def forward(inp):
        return complexElu(inp)
class cLeakyRelu(nn.Module):
    """Complex LeakyReLU activation; delegates to ``complexLeakyRelu``."""
    @staticmethod
    def forward(inp):
        return complexLeakyRelu(inp)
class cSoftmax(nn.Module):
    """Complex softmax; delegates to ``complexSoftmax``."""
    @staticmethod
    def forward(inp):
        return complexSoftmax(inp)
class cGelu(nn.Module):
    """Complex GELU; delegates to ``complexGelu`` (per-component — see functional)."""
    @staticmethod
    def forward(inp):
        return complexGelu(inp)
class cTanh(nn.Module):
    """Complex tanh; delegates to ``complexTanh``."""
    @staticmethod
    def forward(inp):
        return complexTanh(inp)
class cSigmoid(nn.Module):
@staticmethod
def forward(inp):
|
class cRelu(nn.Module):
    """Complex ReLU: ReLU applied independently to real and imaginary parts."""
    @staticmethod
    def forward(inp):
        # complexRelu(inp) == torch.complex(relu(inp.real), relu(inp.imag))
        return complexRelu(inp)
class cElu(nn.Module):
    """Complex ELU activation; delegates to ``complexNN.functional.complexElu``."""
    @staticmethod
    def forward(inp):
        return complexElu(inp)
class cLeakyRelu(nn.Module):
    """Complex LeakyReLU activation; delegates to ``complexLeakyRelu``."""
    @staticmethod
    def forward(inp):
        return complexLeakyRelu(inp)
class cSoftmax(nn.Module):
    """Complex softmax; delegates to ``complexSoftmax``."""
    @staticmethod
    def forward(inp):
        return complexSoftmax(inp)
class cGelu(nn.Module):
    """Complex GELU; delegates to ``complexGelu`` (per-component — see functional)."""
    @staticmethod
    def forward(inp):
        return complexGelu(inp)
class cTanh(nn.Module):
    """Complex tanh; delegates to ``complexTanh``."""
    @staticmethod
    def forward(inp):
        return complexTanh(inp)
class cSigmoid(nn.Module):
@staticmethod
def forward(inp): | return complexSigmoid(inp) | 3 | 2023-11-02 04:52:23+00:00 | 2k |
sanmusen214/BAAH | modules/configs/MyConfig.py | [
{
"identifier": "defaultUserDict",
"path": "modules/configs/defaultSettings.py",
"snippet": ""
},
{
"identifier": "configname2screenshotname",
"path": "modules/configs/settingMaps.py",
"snippet": "def configname2screenshotname(configfilename):\n \"\"\"\n 根据config文件名,返回截图文件名\n co... | import json
import logging
import os
import time
from modules.configs.defaultSettings import defaultUserDict, defaultSoftwareDict
from modules.configs.settingMaps import configname2screenshotname | 702 |
# 程序入口应当先import这个类,然后调用parse_user_config方法解析该config实例
# 然后程序入口再import其他模块,在其他模块中import这个类,就可以直接使用这个类的实例了
class MyConfigger:
"""
维护config字典,包含软件config,用户任务config,语言包
"""
NOWVERSION="1.2.0"
USER_CONFIG_FOLDER="./BAAH_CONFIGS"
SOFTWARE_CONFIG_FOLDER="./DATA/CONFIGS"
LANGUAGE_PACKAGE_FOLDER="./DATA/i18n"
SOFTWARE_CONFIG_NAME="software_config.json"
# 读取config这个py里面的配置
    def __init__(self):
        """Initialize empty config dicts and immediately load the software config."""
        self.current_dir = os.getcwd()
        # Software-level (application) configuration.
        self.softwareconfigdict = {}
        # UI language pack for the software.
        self.languagepackagedict = {}
        # User configuration for one server/task run.
        self.userconfigdict = {}
        # Session state for one server/task run.
        self.sessiondict = {}
        # Read the software config from disk.
        self.parse_software_config(self.SOFTWARE_CONFIG_NAME)
def parse_user_config(self, file_name):
"""
读取config文件并解析
同时会清空sessiondict
"""
file_path = os.path.join(self.current_dir, self.USER_CONFIG_FOLDER, file_name)
# 字典新值
self.userconfigdict = self._read_config_file(file_path)
# 清空sessiondict
self.sessiondict = {}
# 检查缺失的配置
self._check_user_config()
# 强制设置截图文件名为配置名
self.userconfigdict["SCREENSHOT_NAME"] = configname2screenshotname(file_name)
# 检查截图文件夹路径里是否有DATA, 如果没有DATA,说明是1.1.x版本的配置,需要转换
if "DATA" not in self.userconfigdict["PIC_PATH"]:
|
# 程序入口应当先import这个类,然后调用parse_user_config方法解析该config实例
# 然后程序入口再import其他模块,在其他模块中import这个类,就可以直接使用这个类的实例了
class MyConfigger:
"""
维护config字典,包含软件config,用户任务config,语言包
"""
NOWVERSION="1.2.0"
USER_CONFIG_FOLDER="./BAAH_CONFIGS"
SOFTWARE_CONFIG_FOLDER="./DATA/CONFIGS"
LANGUAGE_PACKAGE_FOLDER="./DATA/i18n"
SOFTWARE_CONFIG_NAME="software_config.json"
# 读取config这个py里面的配置
    def __init__(self):
        """Initialize empty config dicts and immediately load the software config."""
        self.current_dir = os.getcwd()
        # Software-level (application) configuration.
        self.softwareconfigdict = {}
        # UI language pack for the software.
        self.languagepackagedict = {}
        # User configuration for one server/task run.
        self.userconfigdict = {}
        # Session state for one server/task run.
        self.sessiondict = {}
        # Read the software config from disk.
        self.parse_software_config(self.SOFTWARE_CONFIG_NAME)
def parse_user_config(self, file_name):
"""
读取config文件并解析
同时会清空sessiondict
"""
file_path = os.path.join(self.current_dir, self.USER_CONFIG_FOLDER, file_name)
# 字典新值
self.userconfigdict = self._read_config_file(file_path)
# 清空sessiondict
self.sessiondict = {}
# 检查缺失的配置
self._check_user_config()
# 强制设置截图文件名为配置名
self.userconfigdict["SCREENSHOT_NAME"] = configname2screenshotname(file_name)
# 检查截图文件夹路径里是否有DATA, 如果没有DATA,说明是1.1.x版本的配置,需要转换
if "DATA" not in self.userconfigdict["PIC_PATH"]: | fromkey = defaultUserDict["PIC_PATH"]["m"]["from"] | 0 | 2023-11-09 22:28:39+00:00 | 2k |
lucidrains/gateloop-transformer | gateloop_transformer/simplified_gate_loop.py | [
{
"identifier": "RMSNorm",
"path": "gateloop_transformer/gateloop_transformer.py",
"snippet": "class RMSNorm(Module):\n def __init__(self, dim):\n super().__init__()\n self.scale = dim ** 0.5\n self.gamma = nn.Parameter(torch.ones(dim))\n\n def forward(self, x):\n retur... | from functools import partial
from torch import nn, Tensor
from torch.nn import Module
from typing import Tuple
from einops import rearrange, pack, unpack
from einops.layers.torch import Rearrange
from gateloop_transformer.gateloop_transformer import RMSNorm
from gateloop_transformer.associative_scan import associative_scan
from jax import jit, numpy as jnp
from jax.lax import associative_scan
from jax2torch import jax2torch
import torch | 1,050 |
# plain pytorch non-fused associative scan
def exists(v):
    """Return True when ``v`` is anything other than None."""
    if v is None:
        return False
    return True
def abs_clamp_eps(t, eps = 1e-20):
    """Clamp the magnitude of ``t`` to at least ``eps`` while preserving sign.

    Entries that are exactly zero stay zero, since ``sign(0) == 0``.
    """
    magnitude = t.abs().clamp(min = eps)
    return torch.sign(t) * magnitude
# associative scan using heinsen sequences
# https://github.com/glassroom/heinsen_sequence
# graciously shared to the world by Franz A. Heinsen in https://arxiv.org/abs/2311.06281 in October 2023
def heinsen_associative_scan(a, kv, eps = 1e-20):
    """Parallel linear-recurrence scan x_t = a_t * x_{t-1} + kv_t via log-space cumsums.

    Follows Heinsen (arXiv:2311.06281). Returns ``(cumprod of clamped a, x)``,
    both taken along dim 1 and cast back to real.
    """
    # Log of the decay gates; the clamp avoids log(0).
    log_a = a.clamp(min = eps).log()
    # kv may be negative, so its log is taken in the complex domain.
    log_kv = abs_clamp_eps(kv, eps = eps).to(dtype = torch.complex64).log()
    # a_star[t] = sum_{s<=t} log a_s == log prod_{s<=t} a_s
    a_star = torch.cumsum(log_a, dim = 1)
    log_x0_plus_b_star = torch.logcumsumexp(log_kv - a_star, dim = 1)
    log_x = a_star + log_x0_plus_b_star
    # .real discards the imaginary residue introduced by the complex log.
    return a_star.exp().real, log_x.exp().real
# naive associative scan with some torchscript of binary operator
@torch.jit.script
def binary_operator(
    a: Tuple[Tensor, Tensor],
    b: Tuple[Tensor, Tensor]
):
    # Associative combine for the scan x_t = a_t * x_{t-1} + kv_t:
    # (a_i, kv_i) o (a_j, kv_j) = (a_j * a_i, kv_j + a_j * kv_i).
    a_i, kv_i = a
    a_j, kv_j = b
    # addcmul(kv_j, a_j, kv_i) == kv_j + a_j * kv_i (fused multiply-add).
    return a_j * a_i, torch.addcmul(kv_j, a_j, kv_i)
# gate loop operator
def gate_loop_operator(q, kv, a, cache = None, heinsen = False):
if exists(cache):
cache_a, cache_kv = cache
a, a_ps = pack([cache_a, a], 'b * d')
kv, kv_ps = pack([cache_kv, kv], 'b * d')
if heinsen:
a, kv = heinsen_associative_scan(a, kv)
else:
|
# plain pytorch non-fused associative scan
def exists(v):
    """True iff ``v`` is not None."""
    return v is not None
def abs_clamp_eps(t, eps = 1e-20):
    """Clamp |t| to at least ``eps`` while keeping the sign (zeros stay zero)."""
    sign = torch.sign(t)
    return sign * t.abs().clamp(min = eps)
# associative scan using heinsen sequences
# https://github.com/glassroom/heinsen_sequence
# graciously shared to the world by Franz A. Heinsen in https://arxiv.org/abs/2311.06281 in October 2023
def heinsen_associative_scan(a, kv, eps = 1e-20):
    """Parallel linear-recurrence scan x_t = a_t * x_{t-1} + kv_t via log-space cumsums.

    Follows Heinsen (arXiv:2311.06281). Returns ``(cumprod of clamped a, x)``,
    both taken along dim 1 and cast back to real.
    """
    # Log of the decay gates; the clamp avoids log(0).
    log_a = a.clamp(min = eps).log()
    # kv may be negative, so its log is taken in the complex domain.
    log_kv = abs_clamp_eps(kv, eps = eps).to(dtype = torch.complex64).log()
    a_star = torch.cumsum(log_a, dim = 1)
    log_x0_plus_b_star = torch.logcumsumexp(log_kv - a_star, dim = 1)
    log_x = a_star + log_x0_plus_b_star
    # .real discards the imaginary residue introduced by the complex log.
    return a_star.exp().real, log_x.exp().real
# naive associative scan with some torchscript of binary operator
@torch.jit.script
def binary_operator(
    a: Tuple[Tensor, Tensor],
    b: Tuple[Tensor, Tensor]
):
    # Associative combine for the scan x_t = a_t * x_{t-1} + kv_t:
    # (a_i, kv_i) o (a_j, kv_j) = (a_j * a_i, kv_j + a_j * kv_i).
    a_i, kv_i = a
    a_j, kv_j = b
    # addcmul(kv_j, a_j, kv_i) == kv_j + a_j * kv_i (fused multiply-add).
    return a_j * a_i, torch.addcmul(kv_j, a_j, kv_i)
# gate loop operator
def gate_loop_operator(q, kv, a, cache = None, heinsen = False):
if exists(cache):
cache_a, cache_kv = cache
a, a_ps = pack([cache_a, a], 'b * d')
kv, kv_ps = pack([cache_kv, kv], 'b * d')
if heinsen:
a, kv = heinsen_associative_scan(a, kv)
else: | a, kv = associative_scan(binary_operator, (a, kv)) | 1 | 2023-11-06 21:56:40+00:00 | 2k |
QingruZhang/PASTA | evaluation/data.py | [
{
"identifier": "env_utils",
"path": "evaluation/utils/env_utils.py",
"snippet": "ENV_DATA_DIR = \"CM_DATA_DIR\"\nENV_MODELS_DIR = \"CM_MODELS_DIR\"\nENV_RESULTS_DIR = \"CM_RESULTS_DIR\"\nDEFAULT_DATA_DIR = \"data\"\nDEFAULT_MODELS_DIR = \"models\"\nDEFAULT_RESULTS_DIR = \"results\"\ndef maybe_relative_... | import argparse
import csv
import json
import logging
import pickle
import random
import datasets
import numpy
import scipy.sparse
import spacy
import wget
from collections import defaultdict
from functools import cache
from itertools import chain
from pathlib import Path
from typing import Any, Sequence, TypedDict, cast
from evaluation.utils import env_utils, lang_utils
from evaluation.utils.typing import Dataset, PathLike, StrSequence
from sklearn.feature_extraction.text import TfidfVectorizer
from tqdm.auto import tqdm | 1,035 | """Datasets for evaluating context mediation in LMs."""
logger = logging.getLogger(__name__)
# Dataset keys accepted by the loaders in this module.
SUPPORTED_DATASETS = ("counterfact", "winoventi", "biosbias", "mcrae")
# ROME project mirrors for CounterFact and its TF-IDF / attribute-snippet side files.
ROME_BASE_URL = "https://rome.baulab.info/data/dsets"
COUNTERFACT_URL = f"{ROME_BASE_URL}/counterfact.json"
ATTRIBUTE_SNIPPETS_URL = f"{ROME_BASE_URL}/attribute_snippets.json"
TFIDF_IDF_URL = f"{ROME_BASE_URL}/idf.npy"
TFIDF_VOCAB_URL = f"{ROME_BASE_URL}/tfidf_vocab.json"
# WinoVenti TSV published by the commonsense-exception project.
WINOVENTI_URL = "https://raw.githubusercontent.com/commonsense-exception/commonsense-exception/main/data/winoventi_bert_large_final.tsv"
# McRae feature prefixes treated as blacklisted (presumably filtered during loading).
_MCRAE_BLACKLISTED_FEATURE_PREFIXES = ("bought/sold", "eg -", "killed", "king of")
_MCRAE_SPLITTABLE_FEATURE_PREFIXES = (
"associated with",
"an",
"a",
"becomes a",
"causes",
"comes from",
"comes in",
"comes on",
"different",
"found at",
"found below",
"found by",
"found in",
"found on",
"found over",
"found near",
"has an",
"has a",
"has",
"is an",
"is attached to",
"is a",
"is",
"like a",
"made by",
"made of",
"made with",
"made from",
"owned by",
"part of a",
"part of",
"requires a",
"requires",
"used as",
"used at",
"used by",
"used for",
"used in",
"used on",
"used with",
"uses",
)
_BIOS_BIAS_BLACKLISTED_NAMES = frozenset(
{
"Non-Residential",
}
)
# These prefixes do not make as much sense when put in front of the first name, so
# we'll try to remove them as much as possible.
_BIOS_BIAS_PREFIXES = (
"professor",
"prof.",
"prof",
"dr.",
"dr",
"doctor",
"mr.",
"mr",
"ms.",
"ms",
"mrs.",
"mrs",
"rev.",
"rev",
"pastor",
)
_COUNTERFACT_PARAPHRASE_PROMPT_ARTIFACTS = (" (b. ", "(tr. ", "(min. ")
class ContextMediationSample(TypedDict):
    """Single sample that can be used for context mediation analysis.

    ``context`` asserts the (possibly counterfactual) ``attribute`` about
    ``entity``; ``prompt`` then probes whether a model mediates that context
    when choosing between ``target_mediated`` and ``target_unmediated``.
    """
    id: str  # Identifier
    entity: str  # "Barack Obama"
    attribute: str  # "invented the iPhone"
    context: str  # "Everyone knows that Barack Obama invented the iPhone."
    prompt: str  # "Barack Obama received a degree in"
    target_mediated: str | None  # "computer science" or not set for generation
    target_unmediated: str | None  # "law" or not set for generation
    source: dict | None  # Where this sample was derived from, e.g. counterfact sample.
class ContextMediationBatch(TypedDict):
"""Batch of context mediation samples."""
| """Datasets for evaluating context mediation in LMs."""
logger = logging.getLogger(__name__)
# Dataset keys accepted by the loaders in this module.
SUPPORTED_DATASETS = ("counterfact", "winoventi", "biosbias", "mcrae")
# ROME project mirrors for CounterFact and its TF-IDF / attribute-snippet side files.
ROME_BASE_URL = "https://rome.baulab.info/data/dsets"
COUNTERFACT_URL = f"{ROME_BASE_URL}/counterfact.json"
ATTRIBUTE_SNIPPETS_URL = f"{ROME_BASE_URL}/attribute_snippets.json"
TFIDF_IDF_URL = f"{ROME_BASE_URL}/idf.npy"
TFIDF_VOCAB_URL = f"{ROME_BASE_URL}/tfidf_vocab.json"
# WinoVenti TSV published by the commonsense-exception project.
WINOVENTI_URL = "https://raw.githubusercontent.com/commonsense-exception/commonsense-exception/main/data/winoventi_bert_large_final.tsv"
# McRae feature prefixes treated as blacklisted (presumably filtered during loading).
_MCRAE_BLACKLISTED_FEATURE_PREFIXES = ("bought/sold", "eg -", "killed", "king of")
_MCRAE_SPLITTABLE_FEATURE_PREFIXES = (
"associated with",
"an",
"a",
"becomes a",
"causes",
"comes from",
"comes in",
"comes on",
"different",
"found at",
"found below",
"found by",
"found in",
"found on",
"found over",
"found near",
"has an",
"has a",
"has",
"is an",
"is attached to",
"is a",
"is",
"like a",
"made by",
"made of",
"made with",
"made from",
"owned by",
"part of a",
"part of",
"requires a",
"requires",
"used as",
"used at",
"used by",
"used for",
"used in",
"used on",
"used with",
"uses",
)
_BIOS_BIAS_BLACKLISTED_NAMES = frozenset(
{
"Non-Residential",
}
)
# These prefixes do not make as much sense when put in front of the first name, so
# we'll try to remove them as much as possible.
_BIOS_BIAS_PREFIXES = (
"professor",
"prof.",
"prof",
"dr.",
"dr",
"doctor",
"mr.",
"mr",
"ms.",
"ms",
"mrs.",
"mrs",
"rev.",
"rev",
"pastor",
)
_COUNTERFACT_PARAPHRASE_PROMPT_ARTIFACTS = (" (b. ", "(tr. ", "(min. ")
class ContextMediationSample(TypedDict):
    """Single sample that can be used for context mediation analysis.

    ``context`` asserts the (possibly counterfactual) ``attribute`` about
    ``entity``; ``prompt`` then probes whether a model mediates that context
    when choosing between ``target_mediated`` and ``target_unmediated``.
    """
    id: str  # Identifier
    entity: str  # "Barack Obama"
    attribute: str  # "invented the iPhone"
    context: str  # "Everyone knows that Barack Obama invented the iPhone."
    prompt: str  # "Barack Obama received a degree in"
    target_mediated: str | None  # "computer science" or not set for generation
    target_unmediated: str | None  # "law" or not set for generation
    source: dict | None  # Where this sample was derived from, e.g. counterfact sample.
class ContextMediationBatch(TypedDict):
"""Batch of context mediation samples."""
| id: StrSequence | 2 | 2023-11-06 05:36:05+00:00 | 2k |
Ljzd-PRO/KToolBox | ktoolbox/api/base.py | [
{
"identifier": "config",
"path": "ktoolbox/configuration.py",
"snippet": "class APIConfiguration(BaseModel):\nclass DownloaderConfiguration(BaseModel):\nclass PostStructureConfiguration(BaseModel):\nclass JobConfiguration(BaseModel):\nclass LoggerConfiguration(BaseModel):\nclass Configuration(BaseSetti... | from abc import ABC, abstractmethod
from typing import Literal, Generic, TypeVar, Optional, Callable
from urllib.parse import urlunparse
from loguru import logger
from pydantic import BaseModel, ValidationError, RootModel
from tenacity import RetryCallState, wait_fixed, retry_if_result
from tenacity.stop import stop_base, stop_never, stop_after_attempt
from ktoolbox.configuration import config
from ktoolbox.enum import RetCodeEnum
from ktoolbox.utils import BaseRet, generate_msg
import httpx
import tenacity | 768 |
__all__ = ["APITenacityStop", "APIRet", "BaseAPI"]
_T = TypeVar('_T')
class APITenacityStop(stop_base):
    """Stop strategy for API retries: capped by ``config.api.retry_times``, or unbounded."""
    def __call__(self, retry_state: RetryCallState) -> bool:
        retry_times = config.api.retry_times
        # ``None`` means "retry forever"; otherwise cap the number of attempts.
        if retry_times is not None:
            return stop_after_attempt(retry_times)(retry_state)
        return stop_never(retry_state)
def _retry_error_callback(state: RetryCallState) -> "APIRet":
    """
    Called after all retries failed.

    :return: Keep the origin return value
    """
    # noinspection SpellCheckingInspection
    # Plain literal: the original used an f-string with no placeholders (F541).
    logger.error(
        generate_msg(
            "Kemono API call failed",
            ret=state.outcome.result(),
        )
    )
    return state.outcome.result()
def _retry(*args, **kwargs):
    """Wrap an API method with a new ``Retrying`` object"""
    # Retry while the result is falsy, waiting config.api.retry_interval between
    # attempts; attempt count is governed by APITenacityStop (config.api.retry_times).
    wrapper = tenacity.retry(
        stop=APITenacityStop(),
        wait=wait_fixed(config.api.retry_interval),
        retry=retry_if_result(lambda x: not bool(x)),
        reraise=True,
        retry_error_callback=_retry_error_callback,
        **kwargs
    )
    # Support both bare usage (@_retry) and parameterized usage (@_retry(...)).
    if len(args) == 1 and callable(args[0]):
        return wrapper(args[0])
    else:
        return wrapper
|
__all__ = ["APITenacityStop", "APIRet", "BaseAPI"]
_T = TypeVar('_T')
class APITenacityStop(stop_base):
    """APIs Stop strategies: retry forever when ``config.api.retry_times`` is None, else cap attempts."""
    def __call__(self, retry_state: RetryCallState) -> bool:
        if config.api.retry_times is None:
            # No limit configured: never stop retrying.
            return stop_never(retry_state)
        else:
            return stop_after_attempt(config.api.retry_times)(retry_state)
def _retry_error_callback(state: RetryCallState) -> "APIRet":
    """
    Called after all retries failed.

    :return: Keep the origin return value
    """
    # noinspection SpellCheckingInspection
    logger.error(
        generate_msg(
            f"Kemono API call failed",
            ret=state.outcome.result(),
        )
    )
    return state.outcome.result()
def _retry(*args, **kwargs):
    """Wrap an API method with a new ``Retrying`` object"""
    # Retry while the result is falsy, waiting config.api.retry_interval between
    # attempts; attempt count is governed by APITenacityStop (config.api.retry_times).
    wrapper = tenacity.retry(
        stop=APITenacityStop(),
        wait=wait_fixed(config.api.retry_interval),
        retry=retry_if_result(lambda x: not bool(x)),
        reraise=True,
        retry_error_callback=_retry_error_callback,
        **kwargs
    )
    # Support both bare usage (@_retry) and parameterized usage (@_retry(...)).
    if len(args) == 1 and callable(args[0]):
        return wrapper(args[0])
    else:
        return wrapper
| class APIRet(BaseRet[_T]): | 2 | 2023-11-06 15:24:12+00:00 | 2k |
jpjacobpadilla/Google-Colab-Selenium | google_colab_selenium/chromedriver.py | [
{
"identifier": "ColabSeleniumManager",
"path": "google_colab_selenium/colab_selenium_manager.py",
"snippet": "class ColabSeleniumManager:\n default_colab_options = [\n '--headless',\n '--no-sandbox',\n '--disable-dev-shm-usage',\n '--lang=en'\n ]\n\n _downloaded_chr... | from google_colab_selenium.colab_selenium_manager import ColabSeleniumManager
from google_colab_selenium.spinner import Spinner
from google_colab_selenium.exceptions import StartingChromeDriverError
from selenium.webdriver.chrome.options import Options
from selenium import webdriver | 1,448 |
class ChromeDriver(webdriver.Chrome):
"""
A thin wrapper around the Selenium Chrome Webdriver which makes it easy
to use in Google Colab Notebooks.
The ColabSeleniumManager class installs Google-Chrome-Stable and adds the
nessasary headers to use in a Colab Notebook.
The headers that are automatically added are:
--headless
--no-sandbox
--disable-dev-shm-usage
--lang=en
"""
def __init__(self, options: Options = None, keep_alive: bool = True):
self.manager = ColabSeleniumManager(options)
try:
|
class ChromeDriver(webdriver.Chrome):
"""
A thin wrapper around the Selenium Chrome Webdriver which makes it easy
to use in Google Colab Notebooks.
The ColabSeleniumManager class installs Google-Chrome-Stable and adds the
nessasary headers to use in a Colab Notebook.
The headers that are automatically added are:
--headless
--no-sandbox
--disable-dev-shm-usage
--lang=en
"""
def __init__(self, options: Options = None, keep_alive: bool = True):
self.manager = ColabSeleniumManager(options)
try: | with Spinner('Initializing Chromedriver', done='Initialized Chromedriver'): | 1 | 2023-11-06 21:18:41+00:00 | 2k |
microsoft/monitors4codegen | tests/monitor_guided_decoding/test_numargs_monitor_java.py | [
{
"identifier": "create_test_context",
"path": "tests/test_utils.py",
"snippet": "@contextlib.contextmanager\ndef create_test_context(params: dict) -> Iterator[MultilspyContext]:\n \"\"\"\n Creates a test context for the given parameters.\n \"\"\"\n config = MultilspyConfig.from_dict(params)... | import torch
import transformers
import pytest
from pathlib import PurePath
from monitors4codegen.multilspy.language_server import SyncLanguageServer
from monitors4codegen.multilspy.multilspy_config import Language
from tests.test_utils import create_test_context, is_cuda_available
from transformers import AutoTokenizer, AutoModelForCausalLM
from monitors4codegen.multilspy.multilspy_utils import TextUtils
from monitors4codegen.monitor_guided_decoding.monitors.numargs_monitor import NumMethodArgumentsMonitor
from monitors4codegen.monitor_guided_decoding.monitor import MonitorFileBuffer
from monitors4codegen.monitor_guided_decoding.hf_gen import MGDLogitsProcessor
from transformers.generation.utils import LogitsProcessorList
from monitors4codegen.multilspy.multilspy_types import Position
from monitors4codegen.monitor_guided_decoding.tokenizer_wrapper import HFTokenizerWrapper | 792 | """
This file contains tests for Monitor-Guided Decoding for correct number of arguments in Java
"""
pytest_plugins = ("pytest_asyncio",)
@pytest.mark.asyncio
async def test_multilspy_java_clickhouse_highlevel_sinker_modified_numargs():
"""
Test the working of numargs_monitor with Java repository - clickhouse-highlevel-sinker modified
"""
code_language = Language.JAVA
params = {
"code_language": code_language,
"repo_url": "https://github.com/LakshyAAAgrawal/clickhouse-highlevel-sinker/",
"repo_commit": "5775fd7a67e7b60998e1614cf44a8a1fc3190ab0"
}
device = torch.device('cuda' if is_cuda_available() else 'cpu')
model: transformers.modeling_utils.PreTrainedModel = AutoModelForCausalLM.from_pretrained(
"bigcode/santacoder", trust_remote_code=True
).to(device)
tokenizer = AutoTokenizer.from_pretrained("bigcode/santacoder")
| """
This file contains tests for Monitor-Guided Decoding for correct number of arguments in Java
"""
pytest_plugins = ("pytest_asyncio",)
@pytest.mark.asyncio
async def test_multilspy_java_clickhouse_highlevel_sinker_modified_numargs():
"""
Test the working of numargs_monitor with Java repository - clickhouse-highlevel-sinker modified
"""
code_language = Language.JAVA
params = {
"code_language": code_language,
"repo_url": "https://github.com/LakshyAAAgrawal/clickhouse-highlevel-sinker/",
"repo_commit": "5775fd7a67e7b60998e1614cf44a8a1fc3190ab0"
}
device = torch.device('cuda' if is_cuda_available() else 'cpu')
model: transformers.modeling_utils.PreTrainedModel = AutoModelForCausalLM.from_pretrained(
"bigcode/santacoder", trust_remote_code=True
).to(device)
tokenizer = AutoTokenizer.from_pretrained("bigcode/santacoder")
| with create_test_context(params) as context: | 0 | 2023-11-04 21:49:04+00:00 | 2k |
bigai-nlco/langsuite | langsuite/envs/teach/libs/teach/dataset/episode.py | [
{
"identifier": "Initialization",
"path": "langsuite/envs/teach/libs/teach/dataset/initialization.py",
"snippet": "class Initialization:\n def __init__(\n self, time_start, agents=None, objects=None, custom_object_metadata=None\n ):\n self.time_start = time_start\n self.agents... | from collections import OrderedDict
from langsuite.envs.teach.libs.teach.dataset.initialization import Initialization
from langsuite.envs.teach.libs.teach.dataset.interaction import Interaction | 1,457 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from __future__ import annotations
class Episode:
    def __init__(
        self,
        episode_id,
        world,
        world_type,
        commander_embodied,
        initial_state=None,
        interactions=None,
    ):
        """One episode: world metadata plus its ordered interaction log."""
        self.episode_id = episode_id
        self.world = world
        self.world_type = world_type
        self.commander_embodied = commander_embodied
        # Optional starting world state (an Initialization-like object).
        self.initial_state = initial_state
        # Avoid the shared-mutable-default pitfall: always start a fresh list.
        self.interactions = interactions if interactions is not None else []
        # Populated externally once the episode finishes.
        self.final_state = None
    def reset_initial_state(self, initialization):
        # NOTE(review): this assigns ``self.initialization`` while the rest of the
        # class uses ``self.initial_state`` — confirm whether ``initial_state`` was intended.
        self.initialization = initialization
    def add_interaction(self, interaction):
        """Append one interaction to the episode log."""
        self.interactions.append(interaction)
    def remove_interaction(self):
        """Drop the most recent interaction, if any (no-op on an empty log)."""
        if len(self.interactions) > 0:
            del self.interactions[-1]
    def to_dict(self):
        """Serialize the episode to an insertion-ordered, JSON-friendly OrderedDict."""
        _dict = OrderedDict()
        _dict["episode_id"] = self.episode_id
        _dict["world"] = self.world
        _dict["world_type"] = self.world_type
        # Serialized as a string rather than a bool.
        _dict["commander_embodied"] = str(self.commander_embodied)
        # Optional sections are emitted only when present.
        if self.initial_state is not None:
            _dict["initial_state"] = self.initial_state.to_dict()
        _dict["interactions"] = [x.to_dict() for x in self.interactions]
        if self.final_state is not None:
            _dict["final_state"] = self.final_state.to_dict()
        return _dict
@classmethod
def from_dict(cls, episode_dict, definitions, process_init_state=True) -> "Episode":
interactions = []
for interaction_dict in episode_dict.get("interactions"):
action_type = definitions.map_actions_id2info[
interaction_dict["action_id"]
]["action_type"]
interaction = Interaction.from_dict(interaction_dict, action_type)
interactions.append(interaction)
return cls(
episode_dict["episode_id"],
episode_dict["world"],
episode_dict["world_type"],
episode_dict["commander_embodied"],
| # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
from __future__ import annotations
class Episode:
    def __init__(
        self,
        episode_id,
        world,
        world_type,
        commander_embodied,
        initial_state=None,
        interactions=None,
    ):
        """One episode: world metadata plus its ordered interaction log."""
        self.episode_id = episode_id
        self.world = world
        self.world_type = world_type
        self.commander_embodied = commander_embodied
        # Optional starting world state (an Initialization-like object).
        self.initial_state = initial_state
        # Avoid the shared-mutable-default pitfall: always start a fresh list.
        self.interactions = interactions if interactions is not None else []
        # Populated externally once the episode finishes.
        self.final_state = None
    def reset_initial_state(self, initialization):
        # NOTE(review): this assigns ``self.initialization`` while the rest of the
        # class uses ``self.initial_state`` — confirm whether ``initial_state`` was intended.
        self.initialization = initialization
    def add_interaction(self, interaction):
        """Append one interaction to the episode log."""
        self.interactions.append(interaction)
    def remove_interaction(self):
        """Drop the most recent interaction, if any (no-op on an empty log)."""
        if len(self.interactions) > 0:
            del self.interactions[-1]
    def to_dict(self):
        """Serialize the episode to an insertion-ordered, JSON-friendly OrderedDict."""
        _dict = OrderedDict()
        _dict["episode_id"] = self.episode_id
        _dict["world"] = self.world
        _dict["world_type"] = self.world_type
        # Serialized as a string rather than a bool.
        _dict["commander_embodied"] = str(self.commander_embodied)
        # Optional sections are emitted only when present.
        if self.initial_state is not None:
            _dict["initial_state"] = self.initial_state.to_dict()
        _dict["interactions"] = [x.to_dict() for x in self.interactions]
        if self.final_state is not None:
            _dict["final_state"] = self.final_state.to_dict()
        return _dict
@classmethod
def from_dict(cls, episode_dict, definitions, process_init_state=True) -> "Episode":
interactions = []
for interaction_dict in episode_dict.get("interactions"):
action_type = definitions.map_actions_id2info[
interaction_dict["action_id"]
]["action_type"]
interaction = Interaction.from_dict(interaction_dict, action_type)
interactions.append(interaction)
return cls(
episode_dict["episode_id"],
episode_dict["world"],
episode_dict["world_type"],
episode_dict["commander_embodied"], | initial_state=Initialization.from_dict(episode_dict["initial_state"]) | 0 | 2023-11-01 01:47:00+00:00 | 2k |
tmlr-group/DeepInception | conversers.py | [
{
"identifier": "FALCON_PATH",
"path": "config.py",
"snippet": "FALCON_PATH = f\"{ROOT_PATH}/falcon-7b-instruct\""
},
{
"identifier": "LLAMA_PATH",
"path": "config.py",
"snippet": "LLAMA_PATH = f\"{ROOT_PATH}/Llama-2-7b-hf\""
},
{
"identifier": "TARGET_TEMP",
"path": "config.... | import torch
import common
from transformers import AutoModelForCausalLM, AutoTokenizer
from config import (FALCON_PATH, LLAMA_PATH, TARGET_TEMP, TARGET_TOP_P,
VICUNA_PATH)
from language_models import GPT, HuggingFace | 1,083 |
def load_attack_and_target_models(args):
targetLM = TargetLM(model_name = args.target_model,
max_n_tokens = args.target_max_n_tokens,
|
def load_attack_and_target_models(args):
targetLM = TargetLM(model_name = args.target_model,
max_n_tokens = args.target_max_n_tokens, | temperature = TARGET_TEMP, # init to 0 | 2 | 2023-11-07 12:47:47+00:00 | 2k |
radekd91/inferno | inferno/datasets/FaceAlignmentTools.py | [
{
"identifier": "bbox2point",
"path": "inferno/datasets/ImageDatasetHelpers.py",
"snippet": "def bbox2point(left, right, top, bottom, type='bbox'):\n ''' bbox from detector and landmarks are different\n '''\n if type == 'kpt68':\n old_size = (right - left + bottom - top) / 2 * 1.1\n ... | import numpy as np
import skvideo
import types
from pathlib import Path
from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp | 1,135 |
def align_face(image, landmarks, landmark_type, scale_adjustment, target_size_height, target_size_width=None,):
"""
Returns an image with the face aligned to the center of the image.
:param image: The full resolution image in which to align the face.
:param landmarks: The landmarks of the face in the image (in the original image coordinates).
:param landmark_type: The type of landmarks. Such as 'kpt68' or 'bbox' or 'mediapipe'.
:param scale_adjustment: The scale adjustment to apply to the image.
:param target_size_height: The height of the output image.
:param target_size_width: The width of the output image. If not provided, it is assumed to be the same as target_size_height.
:return: The aligned face image. The image will be in range [0,1].
"""
# landmarks_for_alignment = "mediapipe"
left = landmarks[:,0].min()
top = landmarks[:,1].min()
right = landmarks[:,0].max()
bottom = landmarks[:,1].max()
old_size, center = bbox2point(left, right, top, bottom, type=landmark_type)
size = (old_size * scale_adjustment).astype(np.int32)
|
def align_face(image, landmarks, landmark_type, scale_adjustment, target_size_height, target_size_width=None,):
"""
Returns an image with the face aligned to the center of the image.
:param image: The full resolution image in which to align the face.
:param landmarks: The landmarks of the face in the image (in the original image coordinates).
:param landmark_type: The type of landmarks. Such as 'kpt68' or 'bbox' or 'mediapipe'.
:param scale_adjustment: The scale adjustment to apply to the image.
:param target_size_height: The height of the output image.
:param target_size_width: The width of the output image. If not provided, it is assumed to be the same as target_size_height.
:return: The aligned face image. The image will be in range [0,1].
"""
# landmarks_for_alignment = "mediapipe"
left = landmarks[:,0].min()
top = landmarks[:,1].min()
right = landmarks[:,0].max()
bottom = landmarks[:,1].max()
old_size, center = bbox2point(left, right, top, bottom, type=landmark_type)
size = (old_size * scale_adjustment).astype(np.int32)
| img_warped, lmk_warped = bbpoint_warp(image, center, size, target_size_height, target_size_width, landmarks=landmarks) | 1 | 2023-11-07 20:13:32+00:00 | 2k |
hxz393/ConfigCenterComparer | module/get_query_sql.py | [
{
"identifier": "SQL_CONFIG_NACOS",
"path": "config/settings.py",
"snippet": "SQL_CONFIG_NACOS = \"\"\"\nSELECT\n data_id,\n group_id,\n content,\n gmt_modified\nFROM\n config_info\n\"\"\""
},
{
"identifier": "SQL_CONFIG_APOLLO_ID",
"path": "config/settings.py",
"snippet": "SQL_CONF... | import logging
from typing import Dict, Optional
from config.settings import SQL_CONFIG_NACOS, SQL_CONFIG_APOLLO_ID, SQL_CONFIG_APOLLO_NAME, APOLLO_NAME_LIST | 753 | """
此模块用于处理配置中心相关的查询,包括从不同的配置中心获取 SQL 查询语句。
本模块提供了 `get_query_sql` 函数,用于根据配置中心类型和 Apollo 应用名称获取对应的查询 SQL。支持从 Nacos 和 Apollo 配置中心获取数据。
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
logger = logging.getLogger(__name__)
def get_query_sql(config_main: Dict[str, str]) -> Optional[str]:
"""
根据配置中心类型和 Apollo 应用名称获取查询 SQL。
此函数接收一个字典,包含配置中心类型和 Apollo 应用名称。它根据配置中心类型(Nacos 或 Apollo)以及 Apollo 应用名称('AppId' 或 'Name'),返回相应的 SQL 查询语句。
:param config_main: 包含配置中心类型和 Apollo 应用名称的字典。
:type config_main: Dict[str, str]
:return: 对应的查询 SQL 语句。如果无法匹配到合适的配置中心或应用名称,则返回 None。
:rtype: Optional[str]
:example:
>>> get_query_sql({"config_center": "Nacos"})
SQL_CONFIG_NACOS
>>> get_query_sql({"config_center": "Apollo", "apollo_name": "AppId"})
SQL_CONFIG_APOLLO_ID
>>> get_query_sql({"config_center": "Apollo", "apollo_name": "Name"})
SQL_CONFIG_APOLLO_NAME
"""
try:
config_center = config_main.get('config_center')
apollo_name = config_main.get('apollo_name')
if config_center == 'Nacos':
return SQL_CONFIG_NACOS
| """
此模块用于处理配置中心相关的查询,包括从不同的配置中心获取 SQL 查询语句。
本模块提供了 `get_query_sql` 函数,用于根据配置中心类型和 Apollo 应用名称获取对应的查询 SQL。支持从 Nacos 和 Apollo 配置中心获取数据。
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
logger = logging.getLogger(__name__)
def get_query_sql(config_main: Dict[str, str]) -> Optional[str]:
"""
根据配置中心类型和 Apollo 应用名称获取查询 SQL。
此函数接收一个字典,包含配置中心类型和 Apollo 应用名称。它根据配置中心类型(Nacos 或 Apollo)以及 Apollo 应用名称('AppId' 或 'Name'),返回相应的 SQL 查询语句。
:param config_main: 包含配置中心类型和 Apollo 应用名称的字典。
:type config_main: Dict[str, str]
:return: 对应的查询 SQL 语句。如果无法匹配到合适的配置中心或应用名称,则返回 None。
:rtype: Optional[str]
:example:
>>> get_query_sql({"config_center": "Nacos"})
SQL_CONFIG_NACOS
>>> get_query_sql({"config_center": "Apollo", "apollo_name": "AppId"})
SQL_CONFIG_APOLLO_ID
>>> get_query_sql({"config_center": "Apollo", "apollo_name": "Name"})
SQL_CONFIG_APOLLO_NAME
"""
try:
config_center = config_main.get('config_center')
apollo_name = config_main.get('apollo_name')
if config_center == 'Nacos':
return SQL_CONFIG_NACOS | elif config_center == 'Apollo' and apollo_name in APOLLO_NAME_LIST: | 3 | 2023-11-07 01:02:38+00:00 | 2k |
pytorch-labs/ao | torchao/quantization/smoothquant.py | [
{
"identifier": "dynamically_quantize_per_channel",
"path": "torchao/quantization/quant_primitives.py",
"snippet": "def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype):\n # assumes symmetric quantization\n # assumes axis == 0\n # assumes dense memory format\n # TODO(f... | import torch
import torch.nn.functional as F
import torchao.quantization.quant_api as quant_api
from .quant_primitives import (
dynamically_quantize_per_channel,
quant_int8_dynamic_per_token_linear,
) | 1,488 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Testing out accuracy-only implementation of SmoothQuant
(https://arxiv.org/pdf/2211.10438.pdf)
Note: this is an application of input-weight equalization, with the addition that the
multiplication by scale is fused into the preceding layer, specifically for relevant
parts of transformer blocks.
"""
__all__ = [
"get_scale",
"SmoothFakeDynQuantMixin",
"SmoothFakeDynamicallyQuantizedLinear",
"swap_linear_with_smooth_fq_linear",
"smooth_fq_linear_to_inference",
"set_smooth_fq_attribute",
]
def get_scale(X_absmax, W_absmax, alpha=0.5):
"""
Calculate the scale based on abs(max(X)), abs(max(W)) and alpha
If X is of dimension `b*n*k` and W is dimension `k*m`, the returned
scale is of dimension `k`.
Note: X_absmax is calculated outside of this function because we
need to keep a running version of it during calibration. W_absmax
is calculated outside of this function for consistency with X_absmax.
"""
X_pow = torch.pow(X_absmax, alpha)
W_pow = torch.pow(W_absmax, 1.0 - alpha)
div = X_pow / W_pow
return div.reshape(-1)
class SmoothFakeDynQuantMixin(torch.nn.Module):
def init_smoothquant_variables(self, alpha):
self.calibrating = True
self.x_running_abs_max = None
self.register_buffer("smooth_scale", None)
self.alpha = alpha
# debug only
self.debug_skip_scaling = False
# self.debug_skip_scaling = True
# Currently torch._int_mm cuBLAS underlying kernel does not work with
# non-contiguous weight. However, torch.compil'ing through
# torch._int_mm leads to triton code which is ~2x faster if the weight
# is transposed. So, for now we have a debug flag to toggle whether
# we store the quantized weight transposed, so that we can get correct
# numerics both in eager mode and after torch.compile.
# The default is True for cuBLAS / eager mode, set to False for
# torch.compile.
# self.store_w_int_repr_t = True
self.store_w_int_repr_t = False
def update_x_running_abs_max(self, X):
# update the running max of incoming activations
all_dims_except_last = tuple(range(len(X.shape) - 1))
cur_abs_max = torch.amax(torch.abs(X), dim=all_dims_except_last)
if self.x_running_abs_max is None:
self.x_running_abs_max = cur_abs_max
else:
self.x_running_abs_max = torch.max(cur_abs_max, self.x_running_abs_max)
def get_scaled_quantized_w(self):
# inference
assert (
self.smooth_scale is not None
), "self.smooth_scale is None, did you turn on inference?"
W = self.weight
# scale weight
# in the future, this can be done ahead of time instead of
# during inference
if not self.debug_skip_scaling:
# TODO(future): do below in `to_inference` instead of here
W = torch.matmul(
torch.diag(self.smooth_scale), W.transpose(0, 1)
).transpose(0, 1)
# fake quantize input and weight, and then do matmul in fp32/fp16
# in the future, this should be replaced with quantized kernels which
# work on NVIDIA GPUs (such as protoquant's implementation)
W_dq_dtype = W.dtype
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Testing out accuracy-only implementation of SmoothQuant
(https://arxiv.org/pdf/2211.10438.pdf)
Note: this is an application of input-weight equalization, with the addition that the
multiplication by scale is fused into the preceding layer, specifically for relevant
parts of transformer blocks.
"""
__all__ = [
"get_scale",
"SmoothFakeDynQuantMixin",
"SmoothFakeDynamicallyQuantizedLinear",
"swap_linear_with_smooth_fq_linear",
"smooth_fq_linear_to_inference",
"set_smooth_fq_attribute",
]
def get_scale(X_absmax, W_absmax, alpha=0.5):
"""
Calculate the scale based on abs(max(X)), abs(max(W)) and alpha
If X is of dimension `b*n*k` and W is dimension `k*m`, the returned
scale is of dimension `k`.
Note: X_absmax is calculated outside of this function because we
need to keep a running version of it during calibration. W_absmax
is calculated outside of this function for consistency with X_absmax.
"""
X_pow = torch.pow(X_absmax, alpha)
W_pow = torch.pow(W_absmax, 1.0 - alpha)
div = X_pow / W_pow
return div.reshape(-1)
class SmoothFakeDynQuantMixin(torch.nn.Module):
def init_smoothquant_variables(self, alpha):
self.calibrating = True
self.x_running_abs_max = None
self.register_buffer("smooth_scale", None)
self.alpha = alpha
# debug only
self.debug_skip_scaling = False
# self.debug_skip_scaling = True
# Currently torch._int_mm cuBLAS underlying kernel does not work with
# non-contiguous weight. However, torch.compil'ing through
# torch._int_mm leads to triton code which is ~2x faster if the weight
# is transposed. So, for now we have a debug flag to toggle whether
# we store the quantized weight transposed, so that we can get correct
# numerics both in eager mode and after torch.compile.
# The default is True for cuBLAS / eager mode, set to False for
# torch.compile.
# self.store_w_int_repr_t = True
self.store_w_int_repr_t = False
def update_x_running_abs_max(self, X):
# update the running max of incoming activations
all_dims_except_last = tuple(range(len(X.shape) - 1))
cur_abs_max = torch.amax(torch.abs(X), dim=all_dims_except_last)
if self.x_running_abs_max is None:
self.x_running_abs_max = cur_abs_max
else:
self.x_running_abs_max = torch.max(cur_abs_max, self.x_running_abs_max)
def get_scaled_quantized_w(self):
# inference
assert (
self.smooth_scale is not None
), "self.smooth_scale is None, did you turn on inference?"
W = self.weight
# scale weight
# in the future, this can be done ahead of time instead of
# during inference
if not self.debug_skip_scaling:
# TODO(future): do below in `to_inference` instead of here
W = torch.matmul(
torch.diag(self.smooth_scale), W.transpose(0, 1)
).transpose(0, 1)
# fake quantize input and weight, and then do matmul in fp32/fp16
# in the future, this should be replaced with quantized kernels which
# work on NVIDIA GPUs (such as protoquant's implementation)
W_dq_dtype = W.dtype | W_int_repr, W_scales, W_zps = dynamically_quantize_per_channel( | 0 | 2023-11-03 21:27:36+00:00 | 2k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.