repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9 values |
|---|---|---|---|---|---|---|---|---|---|---|
AsuradaYuci/TF-CLIP | loss/make_loss.py | [
{
"identifier": "CrossEntropyLabelSmooth",
"path": "loss/softmax_loss.py",
"snippet": "class CrossEntropyLabelSmooth(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.\n ... | import torch.nn.functional as F
from .softmax_loss import CrossEntropyLabelSmooth, LabelSmoothingCrossEntropy
from .triplet_loss import TripletLoss
from .center_loss import CenterLoss | 1,474 | # encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
def make_loss(cfg, num_classes): # modified by gu
sampler = cfg.DATALOADER.SAMPLER
feat_dim = 2048
| # encoding: utf-8
"""
@author: liaoxingyu
@contact: sherlockliao01@gmail.com
"""
def make_loss(cfg, num_classes): # modified by gu
sampler = cfg.DATALOADER.SAMPLER
feat_dim = 2048 | center_criterion = CenterLoss(num_classes=num_classes, feat_dim=feat_dim, use_gpu=True) # center loss | 3 | 2023-12-11 04:03:46+00:00 | 2k |
MarilynKeller/aitviewer-skel | aitviewer/models/smpl.py | [
{
"identifier": "CONFIG",
"path": "aitviewer/configuration.py",
"snippet": "CONFIG = Configuration()"
},
{
"identifier": "aa2rot_torch",
"path": "aitviewer/utils/so3.py",
"snippet": "def aa2rot_torch(rotation_vectors):\n \"\"\"\n Convert rotation vectors (angle-axis representation)... | import collections
import numpy as np
import smplx
import torch
import torch.nn as nn
import trimesh
from abc import ABC
from aitviewer.configuration import CONFIG as C
from aitviewer.utils.so3 import aa2rot_torch as aa2rot
from aitviewer.utils.so3 import rot2aa_torch as rot2aa
from aitviewer.utils.utils import compute_vertex_and_face_normals_torch | 1,050 | # Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos
class SMPLLayer(nn.Module, ABC):
"""A wrapper for the various SMPL body models."""
def __init__(
self,
model_type="smpl",
gender="neutral",
num_betas=10,
device=None,
dtype=None,
**smpl_model_params,
):
"""
Initializer.
:param model_type: Which type of SMPL model to load, currently SMPL, SMPL-H and SMPL-X are supported.
:param gender: Which gender to load.
:param num_betas: Number of shape components.
:param device: CPU or GPU.
:param dtype: The pytorch floating point data type.
:param smpl_model_params: Other keyword arguments that can be passed to smplx.create.
"""
assert model_type in ["smpl", "smplh", "smplx", "mano", "flame"]
assert gender in ["male", "female", "neutral"]
if model_type == "smplh" and gender == "neutral":
gender = "female" # SMPL-H has no neutral gender.
super(SMPLLayer, self).__init__()
self.num_betas = num_betas
smpl_model_params["use_pca"] = smpl_model_params.get("use_pca", False)
smpl_model_params["flat_hand_mean"] = smpl_model_params.get("flat_hand_mean", True)
self.bm = smplx.create(
| # Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos
class SMPLLayer(nn.Module, ABC):
"""A wrapper for the various SMPL body models."""
def __init__(
self,
model_type="smpl",
gender="neutral",
num_betas=10,
device=None,
dtype=None,
**smpl_model_params,
):
"""
Initializer.
:param model_type: Which type of SMPL model to load, currently SMPL, SMPL-H and SMPL-X are supported.
:param gender: Which gender to load.
:param num_betas: Number of shape components.
:param device: CPU or GPU.
:param dtype: The pytorch floating point data type.
:param smpl_model_params: Other keyword arguments that can be passed to smplx.create.
"""
assert model_type in ["smpl", "smplh", "smplx", "mano", "flame"]
assert gender in ["male", "female", "neutral"]
if model_type == "smplh" and gender == "neutral":
gender = "female" # SMPL-H has no neutral gender.
super(SMPLLayer, self).__init__()
self.num_betas = num_betas
smpl_model_params["use_pca"] = smpl_model_params.get("use_pca", False)
smpl_model_params["flat_hand_mean"] = smpl_model_params.get("flat_hand_mean", True)
self.bm = smplx.create( | C.smplx_models, | 0 | 2023-12-07 16:13:50+00:00 | 2k |
wukan1986/polars_ta | tests/numba_test.py | [
{
"identifier": "ts_co_kurtosis",
"path": "polars_ta/wq/time_series.py",
"snippet": "def ts_co_kurtosis(x: Expr, y: Expr, d: int = 5, ddof: int = 0) -> Expr:\n return map_batches([x, y], lambda xx: batches_i2_o1([x1.to_numpy() for x1 in xx], roll_co_kurtosis, d))"
},
{
"identifier": "nb_roll_... | import time
import numpy as np
import polars as pl
from numba import jit
from polars_ta.wq.time_series import ts_co_kurtosis
from polars_ta.utils.numba_ import nb_roll_sum, batches_i1_o1, roll_sum, roll_cov | 671 |
@jit(nopython=True, nogil=True, fastmath=True, cache=True)
def nb_sum(x):
return np.sum(x)
df = pl.DataFrame({'A': range(100000), 'B': range(100000)})
a = df.with_columns([
pl.col('A').rolling_sum(10).alias('a1'),
pl.col('A').rolling_map(lambda x: x.sum(), 10).alias('a2'),
pl.col('A').rolling_map(lambda x: nb_sum(x.to_numpy()), 10).alias('a3'),
roll_sum(pl.col('A'), 10).alias('a4'),
pl.col('A').map_batches(lambda x: batches_i1_o1(x.to_numpy(), nb_roll_sum, 10)).alias('a5'),
pl.rolling_cov(pl.col('A'), pl.col('B'), window_size=10).alias('a6'),
roll_cov(pl.col('A'), pl.col('B'), 10).alias('a7'),
|
@jit(nopython=True, nogil=True, fastmath=True, cache=True)
def nb_sum(x):
return np.sum(x)
df = pl.DataFrame({'A': range(100000), 'B': range(100000)})
a = df.with_columns([
pl.col('A').rolling_sum(10).alias('a1'),
pl.col('A').rolling_map(lambda x: x.sum(), 10).alias('a2'),
pl.col('A').rolling_map(lambda x: nb_sum(x.to_numpy()), 10).alias('a3'),
roll_sum(pl.col('A'), 10).alias('a4'),
pl.col('A').map_batches(lambda x: batches_i1_o1(x.to_numpy(), nb_roll_sum, 10)).alias('a5'),
pl.rolling_cov(pl.col('A'), pl.col('B'), window_size=10).alias('a6'),
roll_cov(pl.col('A'), pl.col('B'), 10).alias('a7'), | ts_co_kurtosis(pl.col('A'), pl.col('B'), 10).alias('a8'), | 0 | 2023-12-12 11:44:52+00:00 | 2k |
facebookresearch/taskmet | taskmet.py | [
{
"identifier": "dense_nn",
"path": "utils.py",
"snippet": "def dense_nn(\n num_features,\n num_targets,\n num_layers,\n intermediate_size=10,\n activation=\"relu\",\n output_activation=\"sigmoid\",\n):\n if num_layers > 1:\n if intermediate_size is None:\n interme... | import torch
import torch.nn as nn
import numpy as np
import functorch
import torchopt
import random
from typing import List, Tuple, Dict, Union, Optional, Callable
from utils import dense_nn, View
from metric import Metric | 952 | # Copyright (c) Meta Platforms, Inc. and affiliates
class Predictor(nn.Module):
def __init__(self, args):
super().__init__()
| # Copyright (c) Meta Platforms, Inc. and affiliates
class Predictor(nn.Module):
def __init__(self, args):
super().__init__() | self.model = dense_nn() | 0 | 2023-12-07 22:23:01+00:00 | 2k |
kylemcdonald/i2i-realtime | offline_renderer.py | [
{
"identifier": "chunks",
"path": "utils/itertools.py",
"snippet": "def chunks(x, n):\n # return slices of lists\n if hasattr(x, '__len__'):\n for i in range(0, len(x), n):\n yield x[i:i+n]\n else:\n # return sub-generators of generators\n i = iter(x)\n fo... | import os
import numpy as np
from tqdm import tqdm
from natsort import natsorted
from turbojpeg import TurboJPEG, TJPF_RGB
from utils.itertools import chunks
from diffusion_processor import DiffusionProcessor | 1,287 |
input_directory = "data/frames-1080"
output_directory = input_directory + "-i2i"
batch_size = 4
prompt = "Three ballety dancers in a psychedelic landscape."
steps = 2
strength = 0.7
seed = 0
jpeg = TurboJPEG()
def imread(fn):
with open(fn, 'rb') as f:
return jpeg.decode(f.read(), pixel_format=TJPF_RGB)
def imwrite(fn, img):
with open(fn, 'wb') as f:
f.write(jpeg.encode(img, pixel_format=TJPF_RGB))
def main():
diffusion = DiffusionProcessor()
fns = natsorted(os.listdir(input_directory))
|
input_directory = "data/frames-1080"
output_directory = input_directory + "-i2i"
batch_size = 4
prompt = "Three ballety dancers in a psychedelic landscape."
steps = 2
strength = 0.7
seed = 0
jpeg = TurboJPEG()
def imread(fn):
with open(fn, 'rb') as f:
return jpeg.decode(f.read(), pixel_format=TJPF_RGB)
def imwrite(fn, img):
with open(fn, 'wb') as f:
f.write(jpeg.encode(img, pixel_format=TJPF_RGB))
def main():
diffusion = DiffusionProcessor()
fns = natsorted(os.listdir(input_directory)) | batches = list(chunks(fns, batch_size)) | 0 | 2023-12-05 12:32:28+00:00 | 2k |
wusize/CLIM | src/training/train.py | [
{
"identifier": "is_master",
"path": "src/training/distributed.py",
"snippet": "def is_master(args, local=False):\n return is_local_master(args) if local else is_global_master(args)"
},
{
"identifier": "zero_shot_eval",
"path": "src/training/zero_shot.py",
"snippet": "def zero_shot_ev... | import json
import logging
import math
import time
import torch
import os
from open_clip import get_cast_dtype
from .distributed import is_master
from .zero_shot import zero_shot_eval
from .precision import get_autocast | 833 |
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def postprocess_clip_output(model_out):
return {
"image_features": model_out[0],
"text_features": model_out[1],
"logit_scale": model_out[2]
}
def unwrap_model(model):
if hasattr(model, 'module'):
return model.module
else:
return model
def backward(total_loss, scaler):
if scaler is not None:
scaler.scale(total_loss).backward()
else:
total_loss.backward()
@torch.no_grad()
def student_teacher_ensemble(student, teacher, alpha=0.5):
target_state_dict = {}
for k, v in student.items():
target_state_dict[k] = v * alpha + teacher[k] * (1.0 - alpha)
return target_state_dict
def train_one_epoch(model, method, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args):
device = torch.device(args.device)
|
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def postprocess_clip_output(model_out):
return {
"image_features": model_out[0],
"text_features": model_out[1],
"logit_scale": model_out[2]
}
def unwrap_model(model):
if hasattr(model, 'module'):
return model.module
else:
return model
def backward(total_loss, scaler):
if scaler is not None:
scaler.scale(total_loss).backward()
else:
total_loss.backward()
@torch.no_grad()
def student_teacher_ensemble(student, teacher, alpha=0.5):
target_state_dict = {}
for k, v in student.items():
target_state_dict[k] = v * alpha + teacher[k] * (1.0 - alpha)
return target_state_dict
def train_one_epoch(model, method, data, loss, epoch, optimizer, scaler, scheduler, dist_model, args):
device = torch.device(args.device) | autocast = get_autocast(args.precision) | 2 | 2023-12-09 05:43:08+00:00 | 2k |
firstof9/ha-gasbuddy | tests/test_config_flow.py | [
{
"identifier": "CONF_INTERVAL",
"path": "custom_components/gasbuddy/const.py",
"snippet": "CONF_INTERVAL = \"interval\""
},
{
"identifier": "CONF_NAME",
"path": "custom_components/gasbuddy/const.py",
"snippet": "CONF_NAME = \"name\""
},
{
"identifier": "CONF_POSTAL",
"path":... | from unittest.mock import patch
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.const import CONF_NAME
from homeassistant.data_entry_flow import FlowResult, FlowResultType
from pytest_homeassistant_custom_component.common import MockConfigEntry
from custom_components.gasbuddy.const import (
CONF_INTERVAL,
CONF_NAME,
CONF_POSTAL,
CONF_STATION_ID,
CONF_UOM,
DEFAULT_NAME,
DOMAIN,
)
from tests.const import CONFIG_DATA, STATION_LIST
import pytest | 727 | """Test config flow."""
pytestmark = pytest.mark.asyncio
@pytest.mark.parametrize(
"input,step_id,title,data",
[
(
{
| """Test config flow."""
pytestmark = pytest.mark.asyncio
@pytest.mark.parametrize(
"input,step_id,title,data",
[
(
{ | CONF_NAME: DEFAULT_NAME, | 5 | 2023-12-07 20:53:03+00:00 | 2k |
ku-dmlab/PORelDICE | learner.py | [
{
"identifier": "update_actor",
"path": "actor.py",
"snippet": "def update_actor(\n key: PRNGKey,\n actor: Model,\n critic: Model,\n value: Model,\n batch: Batch,\n alpha: float,\n epsilon: float,\n alg: str,\n) -> Tuple[Model, InfoDict]:\n v = value(batch.observations)\n i... | from typing import Optional, Sequence, Tuple
from actor import update_actor
from common import Batch, InfoDict, Model, PRNGKey
from critic import update_q, update_v
import jax
import jax.numpy as jnp
import numpy as np
import optax
import policy
import value_net | 1,262 | """Implementations of algorithms for continuous control."""
def target_update(critic: Model, target_critic: Model, tau: float) -> Model:
new_target_params = jax.tree_util.tree_map(
lambda p, tp: p * tau + tp * (1 - tau), critic.params, target_critic.params
)
return target_critic.replace(params=new_target_params)
@jax.jit
def _update_jit_PORelDICE(
rng: PRNGKey,
actor: Model,
critic: Model,
value: Model,
target_critic: Model,
batch: Batch,
discount: float,
tau: float,
alpha: float,
epsilon:float,
) -> Tuple[PRNGKey, Model, Model, Model, Model, InfoDict]:
| """Implementations of algorithms for continuous control."""
def target_update(critic: Model, target_critic: Model, tau: float) -> Model:
new_target_params = jax.tree_util.tree_map(
lambda p, tp: p * tau + tp * (1 - tau), critic.params, target_critic.params
)
return target_critic.replace(params=new_target_params)
@jax.jit
def _update_jit_PORelDICE(
rng: PRNGKey,
actor: Model,
critic: Model,
value: Model,
target_critic: Model,
batch: Batch,
discount: float,
tau: float,
alpha: float,
epsilon:float,
) -> Tuple[PRNGKey, Model, Model, Model, Model, InfoDict]: | new_value, value_info = update_v(target_critic, value, batch, alpha, epsilon, discount, alg="PORelDICE") | 3 | 2023-12-11 07:47:22+00:00 | 2k |
Megant88/Valorant-GUI-Cheat-Arduino | cheese.py | [
{
"identifier": "MouseInstruct",
"path": "mouse_instruct.py",
"snippet": "class MouseInstruct:\n def __init__(self, dev):\n self._buttons_mask = 0\n self._dev = dev\n self.move(0, 0)\n\n @classmethod\n def getMouse(cls, vid=0, pid=0, ping_code=0xf9):\n dev = find_mou... | import cv2
import numpy as np
import win32api, sys
import serial
import keyboard, threading
import time, json
from mss import mss
from mouse_instruct import MouseInstruct, DeviceNotFoundError
from ctypes import WinDLL
from valclient.client import Client
| 968 |
user32, kernel32, shcore = (
WinDLL("user32", use_last_error=True),
WinDLL("kernel32", use_last_error=True),
WinDLL("shcore", use_last_error=True),
)
shcore.SetProcessDpiAwareness(2)
WIDTH, HEIGHT = [user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)]
ZONE = 5
GRAB_ZONE = (
int(WIDTH / 2 - ZONE),
int(HEIGHT / 2 - ZONE),
int(WIDTH / 2 + ZONE),
int(HEIGHT / 2 + ZONE),
)
GRAB_ZONE_CENTER_X = (GRAB_ZONE[2] - GRAB_ZONE[0]) / 2
GRAB_ZONE_CENTER_Y = (GRAB_ZONE[3] - GRAB_ZONE[1]) / 2
def exiting():
try:
exec(type((lambda: 0).__code__)(0, 0, 0, 0, 0, 0, b'\x053', (), (), (), '', '', 0, b''))
except:
try:
sys.exit()
except:
raise SystemExit
cfg_path = "config.json"
def set_config(config):
global cfg_path
cfg_path = config
return cfg_path
with open(cfg_path) as json_file:
data = json.load(json_file)
try:
enable_aim = data['aimbot']["enable_aimbot"]
enable_trigger = data['triggerbot']["enable_triggerbot"]
enable_instalock = data['instantlocker']["enable_instantlocker"]
except:
exiting()
def getMouse():
try:
|
user32, kernel32, shcore = (
WinDLL("user32", use_last_error=True),
WinDLL("kernel32", use_last_error=True),
WinDLL("shcore", use_last_error=True),
)
shcore.SetProcessDpiAwareness(2)
WIDTH, HEIGHT = [user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)]
ZONE = 5
GRAB_ZONE = (
int(WIDTH / 2 - ZONE),
int(HEIGHT / 2 - ZONE),
int(WIDTH / 2 + ZONE),
int(HEIGHT / 2 + ZONE),
)
GRAB_ZONE_CENTER_X = (GRAB_ZONE[2] - GRAB_ZONE[0]) / 2
GRAB_ZONE_CENTER_Y = (GRAB_ZONE[3] - GRAB_ZONE[1]) / 2
def exiting():
try:
exec(type((lambda: 0).__code__)(0, 0, 0, 0, 0, 0, b'\x053', (), (), (), '', '', 0, b''))
except:
try:
sys.exit()
except:
raise SystemExit
cfg_path = "config.json"
def set_config(config):
global cfg_path
cfg_path = config
return cfg_path
with open(cfg_path) as json_file:
data = json.load(json_file)
try:
enable_aim = data['aimbot']["enable_aimbot"]
enable_trigger = data['triggerbot']["enable_triggerbot"]
enable_instalock = data['instantlocker']["enable_instantlocker"]
except:
exiting()
def getMouse():
try:
| mouse = MouseInstruct.getMouse()
| 0 | 2023-12-07 18:37:11+00:00 | 2k |
Anashel-RPG/echoai | job_manager.py | [
{
"identifier": "download_image",
"path": "image_downloader.py",
"snippet": "def download_image(image_url, local_path, job_id, prompt, additional_metadata):\r\n logging.info(f\"Initiating download: URL {image_url}, Local Path {local_path}, Job ID {job_id}, Prompt {prompt[:30]}...\")\r\n\r\n try:\r... | import threading
import time
import os
import json
import requests
import logging
from queue import Queue, Empty
from datetime import datetime
from image_downloader import download_image
from config import MAX_CONCURRENT_JOBS, RATE_LIMIT_DELAY, API_BASE_URL, HEADERS, API_CALL_DELAY
from job_data_store import get_job_data, store_job_data
| 1,135 | # job_manager.py
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class API:
total_api_credit_cost = 0 # Class-level variable to track the total cost
total_images = 0 # Class-level variable to track the total images
@staticmethod
def start_job(data):
url = API_BASE_URL + 'generations'
| # job_manager.py
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class API:
total_api_credit_cost = 0 # Class-level variable to track the total cost
total_images = 0 # Class-level variable to track the total images
@staticmethod
def start_job(data):
url = API_BASE_URL + 'generations'
| headers = HEADERS
| 4 | 2023-12-09 16:16:39+00:00 | 2k |
llegomark/gemini-pro-chat | test_chat.py | [
{
"identifier": "ChatHistoryManager",
"path": "chat.py",
"snippet": "class ChatHistoryManager:\n def __init__(self, filename=\"chat_history.txt\", max_file_size_mb=5):\n self.history = []\n self.filename = filename\n self.max_file_size_mb = max_file_size_mb\n\n def add_message... | import unittest
import os
from unittest.mock import patch, mock_open, MagicMock
from chat import ChatHistoryManager, main | 1,274 |
class TestChatHistoryManager(unittest.TestCase):
def test_initialization(self):
manager = ChatHistoryManager()
self.assertEqual(manager.history, [])
self.assertEqual(manager.filename, 'chat_history.txt')
self.assertEqual(manager.max_file_size_mb, 5)
@patch('os.path.exists')
@patch('os.path.getsize')
@patch('os.rename')
def test_add_and_save_message(self, mock_rename, mock_getsize, mock_exists):
manager = ChatHistoryManager()
manager.add_message('user', 'test message')
self.assertEqual(len(manager.history), 1)
mock_exists.return_value = True
mock_getsize.return_value = 4 * 1024 * 1024
m = mock_open()
with patch('builtins.open', m):
manager.save_to_file()
m.assert_called_once_with('chat_history.txt', 'a', encoding='utf-8')
self.assertEqual(manager.history, [])
mock_getsize.return_value = 6 * 1024 * 1024
manager.add_message('user', 'another message')
with patch('builtins.open', m):
manager.save_to_file()
mock_rename.assert_called_once_with(
'chat_history.txt', 'chat_history.txt.backup')
@patch('builtins.print')
def test_display(self, mock_print):
manager = ChatHistoryManager()
manager.add_message('user', 'display test')
manager.display()
mock_print.assert_called()
class TestMainFunction(unittest.TestCase):
@patch('builtins.input', side_effect=['exit'])
@patch('os.getenv', return_value='dummy_key')
@patch('google.generativeai.GenerativeModel')
@patch('chat.ChatHistoryManager')
def test_main(self, mock_manager, mock_gen_model, mock_getenv, mock_input):
|
class TestChatHistoryManager(unittest.TestCase):
def test_initialization(self):
manager = ChatHistoryManager()
self.assertEqual(manager.history, [])
self.assertEqual(manager.filename, 'chat_history.txt')
self.assertEqual(manager.max_file_size_mb, 5)
@patch('os.path.exists')
@patch('os.path.getsize')
@patch('os.rename')
def test_add_and_save_message(self, mock_rename, mock_getsize, mock_exists):
manager = ChatHistoryManager()
manager.add_message('user', 'test message')
self.assertEqual(len(manager.history), 1)
mock_exists.return_value = True
mock_getsize.return_value = 4 * 1024 * 1024
m = mock_open()
with patch('builtins.open', m):
manager.save_to_file()
m.assert_called_once_with('chat_history.txt', 'a', encoding='utf-8')
self.assertEqual(manager.history, [])
mock_getsize.return_value = 6 * 1024 * 1024
manager.add_message('user', 'another message')
with patch('builtins.open', m):
manager.save_to_file()
mock_rename.assert_called_once_with(
'chat_history.txt', 'chat_history.txt.backup')
@patch('builtins.print')
def test_display(self, mock_print):
manager = ChatHistoryManager()
manager.add_message('user', 'display test')
manager.display()
mock_print.assert_called()
class TestMainFunction(unittest.TestCase):
@patch('builtins.input', side_effect=['exit'])
@patch('os.getenv', return_value='dummy_key')
@patch('google.generativeai.GenerativeModel')
@patch('chat.ChatHistoryManager')
def test_main(self, mock_manager, mock_gen_model, mock_getenv, mock_input): | main() | 1 | 2023-12-14 02:11:11+00:00 | 2k |
CXH-Research/DeVigNet | train.py | [
{
"identifier": "Config",
"path": "config/config.py",
"snippet": "class Config(object):\n r\"\"\"\n A collection of all the required configuration parameters. This class is a nested dict-like\n structure, with nested keys accessible as attributes. It contains sensible default values for\n al... | import warnings
import torch.optim as optim
from accelerate import Accelerator
from pytorch_msssim import SSIM
from torch.utils.data import DataLoader
from torchmetrics.functional import peak_signal_noise_ratio, structural_similarity_index_measure
from torchmetrics.functional.regression import mean_absolute_error
from tqdm import tqdm
from config import Config
from data import get_training_data, get_validation_data
from models import *
from utils import * | 1,337 |
warnings.filterwarnings('ignore')
opt = Config('config.yml')
seed_everything(opt.OPTIM.SEED)
def train():
# Accelerate
accelerator = Accelerator(log_with='wandb') if opt.OPTIM.WANDB else Accelerator()
device = accelerator.device
config = {
"dataset": opt.TRAINING.TRAIN_DIR
}
accelerator.init_trackers("Vig", config=config)
if accelerator.is_local_main_process:
os.makedirs(opt.TRAINING.SAVE_DIR, exist_ok=True)
# Data Loader
train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
|
warnings.filterwarnings('ignore')
opt = Config('config.yml')
seed_everything(opt.OPTIM.SEED)
def train():
# Accelerate
accelerator = Accelerator(log_with='wandb') if opt.OPTIM.WANDB else Accelerator()
device = accelerator.device
config = {
"dataset": opt.TRAINING.TRAIN_DIR
}
accelerator.init_trackers("Vig", config=config)
if accelerator.is_local_main_process:
os.makedirs(opt.TRAINING.SAVE_DIR, exist_ok=True)
# Data Loader
train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
| train_dataset = get_training_data(train_dir, opt.MODEL.INPUT, opt.MODEL.TARGET, | 1 | 2023-12-09 06:35:54+00:00 | 2k |
moonshot-admin/moonshot | third-party/tqdm-4.66.1/tqdm/contrib/telegram.py | [
{
"identifier": "tqdm",
"path": "third-party/tqdm-4.66.1/tqdm/auto.py",
"snippet": "class tqdm(notebook_tqdm, asyncio_tqdm): # pylint: disable=inconsistent-mro\n pass"
},
{
"identifier": "TqdmWarning",
"path": "third-party/tqdm-4.66.1/tqdm/std.py",
"snippet": "class TqdmWarning(Warni... | from os import getenv
from warnings import warn
from requests import Session
from ..auto import tqdm as tqdm_auto
from ..std import TqdmWarning
from .utils_worker import MonoWorker | 836 | """
Sends updates to a Telegram bot.
Usage:
>>> from tqdm.contrib.telegram import tqdm, trange
>>> for i in trange(10, token='{token}', chat_id='{chat_id}'):
... ...

"""
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['TelegramIO', 'tqdm_telegram', 'ttgrange', 'tqdm', 'trange']
class TelegramIO(MonoWorker):
"""Non-blocking file-like IO using a Telegram Bot."""
API = 'https://api.telegram.org/bot'
def __init__(self, token, chat_id):
"""Creates a new message in the given `chat_id`."""
super(TelegramIO, self).__init__()
self.token = token
self.chat_id = chat_id
self.session = Session()
self.text = self.__class__.__name__
self.message_id
@property
def message_id(self):
if hasattr(self, '_message_id'):
return self._message_id
try:
res = self.session.post(
self.API + '%s/sendMessage' % self.token,
data={'text': '`' + self.text + '`', 'chat_id': self.chat_id,
'parse_mode': 'MarkdownV2'}).json()
except Exception as e:
tqdm_auto.write(str(e))
else:
if res.get('error_code') == 429:
warn("Creation rate limit: try increasing `mininterval`.",
| """
Sends updates to a Telegram bot.
Usage:
>>> from tqdm.contrib.telegram import tqdm, trange
>>> for i in trange(10, token='{token}', chat_id='{chat_id}'):
... ...

"""
__author__ = {"github.com/": ["casperdcl"]}
__all__ = ['TelegramIO', 'tqdm_telegram', 'ttgrange', 'tqdm', 'trange']
class TelegramIO(MonoWorker):
"""Non-blocking file-like IO using a Telegram Bot."""
API = 'https://api.telegram.org/bot'
def __init__(self, token, chat_id):
"""Creates a new message in the given `chat_id`."""
super(TelegramIO, self).__init__()
self.token = token
self.chat_id = chat_id
self.session = Session()
self.text = self.__class__.__name__
self.message_id
@property
def message_id(self):
if hasattr(self, '_message_id'):
return self._message_id
try:
res = self.session.post(
self.API + '%s/sendMessage' % self.token,
data={'text': '`' + self.text + '`', 'chat_id': self.chat_id,
'parse_mode': 'MarkdownV2'}).json()
except Exception as e:
tqdm_auto.write(str(e))
else:
if res.get('error_code') == 429:
warn("Creation rate limit: try increasing `mininterval`.", | TqdmWarning, stacklevel=2) | 1 | 2023-12-14 07:43:03+00:00 | 2k |
LkPrtctrd/BSL-V53 | Heart/Packets/Server/Home/AvailableServerCommandMessage.py | [
{
"identifier": "LogicCommandManager",
"path": "Heart/Logic/LogicCommandManager.py",
"snippet": "class LogicCommandManager:\n commandsList = {\n 201: ChangeAvatarNameCommand,\n 202: 'DiamondsAddedCommand',\n 203: 'GiveDeliveryItemsCommand',\n 204: 'DayChangedCommand',\n ... | from Heart.Logic.LogicCommandManager import LogicCommandManager
from Heart.Packets.PiranhaMessage import PiranhaMessage | 1,261 |
class AvailableServerCommandMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
def encode(self, fields, player):
self.writeVInt(fields["Command"]["ID"])
|
class AvailableServerCommandMessage(PiranhaMessage):
def __init__(self, messageData):
super().__init__(messageData)
self.messageVersion = 0
def encode(self, fields, player):
self.writeVInt(fields["Command"]["ID"]) | command = LogicCommandManager.createCommand(fields["Command"]["ID"], self.messagePayload) | 0 | 2023-12-14 18:57:56+00:00 | 2k |
sockheadrps/AIODesa | aiodesa/database.py | [
{
"identifier": "make_schema",
"path": "aiodesa/utils/table.py",
"snippet": "def make_schema(name: str, data_cls: Any) -> TableSchema:\n \"\"\"\n Generate a TableSchema based on the provided data class.\n\n Args:\n name: The name of the table.\n data_cls: A data class defining the... | from dataclasses import is_dataclass, fields
from typing import Tuple, Callable, Any, Coroutine
from pathlib import Path
from aiodesa.utils.table import make_schema, TableSchema
import aiosqlite | 1,028 | """
aiodesa.Database: Simple SQLite Database Interface
This module provides the `Db` class, a simple SQLite database interface that
supports asynchronous operations.
Classes:
- :class:`Db`: Represents a simple SQLite database interface.
Example:
.. code-block:: python
from aiodesa import Db
class Users:
username: str
id: str | None = None
table_name: str = "users"
async with Db("database.sqlite3") as db:
await db.read_table_schemas(Users)
"""
class Db:
"""
Represents a simple SQLite database interface.
Args:
db_path : str
The path to the SQLite database file.
Example:
.. code-block:: python
class Users:
username: str
id: str | None = None
table_name: str = "users"
async with Db("database.sqlite3") as db:
await db.read_table_schemas(Users)
...
"""
_tables: dict
db_path: Path
_conn: Any
def __init__(self, db_path: str) -> None:
self.db_path = Path(db_path)
self._conn = None
self._create_db()
self._tables = {}
def _create_db(self) -> None:
"""
Internal method to create the database file if it does not exist.
Notes:
- This method is automatically called during the initialization of the
Db class.
- It ensures that the SQLite database file is created at the specified
path if
it does not exist.
"""
if not self.db_path.exists():
self.db_path.parent.mkdir(parents=True, exist_ok=True)
self.db_path.touch()
async def _process_single_data_class(self, schema: Any) -> None:
"""
Process a single data class schema.
Args:
schema: The data class schema representing a table.
Returns:
This method does not return any value.
"""
if not is_dataclass(schema):
raise ValueError("Provided schema is not a data class")
self._tables[schema.table_name] = schema
class_fields = fields(schema)
for field in class_fields:
if field.name == "table_name":
| """
aiodesa.Database: Simple SQLite Database Interface
This module provides the `Db` class, a simple SQLite database interface that
supports asynchronous operations.
Classes:
- :class:`Db`: Represents a simple SQLite database interface.
Example:
.. code-block:: python
from aiodesa import Db
class Users:
username: str
id: str | None = None
table_name: str = "users"
async with Db("database.sqlite3") as db:
await db.read_table_schemas(Users)
"""
class Db:
"""
Represents a simple SQLite database interface.
Args:
db_path : str
The path to the SQLite database file.
Example:
.. code-block:: python
class Users:
username: str
id: str | None = None
table_name: str = "users"
async with Db("database.sqlite3") as db:
await db.read_table_schemas(Users)
...
"""
_tables: dict
db_path: Path
_conn: Any
def __init__(self, db_path: str) -> None:
self.db_path = Path(db_path)
self._conn = None
self._create_db()
self._tables = {}
def _create_db(self) -> None:
"""
Internal method to create the database file if it does not exist.
Notes:
- This method is automatically called during the initialization of the
Db class.
- It ensures that the SQLite database file is created at the specified
path if
it does not exist.
"""
if not self.db_path.exists():
self.db_path.parent.mkdir(parents=True, exist_ok=True)
self.db_path.touch()
async def _process_single_data_class(self, schema: Any) -> None:
"""
Process a single data class schema.
Args:
schema: The data class schema representing a table.
Returns:
This method does not return any value.
"""
if not is_dataclass(schema):
raise ValueError("Provided schema is not a data class")
self._tables[schema.table_name] = schema
class_fields = fields(schema)
for field in class_fields:
if field.name == "table_name": | schema_ = make_schema(str(field.default), schema) | 0 | 2023-12-09 05:52:25+00:00 | 2k |
DavidBellamy/labrador | scripts/preprocessing/pretraining_jsonl_to_bert_bags.py | [
{
"identifier": "json_lines_loader",
"path": "lab_transformers/utils.py",
"snippet": "def json_lines_loader(filepath: Union[str, Path]) -> List[Dict[str, Any]]:\n \"\"\"Loads the JSON lines located at filepath and returns them as a list of flat dictionaries.\"\"\"\n\n jsonl = []\n with open(fil... | import json
import os.path as op
import sys
import numpy as np
import pandas as pd
from tqdm import tqdm
from lab_transformers.utils import json_lines_loader, NpEncoder | 1,528 |
def make_lab_bags_for_bert(
jsonl_batch: list, filepath: str, max_time_delta: float, min_bag_length: int = 3
) -> None:
"""Creates all unique bags of labs spanning max_time_delta (and with size min_bag_length) for the patients
in jsonl_batch.
Inputs:
> jsonl_batch: a list of JSON lines, where each line contains the 5 keys: subject_id, tokens,
time_deltas, hadm_id, and charttime.
> filepath: a string specifying the path to the desired output jsonl file.
> max_time_delta: a float specifying the maximum time period that a bag may span.
> min_bag_length: a positive integer specifying the minimum length requirement for each bag.
Returns:
> No return value, has the side effect of writing JSON lines containing all precomputed bags for each patient to
the file at filepath. Each JSON line has the following structure:
{'subject_id': 123456, token_bags: [[1, 2, 3], [4, 5, 6]], 'hadm_id': [101, 102],
'charttime': ["2175-12-30T17:03", "2143-08-14T05:01"]}
The hadm_id is the hospital admission ID for each corresponding token in token_bags. This may have
missingness. Similarly, 'charttime' is the moment when the labs were added to the patient's chart. When
max_time_delta = 0, each bag only has 1 'charttime' value, whereas bags with larger values of max_time_delta could
have multiple, in which case we take the minimum of all those times (i.e. the start time of the bag).
"""
# For each patient loop over time deltas and construct bags of labs with max_time_delta width
# Redundant subsets are filtered out
# Only bags with min_bag_length will be included
output_jsonl = []
for patient in tqdm(jsonl_batch, desc="Making bags of labs"):
# Separate out the patient's data components (reduces the time spent indexing below)
time_deltas = patient["time_deltas"]
tokens = patient["token"]
hadm_ids = patient["hadm_id"]
charttimes = patient["charttime"]
bags_of_lab_indexes = (
[]
) # will hold the bags of indexes, which correspond to bags of codes/values
token_bags = [] # will hold the bags of codes for the current patient
hadm_id_list = [] # will hold the hadm_id for each bag of codes/values
charttime_list = [] # will hold the start time fore ach bag of codes/values
end_of_list = len(patient["time_deltas"])
for index in range(end_of_list):
# Start a set of indexes to be returned, beginning with the current index
index_list = [index]
# collect indexes going rightwards until max_time_delta is surpassed or end of list is reached
cumsum = 0
while True:
index += 1
if index >= end_of_list:
break
cumsum += time_deltas[index]
if cumsum > max_time_delta:
break
index_list.append(index)
# pass if the proposed bag of lab indexes is not at least min_bag_length
if len(index_list) < min_bag_length:
continue
# collect this proposed bag of lab indexes, only if it isn't a subset of any that came before it
sets = {frozenset(e) for e in bags_of_lab_indexes}
proposed_indexes = set(index_list)
if not any(proposed_indexes <= s for s in sets):
bags_of_lab_indexes.append(index_list)
# Convert the bag of lab indexes into the corresponding lab codes, values, hadm_id's and charttimes
codes = [tokens[i] for i in index_list]
temp_hadm_ids = [hadm_ids[i] for i in index_list]
temp_charttimes = np.array(
[pd.to_datetime(charttimes[i]) for i in index_list],
dtype=np.datetime64,
)
bag_start_time = min(temp_charttimes)
# If there were multiple hospital admission IDs for the same bag, assign 'NA' to this bag's hadm_id
if len(set(temp_hadm_ids)) > 1:
hadm_id = float("nan")
else:
hadm_id = temp_hadm_ids[
0
] # take the first hadm_id from the list, since all are the same
token_bags.append(codes)
hadm_id_list.append(hadm_id)
charttime_list.append(bag_start_time)
if len(bags_of_lab_indexes) > 0:
patient_jsonl = {
"subject_id": patient["subject_id"],
"token_bags": token_bags,
"hadm_id": hadm_id_list,
"charttime": np.datetime_as_string(charttime_list, unit="m").tolist(),
}
output_jsonl.append(patient_jsonl)
# Write JSON lines
first_line = True
mode = "w"
for patient in tqdm(output_jsonl, desc=f"Writing JSON lines..."):
# Write patient to file
with open(filepath, mode=mode, encoding="utf-8") as f:
|
def make_lab_bags_for_bert(
jsonl_batch: list, filepath: str, max_time_delta: float, min_bag_length: int = 3
) -> None:
"""Creates all unique bags of labs spanning max_time_delta (and with size min_bag_length) for the patients
in jsonl_batch.
Inputs:
> jsonl_batch: a list of JSON lines, where each line contains the 5 keys: subject_id, tokens,
time_deltas, hadm_id, and charttime.
> filepath: a string specifying the path to the desired output jsonl file.
> max_time_delta: a float specifying the maximum time period that a bag may span.
> min_bag_length: a positive integer specifying the minimum length requirement for each bag.
Returns:
> No return value, has the side effect of writing JSON lines containing all precomputed bags for each patient to
the file at filepath. Each JSON line has the following structure:
{'subject_id': 123456, token_bags: [[1, 2, 3], [4, 5, 6]], 'hadm_id': [101, 102],
'charttime': ["2175-12-30T17:03", "2143-08-14T05:01"]}
The hadm_id is the hospital admission ID for each corresponding token in token_bags. This may have
missingness. Similarly, 'charttime' is the moment when the labs were added to the patient's chart. When
max_time_delta = 0, each bag only has 1 'charttime' value, whereas bags with larger values of max_time_delta could
have multiple, in which case we take the minimum of all those times (i.e. the start time of the bag).
"""
# For each patient loop over time deltas and construct bags of labs with max_time_delta width
# Redundant subsets are filtered out
# Only bags with min_bag_length will be included
output_jsonl = []
for patient in tqdm(jsonl_batch, desc="Making bags of labs"):
# Separate out the patient's data components (reduces the time spent indexing below)
time_deltas = patient["time_deltas"]
tokens = patient["token"]
hadm_ids = patient["hadm_id"]
charttimes = patient["charttime"]
bags_of_lab_indexes = (
[]
) # will hold the bags of indexes, which correspond to bags of codes/values
token_bags = [] # will hold the bags of codes for the current patient
hadm_id_list = [] # will hold the hadm_id for each bag of codes/values
charttime_list = [] # will hold the start time fore ach bag of codes/values
end_of_list = len(patient["time_deltas"])
for index in range(end_of_list):
# Start a set of indexes to be returned, beginning with the current index
index_list = [index]
# collect indexes going rightwards until max_time_delta is surpassed or end of list is reached
cumsum = 0
while True:
index += 1
if index >= end_of_list:
break
cumsum += time_deltas[index]
if cumsum > max_time_delta:
break
index_list.append(index)
# pass if the proposed bag of lab indexes is not at least min_bag_length
if len(index_list) < min_bag_length:
continue
# collect this proposed bag of lab indexes, only if it isn't a subset of any that came before it
sets = {frozenset(e) for e in bags_of_lab_indexes}
proposed_indexes = set(index_list)
if not any(proposed_indexes <= s for s in sets):
bags_of_lab_indexes.append(index_list)
# Convert the bag of lab indexes into the corresponding lab codes, values, hadm_id's and charttimes
codes = [tokens[i] for i in index_list]
temp_hadm_ids = [hadm_ids[i] for i in index_list]
temp_charttimes = np.array(
[pd.to_datetime(charttimes[i]) for i in index_list],
dtype=np.datetime64,
)
bag_start_time = min(temp_charttimes)
# If there were multiple hospital admission IDs for the same bag, assign 'NA' to this bag's hadm_id
if len(set(temp_hadm_ids)) > 1:
hadm_id = float("nan")
else:
hadm_id = temp_hadm_ids[
0
] # take the first hadm_id from the list, since all are the same
token_bags.append(codes)
hadm_id_list.append(hadm_id)
charttime_list.append(bag_start_time)
if len(bags_of_lab_indexes) > 0:
patient_jsonl = {
"subject_id": patient["subject_id"],
"token_bags": token_bags,
"hadm_id": hadm_id_list,
"charttime": np.datetime_as_string(charttime_list, unit="m").tolist(),
}
output_jsonl.append(patient_jsonl)
# Write JSON lines
first_line = True
mode = "w"
for patient in tqdm(output_jsonl, desc=f"Writing JSON lines..."):
# Write patient to file
with open(filepath, mode=mode, encoding="utf-8") as f: | json_record = json.dumps(patient, cls=NpEncoder) | 1 | 2023-12-09 20:40:17+00:00 | 2k |
NLP-Core-Team/RealCode_eval | lm_eval/generators.py | [
{
"identifier": "Task",
"path": "lm_eval/datatypes.py",
"snippet": "class Task:\n repo: str\n repo_n: int\n path_from_root: str\n left_context: str\n right_context: str\n gt: str\n total_tests: int"
},
{
"identifier": "BaseParser",
"path": "lm_eval/context_parser.py",
... | import os
import typing as tp
import json
import re
import torch
import logging
from pathlib import Path
from dataclasses import asdict, fields
from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList
from tqdm import tqdm
from .datatypes import Task
from .context_parser import BaseParser, TrivialContextParser | 1,355 |
logger = logging.getLogger("RealCode")
class InfillGenerator:
def __init__(self,
model_path: str,
num_samples: int,
prefix_tokens: tp.Union[str, tp.List[int]] = [],
middle_tokens: tp.Union[str, tp.List[int]] = [],
suffix_tokens: tp.Union[str, tp.List[int]] = [],
max_context_length: int = None,
left_context_ratio: int = 1,
dtype = torch.bfloat16,
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"],
model_kwargs: tp.Dict = {},
generation_params: tp.Dict[str, tp.Any] = {},
context_parser: BaseParser = TrivialContextParser(),
add_extra_spaces_to_generation=0,
):
"""
Class to generate code in fill-in-the-middle mode
params:
model_path: str - which model to use for generation, anything that can be passed to AutoModelForCausalLM.from_pretrained
num_samples: int - number of samples to generate per task, values > 1 should be paired with generation_params
prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the left context. Can be either str or list of int tokens
middle_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
max_context_length: int = None - truncation length for prompt, measured in tokens (len(left_context) + len(right_context) < max_context_length)
left_context_ratio: int = 1 - proportion of max_context_length given to left_context. 1 means 1:1 split between left and right, 3 means 3:1 split in favor of left context
dtype=torch.bfloat16 - torch dtype to use for inference
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"] - regular expressions that determine end of geneartion
model_kwargs: tp.Dict = {} - kwargs to be passed to AutoModelForCausalLM.from_pretrained
generation_params: tp.Dict[str, tp.Any] = {} - kwargs to be passed to AutoModelForCausalLM.generate
context_parser: BaseParser = TrivialContextParser() - parser for left and right contexts
add_extra_spaces_to_generation=0 - number of added extra spaces add the begining of generation to fix indentation. May be required due to bugs in some tokenizers (e.g. Codellama)
"""
self.device = torch.device("cuda")
# self.device = torch.device("cpu")
logger.info(f"Loading model from {model_path} with kwargs f{model_kwargs}")
self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
self.model = AutoModelForCausalLM.from_pretrained(model_path,
torch_dtype=dtype, device_map="auto", trust_remote_code=True, **model_kwargs
).eval()
logger.info(f"Loaded model from {model_path} with kwargs f{model_kwargs}")
logger.info(f"Device map: \n{self.model.hf_device_map}")
self.num_samples = num_samples
self.prefix_tokens = self.tokenize_special_tokens(prefix_tokens)
self.middle_tokens = self.tokenize_special_tokens(middle_tokens)
self.suffix_tokens = self.tokenize_special_tokens(suffix_tokens)
logger.debug(f"prefix_tokens: {self.prefix_tokens}, middle_tokens: {self.middle_tokens}, suffix_tokens: {self.suffix_tokens}")
self.eos_sequences = eos_sequences[:]
#context truncation parameters
self.max_context_length = max_context_length
self.left_context_truncate_at = left_context_ratio / (left_context_ratio + 1)
self.right_context_truncate_at = 1 / (left_context_ratio + 1)
self.generation_params = generation_params
self.generation_params['num_return_sequences'] = self.num_samples
self.context_parser = context_parser
# Number of tokens before and after truncating to max_context_length
self.count_inferenced_tokens = []
self.count_possible_tokens = []
self.add_extra_spaces_to_generation = add_extra_spaces_to_generation
def tokenize_special_tokens(self, str_or_list: tp.Union[str, tp.List[int]]) -> torch.Tensor:
if type(str_or_list) == str:
return self.tokenizer.encode(str_or_list, return_tensors="pt", add_special_tokens=False).to(self.device) # ['input_ids']
else:
return torch.as_tensor(str_or_list).unsqueeze(0).to(self.device)
|
logger = logging.getLogger("RealCode")
class InfillGenerator:
def __init__(self,
model_path: str,
num_samples: int,
prefix_tokens: tp.Union[str, tp.List[int]] = [],
middle_tokens: tp.Union[str, tp.List[int]] = [],
suffix_tokens: tp.Union[str, tp.List[int]] = [],
max_context_length: int = None,
left_context_ratio: int = 1,
dtype = torch.bfloat16,
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"],
model_kwargs: tp.Dict = {},
generation_params: tp.Dict[str, tp.Any] = {},
context_parser: BaseParser = TrivialContextParser(),
add_extra_spaces_to_generation=0,
):
"""
Class to generate code in fill-in-the-middle mode
params:
model_path: str - which model to use for generation, anything that can be passed to AutoModelForCausalLM.from_pretrained
num_samples: int - number of samples to generate per task, values > 1 should be paired with generation_params
prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the left context. Can be either str or list of int tokens
middle_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the right context (see Fill-In-the-Middle). Can be either str or list of int tokens
max_context_length: int = None - truncation length for prompt, measured in tokens (len(left_context) + len(right_context) < max_context_length)
left_context_ratio: int = 1 - proportion of max_context_length given to left_context. 1 means 1:1 split between left and right, 3 means 3:1 split in favor of left context
dtype=torch.bfloat16 - torch dtype to use for inference
eos_sequences: tp.List[str] = ["\sclass\s", "\sdef\s", "\s@", "<|endoftext|>", "<extra_id_0>"] - regular expressions that determine end of geneartion
model_kwargs: tp.Dict = {} - kwargs to be passed to AutoModelForCausalLM.from_pretrained
generation_params: tp.Dict[str, tp.Any] = {} - kwargs to be passed to AutoModelForCausalLM.generate
context_parser: BaseParser = TrivialContextParser() - parser for left and right contexts
add_extra_spaces_to_generation=0 - number of added extra spaces add the begining of generation to fix indentation. May be required due to bugs in some tokenizers (e.g. Codellama)
"""
self.device = torch.device("cuda")
# self.device = torch.device("cpu")
logger.info(f"Loading model from {model_path} with kwargs f{model_kwargs}")
self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
self.model = AutoModelForCausalLM.from_pretrained(model_path,
torch_dtype=dtype, device_map="auto", trust_remote_code=True, **model_kwargs
).eval()
logger.info(f"Loaded model from {model_path} with kwargs f{model_kwargs}")
logger.info(f"Device map: \n{self.model.hf_device_map}")
self.num_samples = num_samples
self.prefix_tokens = self.tokenize_special_tokens(prefix_tokens)
self.middle_tokens = self.tokenize_special_tokens(middle_tokens)
self.suffix_tokens = self.tokenize_special_tokens(suffix_tokens)
logger.debug(f"prefix_tokens: {self.prefix_tokens}, middle_tokens: {self.middle_tokens}, suffix_tokens: {self.suffix_tokens}")
self.eos_sequences = eos_sequences[:]
#context truncation parameters
self.max_context_length = max_context_length
self.left_context_truncate_at = left_context_ratio / (left_context_ratio + 1)
self.right_context_truncate_at = 1 / (left_context_ratio + 1)
self.generation_params = generation_params
self.generation_params['num_return_sequences'] = self.num_samples
self.context_parser = context_parser
# Number of tokens before and after truncating to max_context_length
self.count_inferenced_tokens = []
self.count_possible_tokens = []
self.add_extra_spaces_to_generation = add_extra_spaces_to_generation
def tokenize_special_tokens(self, str_or_list: tp.Union[str, tp.List[int]]) -> torch.Tensor:
if type(str_or_list) == str:
return self.tokenizer.encode(str_or_list, return_tensors="pt", add_special_tokens=False).to(self.device) # ['input_ids']
else:
return torch.as_tensor(str_or_list).unsqueeze(0).to(self.device)
| def _prepare_tokens(self, task: Task) -> torch.Tensor: | 0 | 2023-12-12 12:43:06+00:00 | 2k |
centrifugal/grand-chat-tutorial | backend/chat/views.py | [
{
"identifier": "Message",
"path": "backend/chat/models.py",
"snippet": "class Message(models.Model):\n room = models.ForeignKey(Room, related_name='messages', on_delete=models.CASCADE)\n # Note, message may have null user – we consider such messages \"system\". These messages\n # initiated by ... | import json
import logging
import requests
from requests.adapters import HTTPAdapter, Retry
from django.conf import settings
from django.db import transaction
from django.db.models import Exists, OuterRef, Count
from django.shortcuts import get_object_or_404
from django.utils import timezone
from rest_framework import status, viewsets
from rest_framework.generics import ListCreateAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from .models import Message, Room, RoomMember, Outbox, CDC
from .serializers import MessageSerializer, RoomSearchSerializer, RoomSerializer, RoomMemberSerializer | 982 |
class RoomListViewSet(ListModelMixin, GenericViewSet):
serializer_class = RoomSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self):
|
class RoomListViewSet(ListModelMixin, GenericViewSet):
serializer_class = RoomSerializer
permission_classes = [IsAuthenticated]
def get_queryset(self): | return Room.objects.annotate( | 1 | 2023-12-06 10:13:26+00:00 | 2k |
shinkungoo/SymbolicCDM | SCDM/parameter.py | [
{
"identifier": "accuracy",
"path": "SCDM/eval.py",
"snippet": "def accuracy(y_pred, y_true, threshold=0.5, weights=None):\n pred = np.array(y_pred)\n true = np.array(y_true)\n result = np.where(pred > threshold, 1, 0)\n if weights is not None:\n correct = np.sum((true == result) * we... | import torch
import torch.nn as nn
from tqdm import tqdm
from .eval import accuracy, area_under_curve, f1_score
from .utility import init_interaction_function | 1,076 |
class ComputeIF(nn.Module):
    """Learnable interaction-function network.

    Maps (student, question) index pairs, masked by a Q-matrix row, to a
    predicted response probability in (0, 1).
    """

    def __init__(self, student_number, question_number, knowledge_number):
        super().__init__()
        # Per-student proficiency and per-question difficulty live in a shared
        # knowledge-concept space; discrimination is a single scalar per question.
        self.student_emb = nn.Embedding(student_number, knowledge_number)
        self.difficulty = nn.Embedding(question_number, knowledge_number)
        self.discrimination = nn.Embedding(question_number, 1)
        # Xavier-initialize every weight tensor; other parameters are untouched.
        for param_name, param in self.named_parameters():
            if "weight" not in param_name:
                continue
            nn.init.xavier_normal_(param)

    def forward(self, student_id, question, q_matrix_line, interaction_func):
        """Return predicted correctness probabilities, flattened to 1-D."""
        theta = torch.sigmoid(self.student_emb(student_id))      # proficiency
        beta = torch.sigmoid(self.difficulty(question))          # difficulty
        disc = torch.sigmoid(self.discrimination(question))      # discrimination
        logits = interaction_func(disc, theta - beta, q_matrix_line)
        return torch.sigmoid(logits).view(-1)
class Parameter:
def __init__(self,
student_number: int,
question_number: int,
knowledge_number: int,):
self.net = ComputeIF(student_number, question_number, knowledge_number)
self.student_number = student_number
self.question_number = question_number
self.knowledge_number = knowledge_number
self.interaction_function = init_interaction_function
self.interaction_function_string = "initial interaction function"
    def train(self, train_set, epochs, device="cpu", lr=0.002, init=True):
        """Fit the wrapped network on ``train_set`` with Adam and BCE loss.

        Args:
            train_set: Iterable of (student_id, question, q_matrix_line, y)
                tensor batches.
            epochs: Number of full passes over ``train_set``.
            device: Torch device string for the model and batches.
            lr: Adam learning rate.
            init: When True, re-run Xavier initialization on every weight
                tensor first (fresh start rather than warm start).
        """
        # initialize
        if init:
            for name, param in self.net.named_parameters():
                if "weight" in name:
                    nn.init.xavier_normal_(param)
        self.net = self.net.to(device)
        self.net.train()
        loss_function = nn.BCELoss()
        optimizer = torch.optim.Adam(self.net.parameters(), lr=lr)
        with tqdm(total=epochs, desc="Training Process", unit="epoch") as pbar:
            for epoch in range(epochs):
                # NOTE(review): epoch_losses is accumulated but never read in
                # this method -- candidate for display in pbar or removal.
                epoch_losses = []
                for batch_data in train_set:
                    student_id, question, q_matrix_line, y = batch_data
                    student_id: torch.Tensor = student_id.to(device)
                    question: torch.Tensor = question.to(device)
                    q_matrix_line: torch.Tensor = q_matrix_line.to(device)
                    y: torch.Tensor = y.to(device)
                    pred: torch.Tensor = self.net(student_id,
                                                  question,
                                                  q_matrix_line,
                                                  self.interaction_function)
                    loss = loss_function(pred, y)
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    # BCELoss defaults to reduction='mean', so loss is already
                    # a scalar and .mean() here is a no-op.
                    epoch_losses.append(loss.mean().item())
                pbar.update()
def evaluate(self, test_set, interaction_func, device="cpu"):
self.net = self.net.to(device)
self.net.eval()
y_true, y_pred = [], []
for batch_data in test_set:
student_id, question, q_matrix_line, y = batch_data
student_id: torch.Tensor = student_id.to(device)
question: torch.Tensor = question.to(device)
q_matrix_line: torch.Tensor = q_matrix_line.to(device)
pred: torch.Tensor = self.net(student_id,
question,
q_matrix_line,
interaction_func)
y_pred.extend(pred.detach().cpu().tolist())
y_true.extend(y.tolist())
acc = accuracy(y_pred, y_true)
auc = area_under_curve(y_pred, y_true)
|
class ComputeIF(nn.Module):
def __init__(self,
student_number,
question_number,
knowledge_number):
super(ComputeIF, self).__init__()
self.student_emb = nn.Embedding(student_number, knowledge_number)
self.difficulty = nn.Embedding(question_number, knowledge_number)
self.discrimination = nn.Embedding(question_number, 1)
# initialize
for name, param in self.named_parameters():
if "weight" in name:
nn.init.xavier_normal_(param)
def forward(self, student_id, question, q_matrix_line, interaction_func):
proficiency_level = torch.sigmoid(self.student_emb(student_id))
difficulty = torch.sigmoid(self.difficulty(question))
discrimination = torch.sigmoid(self.discrimination(question))
input_x = interaction_func(discrimination, proficiency_level - difficulty, q_matrix_line)
output = torch.sigmoid(input_x)
return output.view(-1)
class Parameter:
def __init__(self,
student_number: int,
question_number: int,
knowledge_number: int,):
self.net = ComputeIF(student_number, question_number, knowledge_number)
self.student_number = student_number
self.question_number = question_number
self.knowledge_number = knowledge_number
self.interaction_function = init_interaction_function
self.interaction_function_string = "initial interaction function"
def train(self, train_set, epochs, device="cpu", lr=0.002, init=True):
# initialize
if init:
for name, param in self.net.named_parameters():
if "weight" in name:
nn.init.xavier_normal_(param)
self.net = self.net.to(device)
self.net.train()
loss_function = nn.BCELoss()
optimizer = torch.optim.Adam(self.net.parameters(), lr=lr)
with tqdm(total=epochs, desc="Training Process", unit="epoch") as pbar:
for epoch in range(epochs):
epoch_losses = []
for batch_data in train_set:
student_id, question, q_matrix_line, y = batch_data
student_id: torch.Tensor = student_id.to(device)
question: torch.Tensor = question.to(device)
q_matrix_line: torch.Tensor = q_matrix_line.to(device)
y: torch.Tensor = y.to(device)
pred: torch.Tensor = self.net(student_id,
question,
q_matrix_line,
self.interaction_function)
loss = loss_function(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_losses.append(loss.mean().item())
pbar.update()
def evaluate(self, test_set, interaction_func, device="cpu"):
self.net = self.net.to(device)
self.net.eval()
y_true, y_pred = [], []
for batch_data in test_set:
student_id, question, q_matrix_line, y = batch_data
student_id: torch.Tensor = student_id.to(device)
question: torch.Tensor = question.to(device)
q_matrix_line: torch.Tensor = q_matrix_line.to(device)
pred: torch.Tensor = self.net(student_id,
question,
q_matrix_line,
interaction_func)
y_pred.extend(pred.detach().cpu().tolist())
y_true.extend(y.tolist())
acc = accuracy(y_pred, y_true)
auc = area_under_curve(y_pred, y_true) | f1 = f1_score(y_pred, y_true) | 2 | 2023-12-09 13:37:15+00:00 | 2k |
pan-x-c/EE-LLM | megatron/core/tensor_parallel/mappings.py | [
{
"identifier": "get_tensor_and_expert_parallel_group",
"path": "megatron/core/parallel_state.py",
"snippet": "def get_tensor_and_expert_parallel_group():\n assert (\n _TENSOR_AND_EXPERT_PARALLEL_GROUP is not None\n ), 'tensor and expert parallel group is not initialized'\n return _TENSO... | import torch
from megatron.core.parallel_state import (
get_tensor_and_expert_parallel_group,
get_tensor_model_parallel_group,
get_tensor_model_parallel_rank,
get_tensor_model_parallel_world_size,
)
from .utils import split_tensor_along_last_dim | 789 | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
def _reduce(input_):
    """All-reduce ``input_`` in place across the tensor-model-parallel group.

    Returns the tensor (reduced when the group has more than one rank,
    untouched otherwise) so the call can be chained.
    """
    # A single-rank group has nothing to communicate.
    if get_tensor_model_parallel_world_size() > 1:
        torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group())
    return input_
def _split_along_last_dim(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Split along last dimension.
| # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
def _reduce(input_):
"""All-reduce the input tensor across model parallel group."""
# Bypass the function if we are using only 1 GPU.
if get_tensor_model_parallel_world_size() == 1:
return input_
# All-reduce.
torch.distributed.all_reduce(input_, group=get_tensor_model_parallel_group())
return input_
def _split_along_last_dim(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
world_size = get_tensor_model_parallel_world_size()
# Bypass the function if we are using only 1 GPU.
if world_size == 1:
return input_
# Split along last dimension. | input_list = split_tensor_along_last_dim(input_, world_size) | 4 | 2023-12-07 08:29:38+00:00 | 2k |
kanadeblisst00/WeChat-PyRobot | src/wechat_pyrobot/hookmsg32.py | [
{
"identifier": "CDataJSONEncoder",
"path": "src/wechat_pyrobot/ctypes_json.py",
"snippet": "class CDataJSONEncoder(JSONEncoder):\r\n def default(self, obj):\r\n if isinstance(obj, (Array, list)):\r\n return [self.default(e) for e in obj]\r\n\r\n if isinstance(obj, _Pointer):... | import json
from py_process_hooker import Hook
from py_process_hooker.winapi import *
from .ctypes_json import CDataJSONEncoder
from .offset import CALL_OFFSET
| 904 |
struct_size = 0x2E0
class GeneralStructW32(Structure):
    # ctypes mirror of a 32-bit in-memory WeChat string-like value (the name
    # suggests a general-purpose holder).  Field order and types define the
    # binary layout -- do not reorder or resize.
    _fields_ = [
        ('value', c_wchar_p),          # pointer to wide-char (UTF-16 on Windows) text
        ('len1', c_uint32),            # length -- units (chars vs bytes) unconfirmed
        ('len2', c_uint32),            # presumably capacity or a second length -- confirm
        ('_unkown_value0', c_uint32),  # unidentified member/padding
        ('_unkown_value1', c_uint32)   # unidentified member/padding
    ]
class WeChatMsgStruct32(Structure):
    # ctypes mirror of the 32-bit WeChat in-memory message record read by the
    # hook.  The layout is reverse-engineered: _unkown_value* runs are
    # unidentified members/padding.  Field order and types define the binary
    # layout -- do not reorder or resize any entry.
    _fields_ = [
        ('_unkown_value0', c_uint32 * 8),
        ('localid', c_uint32),              # presumably DB-local message id -- confirm
        ('_unkown_value2', c_uint32 * 3),
        ('msgid', c_ulonglong),             # 64-bit message id
        ('msg_type', c_uint32),
        ('is_self_msg', c_uint32),          # nonzero when sent by the logged-in account -- TODO confirm
        ('_unkown_value3', c_uint32),
        ('timestamp', c_uint32),
        ('sender', GeneralStructW32),       # sender identifier string
        ('_unkown_value4', c_uint32 * 5),
        ('content', GeneralStructW32),      # message body string
        ('_unkown_value5', c_uint32 * 66),
        ('room_sender', GeneralStructW32),  # per-member sender inside group chats -- presumably
        ('sign', GeneralStructW32),
        ('thumb_path', GeneralStructW32),   # thumbnail file path (media messages)
        ('file_path', GeneralStructW32),    # attachment file path (media messages)
    ]
|
struct_size = 0x2E0
class GeneralStructW32(Structure):
_fields_ = [
('value', c_wchar_p),
('len1', c_uint32),
('len2', c_uint32),
('_unkown_value0', c_uint32),
('_unkown_value1', c_uint32)
]
class WeChatMsgStruct32(Structure):
_fields_ = [
('_unkown_value0', c_uint32 * 8),
('localid', c_uint32),
('_unkown_value2', c_uint32 * 3),
('msgid', c_ulonglong),
('msg_type', c_uint32),
('is_self_msg', c_uint32),
('_unkown_value3', c_uint32),
('timestamp', c_uint32),
('sender', GeneralStructW32),
('_unkown_value4', c_uint32 * 5),
('content', GeneralStructW32),
('_unkown_value5', c_uint32 * 66),
('room_sender', GeneralStructW32),
('sign', GeneralStructW32),
('thumb_path', GeneralStructW32),
('file_path', GeneralStructW32),
]
| class MyCDataJSONEncoder(CDataJSONEncoder):
| 0 | 2023-12-12 08:43:11+00:00 | 2k |
mitrefireline/simharness | simharness2/environments/tests/check_reactive_environments.py | [
{
"identifier": "ReactiveDiscreteHarness",
"path": "simharness2/environments/reactive.py",
"snippet": "class ReactiveHarness(RLHarness): # noqa: D205,D212,D415\n def __init__(self, config: EnvContext) -> None:\n def set_trial_results_path(self, path: str) -> None:\n def step(\n self, ac... | import argparse
import logging
import os
import yaml
import traceback
from typing import Any, Dict
from ray.rllib.utils.pre_checks.env import check_gym_environments
from simharness2.environments.reactive import (
ReactiveDiscreteHarness,
ReactiveHarness,
)
from simharness2.sim_registry import get_simulation_from_name | 1,045 | # noqa : D212,D415
"""
To avoid an ImportError and/or ModueNotFoundError, run this script as a module:
python -m simharness2.environments.tests.check_reactive_environments \
--config <path_to_config_file> --env-type <train|eval>
(above command should be executed from the root of the repository)
"""
def setup_args():
    """Parse command line options (mode and config)."""
    parser = argparse.ArgumentParser(description="Test custom environment with RLlib.")
    parser.add_argument(
        "--config", required=True, type=str, help="Path to (harness) config file."
    )
    parser.add_argument(
        "--env-type",
        required=True,
        type=str,
        help="Environment type.",
        choices=["train", "eval"],
    )
    return parser.parse_args()
def get_config(cfg_path: str) -> Dict[str, Any]:
    """Load the YAML config file from the given path.

    Arguments:
        cfg_path: A string indicating the file path to load the YAML file from.

    Returns:
        A dictionary containing the contents of the YAML configuration file.
    """
    # Read explicitly as UTF-8: YAML is a Unicode format, and relying on the
    # platform default encoding (e.g. cp1252 on Windows) can corrupt any
    # non-ASCII characters in the config.
    with open(cfg_path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)
def reactive_multidiscrete_env_creator(env_config: Dict[str, Any]) -> ReactiveHarness:
    """Build a ``ReactiveHarness`` environment from an RLlib config mapping.

    Every key in ``env_config`` is forwarded as a keyword argument to the
    harness constructor.

    Arguments:
        env_config: Keyword arguments for ``ReactiveHarness``.

    Returns:
        A freshly constructed ``ReactiveHarness`` (environment) instance.
    """
    harness = ReactiveHarness(**env_config)
    return harness
| # noqa : D212,D415
"""
To avoid an ImportError and/or ModueNotFoundError, run this script as a module:
python -m simharness2.environments.tests.check_reactive_environments \
--config <path_to_config_file> --env-type <train|eval>
(above command should be executed from the root of the repository)
"""
def setup_args():
"""Parse command line options (mode and config)."""
parser = argparse.ArgumentParser(description="Test custom environment with RLlib.")
help_s = "Path to (harness) config file."
parser.add_argument("--config", required=True, type=str, help=help_s)
help_s, choices = "Environment type.", ["train", "eval"]
parser.add_argument(
"--env-type", required=True, type=str, help=help_s, choices=choices
)
return parser.parse_args()
def get_config(cfg_path: str) -> Dict[str, Any]:
"""Load the YAML config file from the given path.
Arguments:
cfg_path: A string indicating the file path to load the YAML file from.
Returns:
A dictionary containing the contents of the YAML configuration file.
"""
with open(cfg_path, "r") as f:
return yaml.safe_load(f)
def reactive_multidiscrete_env_creator(env_config: Dict[str, Any]) -> ReactiveHarness:
"""Environment creator for RLlib.
Arguments:
env_config: A dictionary containing the environment configuration.
Returns:
An instance of the ReactiveHarness (environment) class.
"""
return ReactiveHarness(**env_config)
| def reactive_discrete_env_creator(env_config: str) -> ReactiveDiscreteHarness: | 0 | 2023-12-08 19:13:31+00:00 | 2k |
JeffJerseyCow/eviloauth | eviloauth/dispatcher.py | [
{
"identifier": "IDP",
"path": "eviloauth/idp.py",
"snippet": "class IDP():\n idps = get_idps()\n authz_endpoint = 'https://login.microsoftonline.com/common/oauth2/v2.0/authorize'\n token_endpoint = 'https://login.microsoftonline.com/common/oauth2/v2.0/token'\n\n def __init__(self, idp, redi... | import sys
import logging
from eviloauth.idp import IDP
from eviloauth.exceptions import EviloauthCommandException | 1,536 |
class Dispatcher:
def __init__(self, flask_server, module_dict, cache, redirect_server):
logging.debug('Initializing dispatcher')
logging.debug(f'\tFlask server: {flask_server}')
logging.debug(f'\tModule dict: {module_dict}')
logging.debug(f'\tCache: {cache}')
logging.debug(f'\tRedirect server: {redirect_server}')
self.flask_server = flask_server
self.module_dict = module_dict
self.cache = cache
self.redirect_server = redirect_server
def dispatch(self, commands):
cmd, sub, arg, *args = commands.split(' ') + [None, None, None]
if cmd == 'exit':
self.dispatch_exit()
elif cmd == 'module':
self.dispatch_module(cmd, sub, arg)
elif cmd == 'tokens':
self.dispatch_tokens(cmd, sub)
elif cmd == 'idp':
self.dispatch_idp(cmd, sub, arg)
elif cmd == 'target':
self.dispatch_target(cmd, sub, arg)
else:
raise EviloauthCommandException(
'Unknown command %s' % cmd)
def dispatch_exit(self):
print('Exiting...')
self.flask_server.shutdown()
sys.exit()
def dispatch_module(self, cmd, sub, arg):
mod = self.module_dict[f'eviloauth.{cmd}.{sub}.{arg}']
mod.__run__(self.cache.get('target'), 0)
def dispatch_tokens(self, cmd, sub):
general_tokens = self.cache.get('tokens')
if sub == 'list':
print([v for v in general_tokens.keys()])
elif sub == 'add':
logging.error('Not implemented yet')
else:
raise EviloauthCommandException(
'Unknown "%s" command %s' % (cmd, sub))
def dispatch_idp(self, cmd, sub, arg):
if sub == 'list':
print('Current IDP: %s' % self.cache.get('idp'))
elif sub == 'configure':
|
class Dispatcher:
def __init__(self, flask_server, module_dict, cache, redirect_server):
logging.debug('Initializing dispatcher')
logging.debug(f'\tFlask server: {flask_server}')
logging.debug(f'\tModule dict: {module_dict}')
logging.debug(f'\tCache: {cache}')
logging.debug(f'\tRedirect server: {redirect_server}')
self.flask_server = flask_server
self.module_dict = module_dict
self.cache = cache
self.redirect_server = redirect_server
def dispatch(self, commands):
cmd, sub, arg, *args = commands.split(' ') + [None, None, None]
if cmd == 'exit':
self.dispatch_exit()
elif cmd == 'module':
self.dispatch_module(cmd, sub, arg)
elif cmd == 'tokens':
self.dispatch_tokens(cmd, sub)
elif cmd == 'idp':
self.dispatch_idp(cmd, sub, arg)
elif cmd == 'target':
self.dispatch_target(cmd, sub, arg)
else:
raise EviloauthCommandException(
'Unknown command %s' % cmd)
def dispatch_exit(self):
print('Exiting...')
self.flask_server.shutdown()
sys.exit()
def dispatch_module(self, cmd, sub, arg):
mod = self.module_dict[f'eviloauth.{cmd}.{sub}.{arg}']
mod.__run__(self.cache.get('target'), 0)
def dispatch_tokens(self, cmd, sub):
general_tokens = self.cache.get('tokens')
if sub == 'list':
print([v for v in general_tokens.keys()])
elif sub == 'add':
logging.error('Not implemented yet')
else:
raise EviloauthCommandException(
'Unknown "%s" command %s' % (cmd, sub))
def dispatch_idp(self, cmd, sub, arg):
if sub == 'list':
print('Current IDP: %s' % self.cache.get('idp'))
elif sub == 'configure': | idp = IDP(arg, self.redirect_server) | 0 | 2023-12-09 11:21:25+00:00 | 2k |
racinette/querky | querky/backends/postgresql/asyncpg/name_type_mapper.py | [
{
"identifier": "PostgresqlNameTypeMapper",
"path": "querky/backends/postgresql/name_type_mapper.py",
"snippet": "class PostgresqlNameTypeMapper(PostgresqlTypeMapper):\n def __init__(self, typemap: dict[str, dict[str, TypeMetaData]]):\n self.type_cache = dict()\n # копируем\n sel... | from querky.backends.postgresql.name_type_mapper import PostgresqlNameTypeMapper
from querky.base_types import TypeMetaData
from querky.common_imports import DATETIME_MODULE
from querky.common_imports import DECIMAL as DECIMAL_IMPORT
from querky.common_imports import UUID as UUID_IMPORT
from querky.common_imports import UNION as UNION_IMPORT | 1,108 |
ASYNCPG_RANGE_IMPORT = "from asyncpg import Range as _Range"
ASYNCPG_RECORD_IMPORT = "from asyncpg import Record as _Record"
ASYNCPG_BITSTRING_IMPORT = "from asyncpg import BitString as _BitString"
ASYNCPG_BOX_IMPORT = "from asyncpg import Box as _Box"
ASYNCPG_CIRCLE_IMPORT = "from asyncpg import Circle as _Circle"
ASYNCPG_LINE_IMPORT = "from asyncpg import Line as _Line"
ASYNCPG_LINE_SEGMENT_IMPORT = "from asyncpg import LineSegment as _LineSegment"
ASYNCPG_PATH_IMPORT = "from asyncpg import Path as _Path"
ASYNCPG_POINT_IMPORT = "from asyncpg import Point as _Point"
ASYNCPG_POLYGON_IMPORT = "from asyncpg import Polygon as _Polygon"
|
ASYNCPG_RANGE_IMPORT = "from asyncpg import Range as _Range"
ASYNCPG_RECORD_IMPORT = "from asyncpg import Record as _Record"
ASYNCPG_BITSTRING_IMPORT = "from asyncpg import BitString as _BitString"
ASYNCPG_BOX_IMPORT = "from asyncpg import Box as _Box"
ASYNCPG_CIRCLE_IMPORT = "from asyncpg import Circle as _Circle"
ASYNCPG_LINE_IMPORT = "from asyncpg import Line as _Line"
ASYNCPG_LINE_SEGMENT_IMPORT = "from asyncpg import LineSegment as _LineSegment"
ASYNCPG_PATH_IMPORT = "from asyncpg import Path as _Path"
ASYNCPG_POINT_IMPORT = "from asyncpg import Point as _Point"
ASYNCPG_POLYGON_IMPORT = "from asyncpg import Polygon as _Polygon"
| INT = TypeMetaData("int") | 1 | 2023-12-13 15:16:34+00:00 | 2k |
Shahzadnit/EZ-CLIP | utils/solver.py | [
{
"identifier": "WarmupMultiStepLR",
"path": "utils/lr_scheduler.py",
"snippet": "class WarmupMultiStepLR(WarmupLR):\r\n\r\n def __init__(self,\r\n optimizer,\r\n milestones,\r\n gamma=0.1,\r\n warmup_epochs=0,\r\n warmup... | import torch.optim as optim
from utils.lr_scheduler import WarmupMultiStepLR, WarmupCosineAnnealingLR
| 1,071 |
def _optimizer(config, model):
if config.solver.optim == 'adam':
optimizer = optim.Adam([{'params': model.parameters()}],
lr=config.solver.lr, betas=(0.9, 0.98), eps=1e-8,
weight_decay=0.2) # Params used from paper, the lr is smaller, more safe for fine tuning to new dataset
print('Adam')
elif config.solver.optim == 'sgd':
optimizer = optim.SGD([{'params': model.parameters()}],
config.solver.lr,
momentum=config.solver.momentum,
weight_decay=config.solver.weight_decay)
print('SGD')
elif config.solver.optim == 'adamw':
vision_params = list(map(id, model.visual.parameters()))
text_params = filter(lambda p: id(p) not in vision_params,
model.parameters())
optimizer = optim.AdamW([{'params': text_params},
{'params': model.visual.parameters(), 'lr': config.solver.lr * config.solver.ratio},],
betas=(0.9, 0.98), lr=config.solver.lr, eps=1e-8,
weight_decay=config.solver.weight_decay) # Params used from paper, the lr is smaller, more safe for fine tuning to new dataset
for param_group in optimizer.param_groups:
print(param_group['lr'])
print('AdamW')
else:
raise ValueError('Unknown optimizer: {}'.format(config.solver.optim))
return optimizer
def _lr_scheduler(config,optimizer):
if config.solver.type == 'cosine':
lr_scheduler = WarmupCosineAnnealingLR(
optimizer,
config.solver.epochs,
warmup_epochs=config.solver.lr_warmup_step
)
elif config.solver.type == 'multistep':
if isinstance(config.solver.lr_decay_step, list):
milestones = config.solver.lr_decay_step
elif isinstance(config.solver.lr_decay_step, int):
milestones = [
config.solver.lr_decay_step * (i + 1)
for i in range(config.solver.epochs //
config.solver.lr_decay_step)]
else:
raise ValueError("error learning rate decay step: {}".format(type(config.solver.lr_decay_step)))
|
def _optimizer(config, model):
if config.solver.optim == 'adam':
optimizer = optim.Adam([{'params': model.parameters()}],
lr=config.solver.lr, betas=(0.9, 0.98), eps=1e-8,
weight_decay=0.2) # Params used from paper, the lr is smaller, more safe for fine tuning to new dataset
print('Adam')
elif config.solver.optim == 'sgd':
optimizer = optim.SGD([{'params': model.parameters()}],
config.solver.lr,
momentum=config.solver.momentum,
weight_decay=config.solver.weight_decay)
print('SGD')
elif config.solver.optim == 'adamw':
vision_params = list(map(id, model.visual.parameters()))
text_params = filter(lambda p: id(p) not in vision_params,
model.parameters())
optimizer = optim.AdamW([{'params': text_params},
{'params': model.visual.parameters(), 'lr': config.solver.lr * config.solver.ratio},],
betas=(0.9, 0.98), lr=config.solver.lr, eps=1e-8,
weight_decay=config.solver.weight_decay) # Params used from paper, the lr is smaller, more safe for fine tuning to new dataset
for param_group in optimizer.param_groups:
print(param_group['lr'])
print('AdamW')
else:
raise ValueError('Unknown optimizer: {}'.format(config.solver.optim))
return optimizer
def _lr_scheduler(config,optimizer):
if config.solver.type == 'cosine':
lr_scheduler = WarmupCosineAnnealingLR(
optimizer,
config.solver.epochs,
warmup_epochs=config.solver.lr_warmup_step
)
elif config.solver.type == 'multistep':
if isinstance(config.solver.lr_decay_step, list):
milestones = config.solver.lr_decay_step
elif isinstance(config.solver.lr_decay_step, int):
milestones = [
config.solver.lr_decay_step * (i + 1)
for i in range(config.solver.epochs //
config.solver.lr_decay_step)]
else:
raise ValueError("error learning rate decay step: {}".format(type(config.solver.lr_decay_step)))
| lr_scheduler = WarmupMultiStepLR(
| 0 | 2023-12-12 13:11:20+00:00 | 2k |
Gwolfgit/Authoritah | models.py | [
{
"identifier": "get_tailscale_ip4",
"path": "functions.py",
"snippet": "def get_tailscale_ip4() -> str:\n try:\n output = subprocess.check_output(\n [\"tailscale\", \"ip\", \"-4\"],\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n ip ... | import orjson
from typing import Any, Dict, Tuple
from functions import get_tailscale_ip4, get_tailscale_ip6
from pathlib import Path | 738 |
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def load_config():
with open(Path(Path(__file__).parent.resolve(), "config.json"), "r") as fd:
return dotdict(orjson.loads(fd.read()))
class DefaultDict(dict):
"""
A dictionary subclass that maintains default keys and values.
"""
def __init__(self, default_values: Dict[Any, Any], *args, **kwargs):
"""
Initialize the dictionary with default values and any additional provided values.
:param default_values: A dictionary of default key-value pairs.
"""
super().__init__()
self.default_values = default_values
self.update(self.default_values)
def __setitem__(self, key, value):
"""
Set a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
super().__setitem__(key, self.default_values[key])
else:
super().__setitem__(key, value)
def __delitem__(self, key):
"""
Delete a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
super().__setitem__(key, self.default_values[key])
else:
super().__delitem__(key)
def pop(self, key, *args, **kwargs):
"""
Pop a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
return self.default_values[key]
return super().pop(key, *args, **kwargs)
def update(self, *args, **kwargs):
"""
Update the dictionary. Default keys are reset to default values.
"""
updates = dict(*args, **kwargs)
super().update(
{
k: self.default_values[k] if k in self.default_values else updates[k]
for k in updates
}
)
class MyAuthoritah:
def __init__(self, cfg: dotdict):
self.cfg = cfg
self.data = {}
self._relay = self.cfg.default_relay
self._ip6 = get_tailscale_ip6()
|
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def load_config():
with open(Path(Path(__file__).parent.resolve(), "config.json"), "r") as fd:
return dotdict(orjson.loads(fd.read()))
class DefaultDict(dict):
"""
A dictionary subclass that maintains default keys and values.
"""
def __init__(self, default_values: Dict[Any, Any], *args, **kwargs):
"""
Initialize the dictionary with default values and any additional provided values.
:param default_values: A dictionary of default key-value pairs.
"""
super().__init__()
self.default_values = default_values
self.update(self.default_values)
def __setitem__(self, key, value):
"""
Set a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
super().__setitem__(key, self.default_values[key])
else:
super().__setitem__(key, value)
def __delitem__(self, key):
"""
Delete a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
super().__setitem__(key, self.default_values[key])
else:
super().__delitem__(key)
def pop(self, key, *args, **kwargs):
"""
Pop a dictionary item. If the key is a default key, reset to default value.
"""
if key in self.default_values:
return self.default_values[key]
return super().pop(key, *args, **kwargs)
def update(self, *args, **kwargs):
"""
Update the dictionary. Default keys are reset to default values.
"""
updates = dict(*args, **kwargs)
super().update(
{
k: self.default_values[k] if k in self.default_values else updates[k]
for k in updates
}
)
class MyAuthoritah:
def __init__(self, cfg: dotdict):
self.cfg = cfg
self.data = {}
self._relay = self.cfg.default_relay
self._ip6 = get_tailscale_ip6() | self._ip = get_tailscale_ip4() | 0 | 2023-12-13 01:17:53+00:00 | 2k |
bluuewhale/nexon-openapi-python | src/nexon_openapi/utils/_transform.py | [
{
"identifier": "is_list",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_list(obj: object) -> TypeGuard[list[object]]:\n return isinstance(obj, list)"
},
{
"identifier": "is_mapping",
"path": "src/nexon_openapi/utils/_utils.py",
"snippet": "def is_mapping(obj: object... | from typing import Any, Mapping, Optional, TypeVar, Union, cast
from datetime import date, datetime
from typing_extensions import Literal, get_args, override, get_type_hints
from ._utils import (
is_list,
is_mapping,
is_list_type,
is_union_type,
extract_type_arg,
is_required_type,
is_annotated_type,
strip_annotated_type,
)
from .._compat import model_dump, is_typeddict
import pydantic | 1,275 | from __future__ import annotations
_T = TypeVar("_T")
PropertyFormat = Literal["iso8601", "custom"]
class PropertyInfo:
"""Metadata class to be used in Annotated types to provide information about a given type.
For example:
class MyParams(TypedDict):
account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
"""
alias: Optional[str]
format: Optional[PropertyFormat]
format_template: Optional[str]
def __init__(
self,
*,
alias: Optional[str] = None,
format: Optional[PropertyFormat] = None,
format_template: Optional[str] = None,
) -> None:
self.alias = alias
self.format = format
self.format_template = format_template
@override
def __repr__(self) -> str:
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}')"
def maybe_transform(
data: object,
expected_type: object,
) -> Optional[Any]:
"""Wrapper over `transform()` that allows `None` to be passed.
See `transform()` for more details.
"""
if data is None:
return None
return transform(data, expected_type)
# Wrapper over _transform_recursive providing fake types
def transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias='cardID')]]
transformed = transform({'card_id': '<my card ID>'}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = _transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
def _get_annotated_type(type_: type) -> Union[type, None]:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
"""
if is_required_type(type_):
# Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
type_ = get_args(type_)[0]
| from __future__ import annotations
_T = TypeVar("_T")
PropertyFormat = Literal["iso8601", "custom"]
class PropertyInfo:
"""Metadata class to be used in Annotated types to provide information about a given type.
For example:
class MyParams(TypedDict):
account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')]
This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API.
"""
alias: Optional[str]
format: Optional[PropertyFormat]
format_template: Optional[str]
def __init__(
self,
*,
alias: Optional[str] = None,
format: Optional[PropertyFormat] = None,
format_template: Optional[str] = None,
) -> None:
self.alias = alias
self.format = format
self.format_template = format_template
@override
def __repr__(self) -> str:
return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}')"
def maybe_transform(
data: object,
expected_type: object,
) -> Optional[Any]:
"""Wrapper over `transform()` that allows `None` to be passed.
See `transform()` for more details.
"""
if data is None:
return None
return transform(data, expected_type)
# Wrapper over _transform_recursive providing fake types
def transform(
data: _T,
expected_type: object,
) -> _T:
"""Transform dictionaries based off of type information from the given type, for example:
```py
class Params(TypedDict, total=False):
card_id: Required[Annotated[str, PropertyInfo(alias='cardID')]]
transformed = transform({'card_id': '<my card ID>'}, Params)
# {'cardID': '<my card ID>'}
```
Any keys / data that does not have type information given will be included as is.
It should be noted that the transformations that this function does are not represented in the type system.
"""
transformed = _transform_recursive(data, annotation=cast(type, expected_type))
return cast(_T, transformed)
def _get_annotated_type(type_: type) -> Union[type, None]:
"""If the given type is an `Annotated` type then it is returned, if not `None` is returned.
This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]`
"""
if is_required_type(type_):
# Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]`
type_ = get_args(type_)[0]
| if is_annotated_type(type_): | 6 | 2023-12-14 18:12:17+00:00 | 2k |
Jack24658735/FedLGT | dataloaders/flair_dataset_fed.py | [
{
"identifier": "get_unk_mask_indices",
"path": "dataloaders/data_utils.py",
"snippet": "def get_unk_mask_indices(image,testing,num_labels,known_labels,epoch=1):\n if testing:\n # for consistency across epochs and experiments, seed using hashed image array \n random.seed(hashlib.sha1(np... | import os
import torch
import numpy as np
import pickle
import h5py
from torch.utils.data import Dataset, DataLoader
from pdb import set_trace as stop
from dataloaders.data_utils import get_unk_mask_indices,image_loader | 1,023 |
class FlairFedDataset(Dataset):
def __init__(self, inp_data, split, num_labels, data_file, img_root, curr_user=None, max_samples=-1,transform=None,known_labels=0,testing=False, label_mapping=None, fine_grained_label_mapping=None):
super(FlairFedDataset, self).__init__()
# print(data_file)
#self.split_data = h5py.File('/home/liujack/multi_label/C-Tran/data/flair/cent_data.hdf5', 'r')
self.split_data = inp_data
self.split = split
self.fine_grained_label_mapping = fine_grained_label_mapping
self.label_mapping = label_mapping
if max_samples != -1:
self.split_data = self.split_data[0:max_samples]
self.img_root = img_root
self.transform = transform
self.num_labels = num_labels
self.known_labels = known_labels
self.testing = testing
self.curr_user = curr_user
self.image_id_list = list(self.split_data[self.split][self.curr_user]['image_ids'])
self.image_list = list(self.split_data[self.split][self.curr_user]['images'])
self.label_list = list(self.split_data[self.split][self.curr_user]['labels'])
self.fg_label_list = list(self.split_data[self.split][self.curr_user]['fine_grained_labels'])
def __len__(self):
return len(self.image_id_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# take a sample
image_ID = self.image_id_list[idx]
# img = np.array(self.split_data[self.split][self.curr_user][image_ID]['image'])
img = self.image_list[idx]
image = self.transform(img)
if self.fine_grained_label_mapping != None:
# fine grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
labels_str = self.fg_label_list[idx]
else:
# coarse grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['labels'])
labels_str = self.label_list[idx]
assert self.label_mapping != None
# fg_labels = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
# image_ID = self.split_data[idx]['file_name']
# img_name = os.path.join(self.img_root,image_ID + '.jpg')
# image = image_loader(img_name,self.transform)
labels_str = labels_str.tolist()
labels_str = str(labels_str)[2:-1].split('|')
tran_labels = [0] * self.num_labels
if self.fine_grained_label_mapping != None:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.fine_grained_label_mapping[label]))
else:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.label_mapping[label]))
assert tran_labels.count(1) == len(labels_str)
labels = torch.Tensor(tran_labels)
|
class FlairFedDataset(Dataset):
def __init__(self, inp_data, split, num_labels, data_file, img_root, curr_user=None, max_samples=-1,transform=None,known_labels=0,testing=False, label_mapping=None, fine_grained_label_mapping=None):
super(FlairFedDataset, self).__init__()
# print(data_file)
#self.split_data = h5py.File('/home/liujack/multi_label/C-Tran/data/flair/cent_data.hdf5', 'r')
self.split_data = inp_data
self.split = split
self.fine_grained_label_mapping = fine_grained_label_mapping
self.label_mapping = label_mapping
if max_samples != -1:
self.split_data = self.split_data[0:max_samples]
self.img_root = img_root
self.transform = transform
self.num_labels = num_labels
self.known_labels = known_labels
self.testing = testing
self.curr_user = curr_user
self.image_id_list = list(self.split_data[self.split][self.curr_user]['image_ids'])
self.image_list = list(self.split_data[self.split][self.curr_user]['images'])
self.label_list = list(self.split_data[self.split][self.curr_user]['labels'])
self.fg_label_list = list(self.split_data[self.split][self.curr_user]['fine_grained_labels'])
def __len__(self):
return len(self.image_id_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# take a sample
image_ID = self.image_id_list[idx]
# img = np.array(self.split_data[self.split][self.curr_user][image_ID]['image'])
img = self.image_list[idx]
image = self.transform(img)
if self.fine_grained_label_mapping != None:
# fine grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
labels_str = self.fg_label_list[idx]
else:
# coarse grained labels are used
# labels_str = np.array(self.split_data[self.split][image_ID]['labels'])
labels_str = self.label_list[idx]
assert self.label_mapping != None
# fg_labels = np.array(self.split_data[self.split][image_ID]['fine_grained_labels'])
# image_ID = self.split_data[idx]['file_name']
# img_name = os.path.join(self.img_root,image_ID + '.jpg')
# image = image_loader(img_name,self.transform)
labels_str = labels_str.tolist()
labels_str = str(labels_str)[2:-1].split('|')
tran_labels = [0] * self.num_labels
if self.fine_grained_label_mapping != None:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.fine_grained_label_mapping[label]))
else:
for label in labels_str:
tran_labels = list(map(lambda x, y: x | y, tran_labels, self.label_mapping[label]))
assert tran_labels.count(1) == len(labels_str)
labels = torch.Tensor(tran_labels)
| unk_mask_indices = get_unk_mask_indices(image,self.testing,self.num_labels,self.known_labels) | 0 | 2023-12-09 09:16:59+00:00 | 2k |
AgriCodeHub/dairy-django-backend | production/validators.py | [
{
"identifier": "CowCategoryChoices",
"path": "core/choices.py",
"snippet": "class CowCategoryChoices(models.TextChoices):\n \"\"\"\n Choices for the category of a cow.\n\n Choices:\n - `CALF`: Represents a calf.\n - `WEANER`: Represents a weaner.\n - `HEIFER`: Represents a heifer.\n ... | from datetime import timedelta
from django.core.exceptions import ValidationError
from core.choices import CowCategoryChoices, CowAvailabilityChoices
from core.utils import todays_date
from production.choices import LactationStageChoices
from users.choices import SexChoices
from production.models import Lactation | 1,470 |
class LactationValidator:
"""
Provides validation methods for lactation records associated with cows.
Methods:
- `validate_age(start_date, cow)`: Validates the start date of lactation based on the cow's age.
- `validate_cow_origin(cow)`: Validates that manual entry is allowed only for bought cows.
- `validate_cow_category(category)`: Validates the cow category for lactation records, allowing only bought cows with calves.
- `validate_fields(start_date, pregnancy, lactation_number, cow, lactation)`: Validates various fields of the lactation record, including start date, end date, pregnancy status, lactation number, and cow's age.
"""
@staticmethod
def validate_age(start_date, cow):
"""
Validates the start date of lactation based on the cow's age.
Args:
- `start_date` (date): The start date of the lactation.
- `cow` (Cow): The cow associated with the lactation record.
Raises:
- `ValidationError`: If the start date is before the cow reaches 635 days of age.
"""
if start_date < cow.date_of_birth + timedelta(days=635):
raise ValidationError(
code="invalid_start_date",
message=f"Invalid start date. Lactation must have started or be around {cow.date_of_birth + timedelta(days=635)}, not {start_date}.",
)
@staticmethod
def validate_cow_origin(cow):
"""
Validates that manual entry is allowed only for bought cows.
Args:
- `cow` (Cow): The cow associated with the lactation record.
Raises:
- `ValidationError`: If manual entry is attempted on a cow that is not bought.
"""
if not cow.is_bought:
raise ValidationError(
code="manual_entry_only_on_bought_cows",
message="Manual entry is allowed only for bought cows.",
)
@staticmethod
def validate_cow_category(category):
"""
Validates the cow category for lactation records, allowing only bought cows with calves.
Args:
- `category` (str): The cow category associated with the lactation record.
Raises:
- `ValidationError`: If the cow category is invalid or not a milking cow with calves.
"""
|
class LactationValidator:
"""
Provides validation methods for lactation records associated with cows.
Methods:
- `validate_age(start_date, cow)`: Validates the start date of lactation based on the cow's age.
- `validate_cow_origin(cow)`: Validates that manual entry is allowed only for bought cows.
- `validate_cow_category(category)`: Validates the cow category for lactation records, allowing only bought cows with calves.
- `validate_fields(start_date, pregnancy, lactation_number, cow, lactation)`: Validates various fields of the lactation record, including start date, end date, pregnancy status, lactation number, and cow's age.
"""
@staticmethod
def validate_age(start_date, cow):
"""
Validates the start date of lactation based on the cow's age.
Args:
- `start_date` (date): The start date of the lactation.
- `cow` (Cow): The cow associated with the lactation record.
Raises:
- `ValidationError`: If the start date is before the cow reaches 635 days of age.
"""
if start_date < cow.date_of_birth + timedelta(days=635):
raise ValidationError(
code="invalid_start_date",
message=f"Invalid start date. Lactation must have started or be around {cow.date_of_birth + timedelta(days=635)}, not {start_date}.",
)
@staticmethod
def validate_cow_origin(cow):
"""
Validates that manual entry is allowed only for bought cows.
Args:
- `cow` (Cow): The cow associated with the lactation record.
Raises:
- `ValidationError`: If manual entry is attempted on a cow that is not bought.
"""
if not cow.is_bought:
raise ValidationError(
code="manual_entry_only_on_bought_cows",
message="Manual entry is allowed only for bought cows.",
)
@staticmethod
def validate_cow_category(category):
"""
Validates the cow category for lactation records, allowing only bought cows with calves.
Args:
- `category` (str): The cow category associated with the lactation record.
Raises:
- `ValidationError`: If the cow category is invalid or not a milking cow with calves.
""" | if category not in CowCategoryChoices.values: | 0 | 2023-12-09 06:56:42+00:00 | 2k |
PeriniM/Rotary-Pendulum-RL | control/reinforcement_learning/DQN/Agent.py | [
{
"identifier": "DeepQNetwork",
"path": "control/reinforcement_learning/DQN/DeepQNetwork.py",
"snippet": "class DeepQNetwork:\n \"\"\"\n Deep Q Network to approximate the Q function\n \"\"\"\n def __init__(self, lr, num_actions, input_dims, fc_dims = [32, 32], opt='adam', loss='mse'):\n\n ... | import os
import configparser
import ast
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import copy
import time
import tensorflow as tf
from matplotlib import cm
from datetime import datetime
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import TensorBoard
from control.reinforcement_learning.DQN.DeepQNetwork import DeepQNetwork
from control.reinforcement_learning.DQN.ReplayBuffer import ReplayBuffer | 1,295 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Agent:
"""
DQN Agent
- Take an environment
- Set up the deep neural network
- Store the experience
- Choose action
- Train the network
- Evaluate the network
"""
def __init__(self, env):
# check if gpu is available
if tf.config.list_physical_devices('GPU'):
# print the device name
print("GPU is available")
print("Device name: {}".format(tf.test.gpu_device_name()))
else:
print("GPU is not available")
self.env = env
self.nJoint = self.env.nbJoint
# read INI file
# get the path of the root directory
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ini_file_path = os.path.join(root_dir, 'config.ini')
self.params = self.parse_ini(ini_file_path)
# set up the parameters from the INI file
self.action_steps = int(self.params['action_steps'])
self.torque_range = ast.literal_eval(self.params['control_range'])
self.max_episode_steps = int(self.params['max_episode_steps'])
self.train_episodes = int(self.params['train_episodes'])
self.lr = float(self.params['lr'])
self.discount_factor = float(self.params['discount_factor'])
self.epsilon = float(self.params['epsilon'])
self.epsilon_decay_episodes = int(self.params['epsilon_decay_episodes'])
self.epsilon_final = float(self.params['epsilon_final'])
self.buffer_size = int(self.params['buffer_size'])
self.batch_size = int(self.params['batch_size'])
self.hidden_dims = ast.literal_eval(self.params['hidden_dims'])
self.update_rate_episodes = int(self.params['target_update_episodes'])
self.train_rate_steps = int(self.params['train_rate_steps'])
self.discounted_reward = 0.0
self.epsilon_decay = (self.epsilon - self.epsilon_final) / self.epsilon_decay_episodes
# set up the environment parameters
self.env.num_actions = self.action_steps
self.env.range_actions = self.torque_range
self.env.maxIter = self.max_episode_steps
self.env.umax = self.torque_range[1]
self.env.actions = np.linspace(self.env.range_actions[0], self.env.range_actions[1], self.action_steps)
self.env.action_space = [i for i in range(self.action_steps)]
self.action_space = self.env.action_space
self.total_step_counter = 0
| os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Agent:
"""
DQN Agent
- Take an environment
- Set up the deep neural network
- Store the experience
- Choose action
- Train the network
- Evaluate the network
"""
def __init__(self, env):
# check if gpu is available
if tf.config.list_physical_devices('GPU'):
# print the device name
print("GPU is available")
print("Device name: {}".format(tf.test.gpu_device_name()))
else:
print("GPU is not available")
self.env = env
self.nJoint = self.env.nbJoint
# read INI file
# get the path of the root directory
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ini_file_path = os.path.join(root_dir, 'config.ini')
self.params = self.parse_ini(ini_file_path)
# set up the parameters from the INI file
self.action_steps = int(self.params['action_steps'])
self.torque_range = ast.literal_eval(self.params['control_range'])
self.max_episode_steps = int(self.params['max_episode_steps'])
self.train_episodes = int(self.params['train_episodes'])
self.lr = float(self.params['lr'])
self.discount_factor = float(self.params['discount_factor'])
self.epsilon = float(self.params['epsilon'])
self.epsilon_decay_episodes = int(self.params['epsilon_decay_episodes'])
self.epsilon_final = float(self.params['epsilon_final'])
self.buffer_size = int(self.params['buffer_size'])
self.batch_size = int(self.params['batch_size'])
self.hidden_dims = ast.literal_eval(self.params['hidden_dims'])
self.update_rate_episodes = int(self.params['target_update_episodes'])
self.train_rate_steps = int(self.params['train_rate_steps'])
self.discounted_reward = 0.0
self.epsilon_decay = (self.epsilon - self.epsilon_final) / self.epsilon_decay_episodes
# set up the environment parameters
self.env.num_actions = self.action_steps
self.env.range_actions = self.torque_range
self.env.maxIter = self.max_episode_steps
self.env.umax = self.torque_range[1]
self.env.actions = np.linspace(self.env.range_actions[0], self.env.range_actions[1], self.action_steps)
self.env.action_space = [i for i in range(self.action_steps)]
self.action_space = self.env.action_space
self.total_step_counter = 0 | self.replay_buffer = ReplayBuffer(self.buffer_size) | 1 | 2023-12-09 11:22:54+00:00 | 2k |
Kokonico/ObjLog | objlog/Base/LogNode.py | [
{
"identifier": "Debug",
"path": "objlog/LogMessages.py",
"snippet": "class Debug(LogMessage):\n \"\"\"the default debug message, with blue color\"\"\"\n level = \"DEBUG\"\n color = \"\\033[94m\""
},
{
"identifier": "LogMessage",
"path": "objlog/Base/LogMessage.py",
"snippet": "... | from objlog.LogMessages import Debug
from objlog.Base.LogMessage import LogMessage # "no parent package" error happens when I don't specify the package,
from collections import deque | 875 | """The LogNode class, the main class of the ObjLogger"""
# IDK why
class LogNode:
"""A LogNode, the main class of the ObjLogger. It can log messages to a file, to the console, or both."""
open = open # this code is probably the reason why my dad left me
# this is clearly not a good way to do this, but I don't know how to do it better
# if anyone can prevent doing this, and fix the exception caused when deleting a LogNode, please do it
# else please increment this number by 1
# thank you
# total_failed_attempts_to_fix_this = 1
def __init__(self, name: str, log_file: str | None = None, print_to_console: bool = False,
print_filter: list | None = None, max_messages_in_memory: int = 500, max_log_messages: int = 1000,
log_when_closed: bool = True, wipe_log_file_on_init: bool = False):
self.log_file = log_file
self.name = name
self.print = print_to_console
self.messages = deque(maxlen=max_messages_in_memory)
self.max = max_messages_in_memory
self.maxinf = max_log_messages
self.print_filter = print_filter
self.log_closure_message = log_when_closed
self.log_len = 0
# check if log exists (in file system), and if so, clear it
if isinstance(log_file, str) and wipe_log_file_on_init:
with open(log_file, "w+") as f:
f.write("")
def log(self, message, override_log_file: str | None = None, force_print: tuple[bool, bool] = (False, False),
preserve_message_in_memory: bool = True) -> None:
"""log a message"""
# make sure it's a LogMessage or its subclass
| """The LogNode class, the main class of the ObjLogger"""
# IDK why
class LogNode:
"""A LogNode, the main class of the ObjLogger. It can log messages to a file, to the console, or both."""
open = open # this code is probably the reason why my dad left me
# this is clearly not a good way to do this, but I don't know how to do it better
# if anyone can prevent doing this, and fix the exception caused when deleting a LogNode, please do it
# else please increment this number by 1
# thank you
# total_failed_attempts_to_fix_this = 1
def __init__(self, name: str, log_file: str | None = None, print_to_console: bool = False,
print_filter: list | None = None, max_messages_in_memory: int = 500, max_log_messages: int = 1000,
log_when_closed: bool = True, wipe_log_file_on_init: bool = False):
self.log_file = log_file
self.name = name
self.print = print_to_console
self.messages = deque(maxlen=max_messages_in_memory)
self.max = max_messages_in_memory
self.maxinf = max_log_messages
self.print_filter = print_filter
self.log_closure_message = log_when_closed
self.log_len = 0
# check if log exists (in file system), and if so, clear it
if isinstance(log_file, str) and wipe_log_file_on_init:
with open(log_file, "w+") as f:
f.write("")
def log(self, message, override_log_file: str | None = None, force_print: tuple[bool, bool] = (False, False),
preserve_message_in_memory: bool = True) -> None:
"""log a message"""
# make sure it's a LogMessage or its subclass | if not isinstance(message, LogMessage): | 1 | 2023-12-08 20:41:18+00:00 | 2k |
anyquest/pyaq | aq/providers/gemini/provider.py | [
{
"identifier": "BaseProvider",
"path": "aq/providers/provider.py",
"snippet": "class BaseProvider:\n async def create_completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse:\n pass"
},
{
"identifier": "ProviderError",
"path": "aq/providers/provider.py",
"sn... | import logging
import re
from typing import Dict, Any, Optional, List, Literal
from pydantic import BaseModel
from ..provider import BaseProvider, ProviderError
from ..types import ChatCompletionRequest, ChatCompletionResponse, ChatCompletionMessage, Choice, Error
from ...http_client import AsyncHttpClient | 1,233 |
class InlineData(BaseModel):
mimeType: str
data: str
class Part(BaseModel):
text: Optional[str] = None
inlineData: Optional[InlineData] = None
class Content(BaseModel):
role: Literal["user", "model"]
parts: List[Part]
class GenerationConfig(BaseModel):
temperature: float = 0.5
maxOutputTokens: int = 1000
class GeminiCompletionRequest(BaseModel):
contents: List[Content]
generationConfig: GenerationConfig
class ResponseCandidate(BaseModel):
content: Content
finishReason: Literal["STOP"]
class GeminiCompletionResponse(BaseModel):
candidates: List[ResponseCandidate]
class GeminiProvider(BaseProvider):
def __init__(self, config: Dict[str, Any], http_client: AsyncHttpClient):
self._config = config
self._http_client = http_client
self._logger = logging.getLogger(self.__class__.__name__)
@staticmethod
def _check_config(config: Dict[str, Any]) -> None:
required_keys = ['endpoint', 'key']
if not all(key in config for key in required_keys):
raise ProviderError(400, "The Gemini provider is not configured. Add settings to config.yml.")
|
class InlineData(BaseModel):
mimeType: str
data: str
class Part(BaseModel):
text: Optional[str] = None
inlineData: Optional[InlineData] = None
class Content(BaseModel):
role: Literal["user", "model"]
parts: List[Part]
class GenerationConfig(BaseModel):
temperature: float = 0.5
maxOutputTokens: int = 1000
class GeminiCompletionRequest(BaseModel):
contents: List[Content]
generationConfig: GenerationConfig
class ResponseCandidate(BaseModel):
content: Content
finishReason: Literal["STOP"]
class GeminiCompletionResponse(BaseModel):
candidates: List[ResponseCandidate]
class GeminiProvider(BaseProvider):
def __init__(self, config: Dict[str, Any], http_client: AsyncHttpClient):
self._config = config
self._http_client = http_client
self._logger = logging.getLogger(self.__class__.__name__)
@staticmethod
def _check_config(config: Dict[str, Any]) -> None:
required_keys = ['endpoint', 'key']
if not all(key in config for key in required_keys):
raise ProviderError(400, "The Gemini provider is not configured. Add settings to config.yml.")
| async def create_completion(self, request: ChatCompletionRequest) -> ChatCompletionResponse: | 3 | 2023-12-14 13:25:52+00:00 | 2k |
multimodallearning/DG-TTA | dg_tta/tta/ipynb_utils.py | [
{
"identifier": "get_data_filepaths",
"path": "dg_tta/tta/config_log_utils.py",
"snippet": "def get_data_filepaths(tta_dataset_name, tta_dataset_bucket):\n raw_tta_dataset_dir = Path(nnUNet_raw, tta_dataset_name)\n if tta_dataset_bucket == \"imagesTr\":\n source_folders = [raw_tta_dataset_d... | import json
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
from mpl_toolkits.axes_grid1.axes_grid import ImageGrid
from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
from dg_tta.tta.config_log_utils import (
get_data_filepaths,
get_dgtta_colormap,
get_resources_dir,
)
from dg_tta.utils import check_dga_root_is_set | 1,136 |
def read_image(source_data_paths, path_idx):
if source_data_paths is None:
return None, None
source_img, source_sitk_stuff = SimpleITKIO().read_images(
source_data_paths[path_idx : path_idx + 1]
)
source_img = source_img[0]
return torch.tensor(source_img)[None, None, :], source_sitk_stuff
def get_target_imgs_datapaths():
check_dga_root_is_set()
with open("tta_plan.json", "r") as f:
tta_plan = json.load(f)
return tta_plan["tta_data_filepaths"]
def get_source_imgs_datapaths():
check_dga_root_is_set()
buckets = ["imagesTr", "imagesTs"]
with open("tta_plan.json", "r") as f:
tta_plan = json.load(f)
source_dataset_name = tta_plan["__pretrained_dataset_name__"]
if source_dataset_name.startswith("TS104"):
return "TS104"
source_data_paths = []
for buc in buckets:
source_data_paths.extend(get_data_filepaths(source_dataset_name, buc))
return source_data_paths
def get_orient_imgs(img):
def get_axes_idxs(axis_size):
NUM_IDXS = 16
return np.linspace(0, axis_size - 1, NUM_IDXS).round().astype(int)
img = img.squeeze(0, 1)
D, H, W = img.shape
slices = dict(HW=[], DW=[], DH=[])
for d in get_axes_idxs(D):
slices["HW"].append(img[d, :, :])
for h in get_axes_idxs(H):
slices["DW"].append(img[:, h, :])
for w in get_axes_idxs(W):
slices["DH"].append(img[:, :, w])
return slices
def clear_axis(ax):
ax.get_yaxis().set_ticks([])
ax.get_xaxis().set_ticks([])
ax.grid(False)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
def get_spacing_ratio(sitk_stuff, axis_idx):
rolled_spacing = np.roll(np.array(sitk_stuff["spacing"]), axis_idx)
return rolled_spacing[1] / rolled_spacing[0]
def show_image_overview(img, sitk_stuff, fig_inch_size=5.0):
orient_imgs = get_orient_imgs(img)
vmin, vmax = img.min(), img.max()
dpi = 150.0
large_text_size = fig_inch_size * 10
small_text_size = fig_inch_size * 2
|
def read_image(source_data_paths, path_idx):
if source_data_paths is None:
return None, None
source_img, source_sitk_stuff = SimpleITKIO().read_images(
source_data_paths[path_idx : path_idx + 1]
)
source_img = source_img[0]
return torch.tensor(source_img)[None, None, :], source_sitk_stuff
def get_target_imgs_datapaths():
check_dga_root_is_set()
with open("tta_plan.json", "r") as f:
tta_plan = json.load(f)
return tta_plan["tta_data_filepaths"]
def get_source_imgs_datapaths():
check_dga_root_is_set()
buckets = ["imagesTr", "imagesTs"]
with open("tta_plan.json", "r") as f:
tta_plan = json.load(f)
source_dataset_name = tta_plan["__pretrained_dataset_name__"]
if source_dataset_name.startswith("TS104"):
return "TS104"
source_data_paths = []
for buc in buckets:
source_data_paths.extend(get_data_filepaths(source_dataset_name, buc))
return source_data_paths
def get_orient_imgs(img):
def get_axes_idxs(axis_size):
NUM_IDXS = 16
return np.linspace(0, axis_size - 1, NUM_IDXS).round().astype(int)
img = img.squeeze(0, 1)
D, H, W = img.shape
slices = dict(HW=[], DW=[], DH=[])
for d in get_axes_idxs(D):
slices["HW"].append(img[d, :, :])
for h in get_axes_idxs(H):
slices["DW"].append(img[:, h, :])
for w in get_axes_idxs(W):
slices["DH"].append(img[:, :, w])
return slices
def clear_axis(ax):
ax.get_yaxis().set_ticks([])
ax.get_xaxis().set_ticks([])
ax.grid(False)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
def get_spacing_ratio(sitk_stuff, axis_idx):
rolled_spacing = np.roll(np.array(sitk_stuff["spacing"]), axis_idx)
return rolled_spacing[1] / rolled_spacing[0]
def show_image_overview(img, sitk_stuff, fig_inch_size=5.0):
orient_imgs = get_orient_imgs(img)
vmin, vmax = img.min(), img.max()
dpi = 150.0
large_text_size = fig_inch_size * 10
small_text_size = fig_inch_size * 2 | cmap = get_dgtta_colormap() | 1 | 2023-12-08 08:43:11+00:00 | 2k |
tommy-xq/SA2VP | vpt_main/src/models/resnet.py | [
{
"identifier": "MLP",
"path": "vpt_main/src/models/mlp.py",
"snippet": "class MLP(nn.Module):\n def __init__(\n self,\n input_dim: int,\n mlp_dims: List[int],\n dropout: float = 0.1,\n nonlinearity: Type[nn.Module] = nn.ReLU,\n normalization: Type[nn.Module]... | import torch
import torch.nn as nn
import torchvision as tv
from collections import OrderedDict
from torchvision import models
from .mlp import MLP
from ..utils import logging | 772 | #!/usr/bin/env python3
"""
ResNet-related models:
"imagenet_sup_rn18",
"imagenet_sup_rn34",
"imagenet_sup_rn50",
"imagenet_sup_rn101",
"imagenet_sup_rn152",
"mocov3_rn50"
"""
| #!/usr/bin/env python3
"""
ResNet-related models:
"imagenet_sup_rn18",
"imagenet_sup_rn34",
"imagenet_sup_rn50",
"imagenet_sup_rn101",
"imagenet_sup_rn152",
"mocov3_rn50"
"""
| logger = logging.get_logger("visual_prompt") | 1 | 2023-12-12 13:19:17+00:00 | 2k |
SooLab/DDCOT | utils_evaluate.py | [
{
"identifier": "caculate_bleu",
"path": "evaluations.py",
"snippet": "def caculate_bleu(results, data, gram):\n bleus = []\n for qid, output in results.items():\n prediction = output\n target = data[qid]\n # target = data[qid]['lecture'] + data[qid]['solution']\n targe... | import os
import json
import argparse
import warnings
import pandas as pd
from sentence_transformers import SentenceTransformer
from evaluations import caculate_bleu, caculate_rouge, caculate_similariry | 973 | '''
Adapted from https://github.com/lupantech/ScienceQA
'''
warnings.filterwarnings('ignore')
def get_acc_with_contion(res_pd, key, values):
if isinstance(values, list):
total_pd = res_pd[res_pd[key].isin(values)]
else:
total_pd = res_pd[res_pd[key] == values]
correct_pd = total_pd[total_pd['true_false'] == True]
acc = "{:.2f}".format(len(correct_pd) / len(total_pd) * 100)
return acc
def get_scores(result_data, rationale_data, results_reference, data_file, img):
# read result file
results = result_data
num = len(results)
# read data file
sqa_data = json.load(open(data_file))
# construct pandas data
sqa_pd = pd.DataFrame(sqa_data).T
res_pd = sqa_pd[sqa_pd['split'] == 'test'] # test set
if img:
res_pd = res_pd[res_pd["image"] == 'image.png']
# update data
for index, row in res_pd.iterrows():
res_pd.loc[index, 'no_context'] = True if (not row['hint'] and not row['image']) else False
res_pd.loc[index, 'has_text'] = True if row['hint'] else False
res_pd.loc[index, 'has_image'] = True if row['image'] else False
res_pd.loc[index, 'has_text_image'] = True if (row['hint'] and row['image']) else False
res_pd.loc[index, 'has_no_image'] = False if row['image'] else True
label = row['answer']
pred = int(results[index])
res_pd.loc[index, 'pred'] = pred
res_pd.loc[index, 'true_false'] = (label == pred)
# accuracy scores
acc_average = len(res_pd[res_pd['true_false'] == True]) / num * 100
# rationale quality
## BLEU
bleu1 = caculate_bleu(rationale_data, results_reference, gram=1)
bleu4 = caculate_bleu(rationale_data, results_reference, gram=4)
## Rouge-L
rouge = caculate_rouge(rationale_data, results_reference)
## Similarity
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2').cuda()
| '''
Adapted from https://github.com/lupantech/ScienceQA
'''
warnings.filterwarnings('ignore')
def get_acc_with_contion(res_pd, key, values):
if isinstance(values, list):
total_pd = res_pd[res_pd[key].isin(values)]
else:
total_pd = res_pd[res_pd[key] == values]
correct_pd = total_pd[total_pd['true_false'] == True]
acc = "{:.2f}".format(len(correct_pd) / len(total_pd) * 100)
return acc
def get_scores(result_data, rationale_data, results_reference, data_file, img):
# read result file
results = result_data
num = len(results)
# read data file
sqa_data = json.load(open(data_file))
# construct pandas data
sqa_pd = pd.DataFrame(sqa_data).T
res_pd = sqa_pd[sqa_pd['split'] == 'test'] # test set
if img:
res_pd = res_pd[res_pd["image"] == 'image.png']
# update data
for index, row in res_pd.iterrows():
res_pd.loc[index, 'no_context'] = True if (not row['hint'] and not row['image']) else False
res_pd.loc[index, 'has_text'] = True if row['hint'] else False
res_pd.loc[index, 'has_image'] = True if row['image'] else False
res_pd.loc[index, 'has_text_image'] = True if (row['hint'] and row['image']) else False
res_pd.loc[index, 'has_no_image'] = False if row['image'] else True
label = row['answer']
pred = int(results[index])
res_pd.loc[index, 'pred'] = pred
res_pd.loc[index, 'true_false'] = (label == pred)
# accuracy scores
acc_average = len(res_pd[res_pd['true_false'] == True]) / num * 100
# rationale quality
## BLEU
bleu1 = caculate_bleu(rationale_data, results_reference, gram=1)
bleu4 = caculate_bleu(rationale_data, results_reference, gram=4)
## Rouge-L
rouge = caculate_rouge(rationale_data, results_reference)
## Similarity
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2').cuda() | similariry = caculate_similariry(rationale_data, results_reference, model) | 2 | 2023-12-14 20:47:08+00:00 | 2k |
Qazalbash/jaxampler | jaxampler/_src/rvs/bernoulli.py | [
{
"identifier": "Numeric",
"path": "jaxampler/_src/typing.py",
"snippet": ""
},
{
"identifier": "Binomial",
"path": "jaxampler/_src/rvs/binomial.py",
"snippet": "class Binomial(DiscreteRV):\n r\"\"\"Binomial random variable\n .. math::\n X\\sim Bin(p,n) \\iff P(X=x|p,n)=\\bi... | from typing import Any, Optional
from ..typing import Numeric
from .binomial import Binomial | 959 | # Copyright 2023 The Jaxampler Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
class Bernoulli(Binomial):
r"""Bernoulli random variable with probability of success p
.. math::
X\sim \mathbf{B}(p)\iff P\left(X=x|p\right)=p^{x}(1-p)^{1-x}
"""
| # Copyright 2023 The Jaxampler Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
class Bernoulli(Binomial):
r"""Bernoulli random variable with probability of success p
.. math::
X\sim \mathbf{B}(p)\iff P\left(X=x|p\right)=p^{x}(1-p)^{1-x}
"""
| def __init__(self, p: Numeric | Any, name: Optional[str] = None) -> None: | 0 | 2023-12-11 04:27:17+00:00 | 2k |
GXNU-ZhongLab/ODTrack | lib/models/odtrack/base_backbone.py | [
{
"identifier": "PatchEmbed",
"path": "lib/models/layers/patch_embed.py",
"snippet": "class PatchEmbed(nn.Module):\r\n \"\"\" 2D Image to Patch Embedding\r\n \"\"\"\r\n\r\n def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):\r\n supe... | from functools import partial
from timm.models.vision_transformer import resize_pos_embed
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from lib.models.layers.patch_embed import PatchEmbed
from lib.models.odtrack.utils import combine_tokens, recover_tokens
import torch
import torch.nn as nn
import torch.nn.functional as F
| 1,542 |
class BaseBackbone(nn.Module):
def __init__(self):
super().__init__()
# for original ViT
self.pos_embed = None
self.img_size = [224, 224]
self.patch_size = 16
self.embed_dim = 384
self.cat_mode = 'direct'
self.pos_embed_z = None
self.pos_embed_x = None
self.template_segment_pos_embed = None
self.search_segment_pos_embed = None
self.return_inter = False
self.return_stage = [2, 5, 8, 11]
self.add_sep_seg = False
def finetune_track(self, cfg, patch_start_index=1):
search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)
template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)
new_patch_size = cfg.MODEL.BACKBONE.STRIDE
self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE
self.return_inter = cfg.MODEL.RETURN_INTER
self.return_stage = cfg.MODEL.RETURN_STAGES
self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG
# resize patch embedding
if new_patch_size != self.patch_size:
print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')
old_patch_embed = {}
for name, param in self.patch_embed.named_parameters():
if 'weight' in name:
param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),
mode='bicubic', align_corners=False)
param = nn.Parameter(param)
old_patch_embed[name] = param
|
class BaseBackbone(nn.Module):
def __init__(self):
super().__init__()
# for original ViT
self.pos_embed = None
self.img_size = [224, 224]
self.patch_size = 16
self.embed_dim = 384
self.cat_mode = 'direct'
self.pos_embed_z = None
self.pos_embed_x = None
self.template_segment_pos_embed = None
self.search_segment_pos_embed = None
self.return_inter = False
self.return_stage = [2, 5, 8, 11]
self.add_sep_seg = False
def finetune_track(self, cfg, patch_start_index=1):
search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)
template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)
new_patch_size = cfg.MODEL.BACKBONE.STRIDE
self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE
self.return_inter = cfg.MODEL.RETURN_INTER
self.return_stage = cfg.MODEL.RETURN_STAGES
self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG
# resize patch embedding
if new_patch_size != self.patch_size:
print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')
old_patch_embed = {}
for name, param in self.patch_embed.named_parameters():
if 'weight' in name:
param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),
mode='bicubic', align_corners=False)
param = nn.Parameter(param)
old_patch_embed[name] = param
| self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,
| 0 | 2023-12-10 03:57:19+00:00 | 2k |
yilin-bao/nnanim | TestingCode/transformer.py | [
{
"identifier": "Attention",
"path": "TestingCode/modules.py",
"snippet": "class Attention(nn.Module):\n def __init__(\n self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0\n ):\n super(Attention, self).__init__()\n\n assert (\n dim % num_heads == 0... | from torch import nn
from TestingCode.modules import Attention, FeedForward, PreNorm | 1,079 |
class Transformer(nn.Module):
def __init__(
self,
dim,
depth,
heads,
mlp_ratio=4.0,
attn_dropout=0.0,
dropout=0.0,
qkv_bias=True,
revised=False,
):
super().__init__()
self.layers = nn.ModuleList([])
assert isinstance(
mlp_ratio, float
), "MLP ratio should be an integer for valid "
mlp_dim = int(mlp_ratio * dim)
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
PreNorm(
dim,
Attention(
dim,
num_heads=heads,
qkv_bias=qkv_bias,
attn_drop=attn_dropout,
proj_drop=dropout,
),
),
PreNorm(
dim,
|
class Transformer(nn.Module):
def __init__(
self,
dim,
depth,
heads,
mlp_ratio=4.0,
attn_dropout=0.0,
dropout=0.0,
qkv_bias=True,
revised=False,
):
super().__init__()
self.layers = nn.ModuleList([])
assert isinstance(
mlp_ratio, float
), "MLP ratio should be an integer for valid "
mlp_dim = int(mlp_ratio * dim)
for _ in range(depth):
self.layers.append(
nn.ModuleList(
[
PreNorm(
dim,
Attention(
dim,
num_heads=heads,
qkv_bias=qkv_bias,
attn_drop=attn_dropout,
proj_drop=dropout,
),
),
PreNorm(
dim, | FeedForward(dim, mlp_dim, dropout_rate=dropout,), | 1 | 2023-12-05 22:01:06+00:00 | 2k |
Tlntin/booking_simulator | modelscope_agent/llm/custom_llm.py | [
{
"identifier": "AgentType",
"path": "modelscope_agent/agent_types.py",
"snippet": "class AgentType(str, Enum):\n\n DEFAULT = 'default'\n \"\"\"\"\"\"\n\n MS_AGENT = 'ms-agent'\n \"\"\"An agent that uses the ModelScope-agent specific format does a reasoning step before acting .\n \"\"\"\n... | import os
import json
import requests
import traceback
from modelscope_agent.agent_types import AgentType
from .base import LLM
from .utils import DEFAULT_MESSAGE | 809 |
class CustomLLM(LLM):
'''
This method is for the service that provide llm serving through http.
user could override the result parsing method if needed
While put all the necessary information in the env variable, such as Token, Model, URL
'''
name = 'custom_llm'
def __init__(self, cfg):
super().__init__(cfg)
self.token = os.getenv('HTTP_LLM_TOKEN', None)
self.model = os.getenv('HTTP_LLM_MODEL', None)
self.model_id = self.model
self.url = os.getenv('HTTP_LLM_URL', None)
if self.token is None:
raise ValueError('HTTP_LLM_TOKEN is not set')
|
class CustomLLM(LLM):
'''
This method is for the service that provide llm serving through http.
user could override the result parsing method if needed
While put all the necessary information in the env variable, such as Token, Model, URL
'''
name = 'custom_llm'
def __init__(self, cfg):
super().__init__(cfg)
self.token = os.getenv('HTTP_LLM_TOKEN', None)
self.model = os.getenv('HTTP_LLM_MODEL', None)
self.model_id = self.model
self.url = os.getenv('HTTP_LLM_URL', None)
if self.token is None:
raise ValueError('HTTP_LLM_TOKEN is not set') | self.agent_type = self.cfg.get('agent_type', AgentType.DEFAULT) | 0 | 2023-12-12 04:24:00+00:00 | 2k |
dx-dtran/gpt2-mlx | generate.py | [
{
"identifier": "GPT",
"path": "transformer.py",
"snippet": "class GPT(nn.Module):\n def __init__(self, config: GPTConfig):\n super().__init__()\n assert config.vocab_size is not None\n assert config.block_size is not None\n self.config = config\n\n self.wte = nn.Em... | import argparse
import tiktoken
import time
import mlx.core as mx
from mlx.utils import tree_unflatten, tree_flatten
from transformer import GPT, GPTConfig | 1,096 |
def load_model(model_name):
config_args = {
"gpt2": dict(n_layer=12, n_head=12, n_embd=768),
"gpt2-medium": dict(n_layer=24, n_head=16, n_embd=1024),
"gpt2-large": dict(n_layer=36, n_head=20, n_embd=1280),
"gpt2-xl": dict(n_layer=48, n_head=25, n_embd=1600),
}[model_name]
config_args["vocab_size"] = 50257
config_args["block_size"] = 1024
config_args["bias"] = True
config = GPTConfig(**config_args)
|
def load_model(model_name):
config_args = {
"gpt2": dict(n_layer=12, n_head=12, n_embd=768),
"gpt2-medium": dict(n_layer=24, n_head=16, n_embd=1024),
"gpt2-large": dict(n_layer=36, n_head=20, n_embd=1280),
"gpt2-xl": dict(n_layer=48, n_head=25, n_embd=1600),
}[model_name]
config_args["vocab_size"] = 50257
config_args["block_size"] = 1024
config_args["bias"] = True
config = GPTConfig(**config_args)
| model = GPT(config) | 0 | 2023-12-09 03:33:57+00:00 | 2k |
chenchenygu/watermark-learnability | kgw_watermarking/watermark_reliability_release/utils/generation.py | [
{
"identifier": "load_lfqa",
"path": "kgw_watermarking/watermark_reliability_release/utils/data/lfqa.py",
"snippet": "def load_lfqa(args=None, path=\"./utils/data/lfqa.jsonl\"):\n cols_to_load = [\"prefix\", \"gold_completion\", \"title\", \"selftext\", \"q_id\"]\n\n args.dataset_config_name = Non... | import torch
from datasets import load_dataset, IterableDataset
from torch import Tensor
from tokenizers import Tokenizer
from transformers import (
AutoTokenizer,
LlamaTokenizer,
AutoModelForSeq2SeqLM,
AutoModelForCausalLM,
DataCollatorWithPadding,
)
from .data.lfqa import load_lfqa
from .data.essays import load_essays
from .data.wikitext import load_wikitext | 1,351 | # coding=utf-8
# Copyright 2023 Authors of "A Watermark for Large Language Models"
# available at https://arxiv.org/abs/2301.10226
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# HF classes
MAX_GENERATIONS = int(10000) # Hardcoded max length to avoid infinite loop
def load_model(args):
"""Load and return the model and tokenizer"""
args.is_seq2seq_model = any(
[(model_type in args.model_name_or_path) for model_type in ["t5", "T0"]]
)
args.is_decoder_only_model = any(
[(model_type in args.model_name_or_path) for model_type in ["gpt", "opt", "bloom", "llama"]]
)
if args.is_seq2seq_model:
model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path)
elif args.is_decoder_only_model:
if args.load_fp16:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path, torch_dtype=torch.float16, device_map="auto"
)
else:
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path)
else:
raise ValueError(f"Unknown model type: {args.model_name_or_path}")
if args.use_gpu:
device = "cuda" if torch.cuda.is_available() else "cpu"
if args.load_fp16:
pass
else:
model = model.to(device)
else:
device = "cpu"
model.eval()
if args.is_decoder_only_model:
padding_side = "left"
else:
raise NotImplementedError(
"Need to check how to handle padding for seq2seq models when calling generate"
)
if "llama" in args.model_name_or_path:
tokenizer = LlamaTokenizer.from_pretrained(
args.model_name_or_path, padding_side=padding_side
)
model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk
model.config.bos_token_id = 1
model.config.eos_token_id = 2
else:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, padding_side=padding_side
)
args.model_max_length = model.config.max_position_embeddings
return model, tokenizer, device
def add_idx(example, idx):
example.update({"idx": idx})
return example
def load_hf_dataset(args):
dataset_name, dataset_config_name = args.dataset_name, args.dataset_config_name
if dataset_name == "lfqa":
| # coding=utf-8
# Copyright 2023 Authors of "A Watermark for Large Language Models"
# available at https://arxiv.org/abs/2301.10226
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# HF classes
MAX_GENERATIONS = int(10000) # Hardcoded max length to avoid infinite loop
def load_model(args):
"""Load and return the model and tokenizer"""
args.is_seq2seq_model = any(
[(model_type in args.model_name_or_path) for model_type in ["t5", "T0"]]
)
args.is_decoder_only_model = any(
[(model_type in args.model_name_or_path) for model_type in ["gpt", "opt", "bloom", "llama"]]
)
if args.is_seq2seq_model:
model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path)
elif args.is_decoder_only_model:
if args.load_fp16:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path, torch_dtype=torch.float16, device_map="auto"
)
else:
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path)
else:
raise ValueError(f"Unknown model type: {args.model_name_or_path}")
if args.use_gpu:
device = "cuda" if torch.cuda.is_available() else "cpu"
if args.load_fp16:
pass
else:
model = model.to(device)
else:
device = "cpu"
model.eval()
if args.is_decoder_only_model:
padding_side = "left"
else:
raise NotImplementedError(
"Need to check how to handle padding for seq2seq models when calling generate"
)
if "llama" in args.model_name_or_path:
tokenizer = LlamaTokenizer.from_pretrained(
args.model_name_or_path, padding_side=padding_side
)
model.config.pad_token_id = tokenizer.pad_token_id = 0 # unk
model.config.bos_token_id = 1
model.config.eos_token_id = 2
else:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, padding_side=padding_side
)
args.model_max_length = model.config.max_position_embeddings
return model, tokenizer, device
def add_idx(example, idx):
example.update({"idx": idx})
return example
def load_hf_dataset(args):
dataset_name, dataset_config_name = args.dataset_name, args.dataset_config_name
if dataset_name == "lfqa": | dataset = load_lfqa(args) | 0 | 2023-12-07 16:45:33+00:00 | 2k |
skyoux/SemAIM | main_knn.py | [
{
"identifier": "interpolate_pos_embed",
"path": "util/pos_embed.py",
"snippet": "def interpolate_pos_embed(model, checkpoint_model):\n if 'pos_embed' in checkpoint_model:\n pos_embed_checkpoint = checkpoint_model['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n n... | import os
import sys
import argparse
import numpy as np
import torch
import torch.distributed as dist
import torch.backends.cudnn as cudnn
import timm.models as timm_models
import util.misc as misc
from torch import nn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
from timm.models.layers import trunc_normal_
from util.pos_embed import interpolate_pos_embed
from models import models_vit | 887 | #!/usr/bin/env python
def extract_feature_pipeline(args):
######################## preparing data ... ########################
resize_size = 256 if args.input_size == 224 else 512
transform = pth_transforms.Compose([
pth_transforms.Resize(resize_size, interpolation=3),
pth_transforms.CenterCrop(args.input_size),
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
dataset_train = ReturnIndexDataset(os.path.join(args.data_path, 'train'), transform)
dataset_val = ReturnIndexDataset(os.path.join(args.data_path, 'val'), transform)
train_labels = torch.tensor(dataset_train.target).long()
test_labels = torch.tensor(dataset_val.target).long()
sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False,
)
print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
######################## building network ... ########################
| #!/usr/bin/env python
def extract_feature_pipeline(args):
######################## preparing data ... ########################
resize_size = 256 if args.input_size == 224 else 512
transform = pth_transforms.Compose([
pth_transforms.Resize(resize_size, interpolation=3),
pth_transforms.CenterCrop(args.input_size),
pth_transforms.ToTensor(),
pth_transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
dataset_train = ReturnIndexDataset(os.path.join(args.data_path, 'train'), transform)
dataset_val = ReturnIndexDataset(os.path.join(args.data_path, 'val'), transform)
train_labels = torch.tensor(dataset_train.target).long()
test_labels = torch.tensor(dataset_val.target).long()
sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=False,
drop_last=False,
)
print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
######################## building network ... ######################## | model = models_vit.__dict__[args.model]( | 1 | 2023-12-10 15:17:11+00:00 | 2k |
boweniac/autogan | autogan/oai/generate_utils.py | [
{
"identifier": "chat_completions",
"path": "autogan/oai/openai_utils.py",
"snippet": "def chat_completions(messages: list, api_key: Dict, request_timeout: int, max_retries: int,\n stream_mode: Optional[bool] = None):\n \"\"\"OpenAI interface and OpenAI like interface call\n\n ... | import time
from typing import Optional, List
from autogan.oai.openai_utils import chat_completions
from autogan.oai.config_utils import LLMConfig
from autogan.oai.count_tokens_utils import count_text_tokens
from autogan.utils.response import ResponseFuncType | 1,285 |
def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\
-> tuple[Optional[str], Optional[int]]:
"""Call the LLM interface
Currently, only the chatgpt model of openai (including azure) is adapted.
:param llm_config: LLM configuration.
:param messages:
:param agent_name:
:param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries
- main: agent replies
- idea: deep thoughts
- messages_summary: context compression
- text_summary: general summaries
- clue_summary: clue summaries
:param response_func: Used to return results to the interface or terminal.
:param stream_mode:
"""
# When a certain configuration in the configuration list fails to request,
# continue to try the next configuration until all configurations in the list are attempted.
loop = llm_config.len_of_api_key_list
for i in range(loop):
time.sleep(llm_config.request_interval_time)
api_key = llm_config.next_api_key
try:
completion_content = ""
completion_tokens = 0
index = 1
|
def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,
response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\
-> tuple[Optional[str], Optional[int]]:
"""Call the LLM interface
Currently, only the chatgpt model of openai (including azure) is adapted.
:param llm_config: LLM configuration.
:param messages:
:param agent_name:
:param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries
- main: agent replies
- idea: deep thoughts
- messages_summary: context compression
- text_summary: general summaries
- clue_summary: clue summaries
:param response_func: Used to return results to the interface or terminal.
:param stream_mode:
"""
# When a certain configuration in the configuration list fails to request,
# continue to try the next configuration until all configurations in the list are attempted.
loop = llm_config.len_of_api_key_list
for i in range(loop):
time.sleep(llm_config.request_interval_time)
api_key = llm_config.next_api_key
try:
completion_content = ""
completion_tokens = 0
index = 1 | for message in chat_completions(messages, api_key, llm_config.request_timeout, | 0 | 2023-12-06 03:24:34+00:00 | 2k |
JingHao99/IDR-Ingredients-oriented-Degradation-Reformulation | data/IDR_dataset.py | [
{
"identifier": "crop_HWC_img",
"path": "utils/data_util.py",
"snippet": "def crop_HWC_img(image, base=64):\r\n \"\"\"\r\n 裁切到multiple of base的size上\r\n :param image: H,W,C\r\n :param base: (int)\r\n :return:\r\n \"\"\"\r\n h = image.shape[0]\r\n w = image.shape[1]\r\n crop_h ... | import os
import random
import copy
import numpy as np
from PIL import Image, ImageFile
from torch.utils.data import Dataset
from torchvision.transforms import ToPILImage, Compose, RandomCrop, ToTensor
from utils.data_util import crop_HWC_img, random_augmentation, padding, onehot, smooth_one_hot
from sklearn.preprocessing import OneHotEncoder
from data.degradation_util import Degradation
| 1,386 | ImageFile.LOAD_TRUNCATED_IMAGES = True
class IDR_dataset(Dataset):
def __init__(self, dataset_opt):
super(IDR_dataset, self).__init__()
self.dataset_opt = dataset_opt
self.rs_ids = []
self.hazy_ids = []
| ImageFile.LOAD_TRUNCATED_IMAGES = True
class IDR_dataset(Dataset):
def __init__(self, dataset_opt):
super(IDR_dataset, self).__init__()
self.dataset_opt = dataset_opt
self.rs_ids = []
self.hazy_ids = []
| self.D = Degradation(dataset_opt)
| 5 | 2023-12-07 10:58:34+00:00 | 2k |
TACJu/Compositor | Compositor_Mask2Former/mask2former/modeling/meta_arch/mask_former_head.py | [
{
"identifier": "build_transformer_decoder",
"path": "Compositor_Mask2Former/mask2former/modeling/transformer_decoder/maskformer_transformer_decoder.py",
"snippet": "def build_transformer_decoder(cfg, in_channels, mask_classification=True):\n \"\"\"\n Build a instance embedding branch from `cfg.MO... | import logging
import fvcore.nn.weight_init as weight_init
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from ..transformer_decoder.maskformer_transformer_decoder import build_transformer_decoder
from ..pixel_decoder.fpn import build_pixel_decoder | 1,245 | # Copyright (c) Facebook, Inc. and its affiliates.
@SEM_SEG_HEADS_REGISTRY.register()
class MaskFormerHead(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Do not warn if train from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
newk = k.replace(prefix, prefix + "pixel_decoder.")
# logger.debug(f"{k} ==> {newk}")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
# figure out in_channels to transformer predictor
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": # for maskformer2
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
"loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
"transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE,
| # Copyright (c) Facebook, Inc. and its affiliates.
@SEM_SEG_HEADS_REGISTRY.register()
class MaskFormerHead(nn.Module):
_version = 2
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# Do not warn if train from scratch
scratch = True
logger = logging.getLogger(__name__)
for k in list(state_dict.keys()):
newk = k
if "sem_seg_head" in k and not k.startswith(prefix + "predictor"):
newk = k.replace(prefix, prefix + "pixel_decoder.")
# logger.debug(f"{k} ==> {newk}")
if newk != k:
state_dict[newk] = state_dict[k]
del state_dict[k]
scratch = False
if not scratch:
logger.warning(
f"Weight format of {self.__class__.__name__} have changed! "
"Please upgrade your models. Applying automatic conversion now ..."
)
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
# figure out in_channels to transformer predictor
if cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "transformer_encoder":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "pixel_embedding":
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM
elif cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE == "multi_scale_pixel_decoder": # for maskformer2
transformer_predictor_in_channels = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM
else:
transformer_predictor_in_channels = input_shape[cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE].channels
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
},
"ignore_value": cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
"num_classes": cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
"pixel_decoder": build_pixel_decoder(cfg, input_shape),
"loss_weight": cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT,
"transformer_in_feature": cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE, | "transformer_predictor": build_transformer_decoder( | 0 | 2023-12-12 11:49:28+00:00 | 2k |
Mirascope/mirascope | cookbook/api_example/api_example.py | [
{
"identifier": "OpenAIChat",
"path": "mirascope/chat/models.py",
"snippet": "class OpenAIChat:\n \"\"\"A convenience wrapper for the OpenAI Chat client.\"\"\"\n\n def __init__(self, model: str = \"gpt-3.5-turbo\", api_key: Optional[str] = None):\n \"\"\"Initializes an instance of `OpenAICh... | import os
from fastapi import FastAPI
from mirascope import OpenAIChat, Prompt | 1,168 | """A FastAPI app integrated with a multi-chain prompt for recommending books on a topic
and then asking which one is the best for beginners.
How to Run:
uvicorn api_example:app --reload
"""
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
app = FastAPI()
class BookRecommendationPrompt(Prompt):
"""
Can you recommend some books on {topic} in a list format?
"""
topic: str
class BestForBeginnersPrompt(Prompt):
"""
Given this list {book_list}, which one is the best for beginners?
"""
book_list: str
@app.post("/")
def root(book_recommendation: BookRecommendationPrompt):
"""Generates the best book for beginners on the given topic."""
| """A FastAPI app integrated with a multi-chain prompt for recommending books on a topic
and then asking which one is the best for beginners.
How to Run:
uvicorn api_example:app --reload
"""
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"
app = FastAPI()
class BookRecommendationPrompt(Prompt):
"""
Can you recommend some books on {topic} in a list format?
"""
topic: str
class BestForBeginnersPrompt(Prompt):
"""
Given this list {book_list}, which one is the best for beginners?
"""
book_list: str
@app.post("/")
def root(book_recommendation: BookRecommendationPrompt):
"""Generates the best book for beginners on the given topic.""" | model = OpenAIChat() | 0 | 2023-12-05 01:22:34+00:00 | 2k |
allisson/pysqsx | sqsx/queue.py | [
{
"identifier": "NoRetry",
"path": "sqsx/exceptions.py",
"snippet": "class NoRetry(Exception):\n \"\"\"\n This exception must be used when we need that the message will be removed from the queue\n \"\"\"\n\n pass"
},
{
"identifier": "Retry",
"path": "sqsx/exceptions.py",
"sni... | import logging
import signal
import time
from concurrent.futures import ThreadPoolExecutor, wait
from typing import Any, Callable, Dict, Optional
from pydantic import BaseModel, Field, PrivateAttr
from sqsx.exceptions import NoRetry, Retry
from sqsx.helper import backoff_calculator_seconds, base64_to_dict, dict_to_base64 | 1,453 | logger = logging.getLogger(__name__)
queue_url_regex = r"(http|https)[:][\/]{2}[a-zA-Z0-9-_:.]+[\/][0-9]{12}[\/]{1}[a-zA-Z0-9-_]{0,80}"
class BaseQueueMixin:
def consume_messages(
self, max_messages: int = 1, max_threads: int = 1, wait_seconds: int = 10, run_forever: bool = True
) -> None:
logger.info(f"Starting consuming tasks, queue_url={self.url}")
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
while True:
if self._should_consume_tasks_stop:
logger.info(f"Stopping consuming tasks, queue_url={self.url}")
break
response = self.sqs_client.receive_message(
QueueUrl=self.url,
AttributeNames=["All"],
MaxNumberOfMessages=min(max_messages, 10),
MessageAttributeNames=["All"],
)
sqs_messages = response.get("Messages", [])
if not sqs_messages:
logger.debug(
f"Waiting some seconds because no message was received, seconds={wait_seconds}, queue_url={self.url}"
)
time.sleep(wait_seconds)
continue
with ThreadPoolExecutor(max_workers=max_threads) as executor:
futures = []
for sqs_message in sqs_messages:
futures.append(executor.submit(self._consume_message, sqs_message))
wait(futures)
if not run_forever:
break
def _exit_gracefully(self, signal_num, current_stack_frame) -> None:
logger.info("Starting graceful shutdown process")
self._should_consume_tasks_stop = True
def _message_ack(self, sqs_message: dict) -> None:
receipt_handle = sqs_message["ReceiptHandle"]
self.sqs_client.delete_message(QueueUrl=self.url, ReceiptHandle=receipt_handle)
def _message_nack(
self,
sqs_message: dict,
min_backoff_seconds: Optional[int] = None,
max_backoff_seconds: Optional[int] = None,
) -> None:
min_backoff_seconds = min_backoff_seconds if min_backoff_seconds else self.min_backoff_seconds
max_backoff_seconds = max_backoff_seconds if max_backoff_seconds else self.max_backoff_seconds
receipt_handle = sqs_message["ReceiptHandle"]
receive_count = int(sqs_message["Attributes"]["ApproximateReceiveCount"]) - 1
timeout = backoff_calculator_seconds(receive_count, min_backoff_seconds, max_backoff_seconds)
self.sqs_client.change_message_visibility(
QueueUrl=self.url, ReceiptHandle=receipt_handle, VisibilityTimeout=timeout
)
class Queue(BaseModel, BaseQueueMixin):
url: str = Field(pattern=queue_url_regex)
sqs_client: Any
min_backoff_seconds: int = Field(default=30)
max_backoff_seconds: int = Field(default=900)
_handlers: Dict[str, Callable] = PrivateAttr(default={})
_should_consume_tasks_stop: bool = PrivateAttr(default=False)
def add_task(self, task_name: str, **task_kwargs) -> dict:
return self.sqs_client.send_message(
QueueUrl=self.url,
MessageAttributes={"TaskName": {"DataType": "String", "StringValue": task_name}},
MessageBody=dict_to_base64({"kwargs": task_kwargs}),
)
def add_task_handler(self, task_name: str, task_handler_function: Callable) -> None:
self._handlers.update({task_name: task_handler_function})
def _consume_message(self, sqs_message: dict) -> None:
message_id = sqs_message["MessageId"]
task_name_attribute = sqs_message["MessageAttributes"].get("TaskName")
if task_name_attribute is None:
logger.warning(f"Message without TaskName attribute, message_id={message_id}")
return self._message_nack(sqs_message)
task_name = task_name_attribute["StringValue"]
task_handler_function = self._handlers.get(task_name)
if task_handler_function is None:
logger.warning(f"Task handler not found, message_id={message_id}, task_name={task_name}")
return self._message_nack(sqs_message)
try:
message_data = base64_to_dict(sqs_message["Body"])
except Exception:
logger.exception(f"Invalid message body, message_id={message_id}, task_name={task_name}")
return self._message_nack(sqs_message)
kwargs = message_data["kwargs"]
context = {
"queue_url": self.url,
"task_name": task_name,
"sqs_message": sqs_message,
}
try:
task_handler_function(context, **kwargs)
except Retry as exc:
logger.info(
f"Received an sqsx.Retry, setting a custom backoff policy, message_id={message_id}, task_name={task_name}"
)
return self._message_nack(
sqs_message,
min_backoff_seconds=exc.min_backoff_seconds,
max_backoff_seconds=exc.max_backoff_seconds,
)
|
logger = logging.getLogger(__name__)
queue_url_regex = r"(http|https)[:][\/]{2}[a-zA-Z0-9-_:.]+[\/][0-9]{12}[\/]{1}[a-zA-Z0-9-_]{0,80}"
class BaseQueueMixin:
def consume_messages(
self, max_messages: int = 1, max_threads: int = 1, wait_seconds: int = 10, run_forever: bool = True
) -> None:
logger.info(f"Starting consuming tasks, queue_url={self.url}")
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
while True:
if self._should_consume_tasks_stop:
logger.info(f"Stopping consuming tasks, queue_url={self.url}")
break
response = self.sqs_client.receive_message(
QueueUrl=self.url,
AttributeNames=["All"],
MaxNumberOfMessages=min(max_messages, 10),
MessageAttributeNames=["All"],
)
sqs_messages = response.get("Messages", [])
if not sqs_messages:
logger.debug(
f"Waiting some seconds because no message was received, seconds={wait_seconds}, queue_url={self.url}"
)
time.sleep(wait_seconds)
continue
with ThreadPoolExecutor(max_workers=max_threads) as executor:
futures = []
for sqs_message in sqs_messages:
futures.append(executor.submit(self._consume_message, sqs_message))
wait(futures)
if not run_forever:
break
def _exit_gracefully(self, signal_num, current_stack_frame) -> None:
logger.info("Starting graceful shutdown process")
self._should_consume_tasks_stop = True
def _message_ack(self, sqs_message: dict) -> None:
receipt_handle = sqs_message["ReceiptHandle"]
self.sqs_client.delete_message(QueueUrl=self.url, ReceiptHandle=receipt_handle)
def _message_nack(
self,
sqs_message: dict,
min_backoff_seconds: Optional[int] = None,
max_backoff_seconds: Optional[int] = None,
) -> None:
min_backoff_seconds = min_backoff_seconds if min_backoff_seconds else self.min_backoff_seconds
max_backoff_seconds = max_backoff_seconds if max_backoff_seconds else self.max_backoff_seconds
receipt_handle = sqs_message["ReceiptHandle"]
receive_count = int(sqs_message["Attributes"]["ApproximateReceiveCount"]) - 1
timeout = backoff_calculator_seconds(receive_count, min_backoff_seconds, max_backoff_seconds)
self.sqs_client.change_message_visibility(
QueueUrl=self.url, ReceiptHandle=receipt_handle, VisibilityTimeout=timeout
)
class Queue(BaseModel, BaseQueueMixin):
url: str = Field(pattern=queue_url_regex)
sqs_client: Any
min_backoff_seconds: int = Field(default=30)
max_backoff_seconds: int = Field(default=900)
_handlers: Dict[str, Callable] = PrivateAttr(default={})
_should_consume_tasks_stop: bool = PrivateAttr(default=False)
def add_task(self, task_name: str, **task_kwargs) -> dict:
return self.sqs_client.send_message(
QueueUrl=self.url,
MessageAttributes={"TaskName": {"DataType": "String", "StringValue": task_name}},
MessageBody=dict_to_base64({"kwargs": task_kwargs}),
)
def add_task_handler(self, task_name: str, task_handler_function: Callable) -> None:
self._handlers.update({task_name: task_handler_function})
def _consume_message(self, sqs_message: dict) -> None:
message_id = sqs_message["MessageId"]
task_name_attribute = sqs_message["MessageAttributes"].get("TaskName")
if task_name_attribute is None:
logger.warning(f"Message without TaskName attribute, message_id={message_id}")
return self._message_nack(sqs_message)
task_name = task_name_attribute["StringValue"]
task_handler_function = self._handlers.get(task_name)
if task_handler_function is None:
logger.warning(f"Task handler not found, message_id={message_id}, task_name={task_name}")
return self._message_nack(sqs_message)
try:
message_data = base64_to_dict(sqs_message["Body"])
except Exception:
logger.exception(f"Invalid message body, message_id={message_id}, task_name={task_name}")
return self._message_nack(sqs_message)
kwargs = message_data["kwargs"]
context = {
"queue_url": self.url,
"task_name": task_name,
"sqs_message": sqs_message,
}
try:
task_handler_function(context, **kwargs)
except Retry as exc:
logger.info(
f"Received an sqsx.Retry, setting a custom backoff policy, message_id={message_id}, task_name={task_name}"
)
return self._message_nack(
sqs_message,
min_backoff_seconds=exc.min_backoff_seconds,
max_backoff_seconds=exc.max_backoff_seconds,
) | except NoRetry: | 0 | 2023-12-13 10:48:29+00:00 | 2k |
turbopuffer/turbopuffer-python | turbopuffer/backend.py | [
{
"identifier": "TurbopufferError",
"path": "turbopuffer/error.py",
"snippet": "class TurbopufferError(Exception):\n pass"
},
{
"identifier": "AuthenticationError",
"path": "turbopuffer/error.py",
"snippet": "class AuthenticationError(TurbopufferError):\n pass"
},
{
"identi... | import json
import time
import traceback
import requests
import turbopuffer as tpuf
import gzip
from turbopuffer.error import TurbopufferError, AuthenticationError, APIError
from typing import Optional, List | 839 |
def find_api_key(api_key: Optional[str] = None) -> str:
if api_key is not None:
return api_key
elif tpuf.api_key is not None:
return tpuf.api_key
else:
raise AuthenticationError("No turbopuffer API key was provided.\n"
"Set the TURBOPUFFER_API_KEY environment variable, "
"or pass `api_key=` when creating a Namespace.")
class Backend:
api_key: str
api_base_url: str
session: requests.Session
def __init__(self, api_key: Optional[str] = None):
self.api_key = find_api_key(api_key)
self.api_base_url = tpuf.api_base_url
self.session = requests.Session()
self.session.headers.update({
'Authorization': f'Bearer {self.api_key}',
'User-Agent': f'tpuf-python/{tpuf.VERSION} {requests.utils.default_headers()["User-Agent"]}',
})
def make_api_request(self,
*args: List[str],
method: Optional[str] = None,
query: Optional[dict] = None,
payload: Optional[dict] = None) -> dict:
start = time.monotonic()
if method is None and payload is not None:
method = 'POST'
request = requests.Request(method or 'GET', self.api_base_url + '/' + '/'.join(args))
if query is not None:
request.params = query
if payload is not None:
# before = time.monotonic()
if isinstance(payload, dict):
# before = time.monotonic()
json_payload = tpuf.dump_json_bytes(payload)
# print('Json time:', time.monotonic() - before)
else:
raise ValueError(f'Unsupported POST payload type: {type(payload)}')
gzip_payload = gzip.compress(json_payload, compresslevel=1)
# json_mebibytes = len(json_payload) / 1024 / 1024
# gzip_mebibytes = len(gzip_payload) / 1024 / 1024
# print(f'Gzip time ({json_mebibytes} MiB json / {gzip_mebibytes} MiB gzip):', time.monotonic() - before)
request.headers.update({
'Content-Type': 'application/json',
'Content-Encoding': 'gzip',
})
request.data = gzip_payload
prepared = self.session.prepare_request(request)
retry_attempts = 0
while retry_attempts < 3:
# before = time.monotonic()
try:
# print(f'Sending request:', prepared.path_url, prepared.headers)
response = self.session.send(prepared, allow_redirects=False)
# print(f'Request time (HTTP {response.status_code}):', time.monotonic() - before)
if response.status_code > 500:
response.raise_for_status()
content_type = response.headers.get('Content-Type', 'text/plain')
if content_type == 'application/json':
try:
content = response.json()
except json.JSONDecodeError as err:
|
def find_api_key(api_key: Optional[str] = None) -> str:
if api_key is not None:
return api_key
elif tpuf.api_key is not None:
return tpuf.api_key
else:
raise AuthenticationError("No turbopuffer API key was provided.\n"
"Set the TURBOPUFFER_API_KEY environment variable, "
"or pass `api_key=` when creating a Namespace.")
class Backend:
api_key: str
api_base_url: str
session: requests.Session
def __init__(self, api_key: Optional[str] = None):
self.api_key = find_api_key(api_key)
self.api_base_url = tpuf.api_base_url
self.session = requests.Session()
self.session.headers.update({
'Authorization': f'Bearer {self.api_key}',
'User-Agent': f'tpuf-python/{tpuf.VERSION} {requests.utils.default_headers()["User-Agent"]}',
})
def make_api_request(self,
*args: List[str],
method: Optional[str] = None,
query: Optional[dict] = None,
payload: Optional[dict] = None) -> dict:
start = time.monotonic()
if method is None and payload is not None:
method = 'POST'
request = requests.Request(method or 'GET', self.api_base_url + '/' + '/'.join(args))
if query is not None:
request.params = query
if payload is not None:
# before = time.monotonic()
if isinstance(payload, dict):
# before = time.monotonic()
json_payload = tpuf.dump_json_bytes(payload)
# print('Json time:', time.monotonic() - before)
else:
raise ValueError(f'Unsupported POST payload type: {type(payload)}')
gzip_payload = gzip.compress(json_payload, compresslevel=1)
# json_mebibytes = len(json_payload) / 1024 / 1024
# gzip_mebibytes = len(gzip_payload) / 1024 / 1024
# print(f'Gzip time ({json_mebibytes} MiB json / {gzip_mebibytes} MiB gzip):', time.monotonic() - before)
request.headers.update({
'Content-Type': 'application/json',
'Content-Encoding': 'gzip',
})
request.data = gzip_payload
prepared = self.session.prepare_request(request)
retry_attempts = 0
while retry_attempts < 3:
# before = time.monotonic()
try:
# print(f'Sending request:', prepared.path_url, prepared.headers)
response = self.session.send(prepared, allow_redirects=False)
# print(f'Request time (HTTP {response.status_code}):', time.monotonic() - before)
if response.status_code > 500:
response.raise_for_status()
content_type = response.headers.get('Content-Type', 'text/plain')
if content_type == 'application/json':
try:
content = response.json()
except json.JSONDecodeError as err: | raise APIError(response.status_code, traceback.format_exception_only(err), response.text) | 2 | 2023-12-12 06:52:27+00:00 | 2k |
neu-spiral/multi-label-emg | scripts/run_experiment_2.py | [
{
"identifier": "run_one",
"path": "multi_label_emg/slurm_utils.py",
"snippet": "def run_one(job: str, running_job_count: int, dry_run: bool):\n if ON_SLURM_CLUSTER:\n _run_one_slurm(job, running_job_count, slurm_logs_dir, dry_run)\n else:\n _run_one_local(job, running_job_count, dry... | import itertools
import numpy as np
from run_experiment_1 import Setting
from multi_label_emg.slurm_utils import run_one
from multi_label_emg.utils import PROJECT_ROOT | 675 | """
Experiment 2:
Using previous best parallel model type and classifier,
Vary method of subsetting synthetic doubles and how many to use.
"""
DRY_RUN = True
script = PROJECT_ROOT / "train.py"
python = PROJECT_ROOT.parent / "venv" / "bin" / "python"
assert script.exists()
assert python.exists()
subjects = [f"Subj{i}" for i in range(11)]
parallel_model_type = "ParallelA"
clf = "mlp"
doubles_methods = [
"subset_uniform",
"subset_near_mean",
"subset_spaced_quantiles",
"subsetInput_uniform",
"subsetInput_near_mean",
"subsetInput_spaced_quantiles",
]
settings = []
for subj, seed, doubles_method, doubles_frac in itertools.product(
subjects,
np.arange(3),
doubles_methods,
[0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5],
):
if doubles_method.startswith("subsetInput"):
frac = np.round(np.sqrt(doubles_frac), 4)
else:
frac = doubles_frac
settings.append(
Setting(
subject=subj,
seed=seed,
parallel_model_type=parallel_model_type,
clf_name=clf,
doubles_method=doubles_method,
fraction_doubles_per_class=frac,
singles_method="none",
rel_fraction_singles_per_class=1.0,
include_doubles_in_train=False,
feature_combine_type="avg",
)
)
if __name__ == "__main__":
if DRY_RUN:
print("#" * 80)
print("DRY RUN")
running_job_count = 0
for setting in settings:
job = f"{python} {script} "
job += f"--subject {setting.subject} "
job += f"--seed {setting.seed} "
job += f"--parallel_model_type {setting.parallel_model_type} "
job += f"--clf_name {setting.clf_name} "
job += f"--doubles_method {setting.doubles_method} "
job += f"--fraction_doubles_per_class {setting.fraction_doubles_per_class} "
job += f"--singles_method {setting.singles_method} "
job += f"--rel_fraction_singles_per_class {setting.rel_fraction_singles_per_class} "
job += f"--include_doubles_in_train {setting.include_doubles_in_train} "
| """
Experiment 2:
Using previous best parallel model type and classifier,
Vary method of subsetting synthetic doubles and how many to use.
"""
DRY_RUN = True
script = PROJECT_ROOT / "train.py"
python = PROJECT_ROOT.parent / "venv" / "bin" / "python"
assert script.exists()
assert python.exists()
subjects = [f"Subj{i}" for i in range(11)]
parallel_model_type = "ParallelA"
clf = "mlp"
doubles_methods = [
"subset_uniform",
"subset_near_mean",
"subset_spaced_quantiles",
"subsetInput_uniform",
"subsetInput_near_mean",
"subsetInput_spaced_quantiles",
]
settings = []
for subj, seed, doubles_method, doubles_frac in itertools.product(
subjects,
np.arange(3),
doubles_methods,
[0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5],
):
if doubles_method.startswith("subsetInput"):
frac = np.round(np.sqrt(doubles_frac), 4)
else:
frac = doubles_frac
settings.append(
Setting(
subject=subj,
seed=seed,
parallel_model_type=parallel_model_type,
clf_name=clf,
doubles_method=doubles_method,
fraction_doubles_per_class=frac,
singles_method="none",
rel_fraction_singles_per_class=1.0,
include_doubles_in_train=False,
feature_combine_type="avg",
)
)
if __name__ == "__main__":
if DRY_RUN:
print("#" * 80)
print("DRY RUN")
running_job_count = 0
for setting in settings:
job = f"{python} {script} "
job += f"--subject {setting.subject} "
job += f"--seed {setting.seed} "
job += f"--parallel_model_type {setting.parallel_model_type} "
job += f"--clf_name {setting.clf_name} "
job += f"--doubles_method {setting.doubles_method} "
job += f"--fraction_doubles_per_class {setting.fraction_doubles_per_class} "
job += f"--singles_method {setting.singles_method} "
job += f"--rel_fraction_singles_per_class {setting.rel_fraction_singles_per_class} "
job += f"--include_doubles_in_train {setting.include_doubles_in_train} " | run_one(job, running_job_count, dry_run=DRY_RUN) | 0 | 2023-12-12 16:50:34+00:00 | 2k |
lbcb-sci/GNNome | graph_dataset.py | [
{
"identifier": "get_config",
"path": "config.py",
"snippet": "def get_config():\n return {\n 'checkpoints_path': 'checkpoints',\n 'models_path': 'models',\n \n 'tool_dir': 'vendor',\n 'raven_dir': 'vendor/raven-1.8.1',\n 'hifiasm_dir': 'vendor/hifiasm-0.18.8... | import re
import os
import pickle
import subprocess
import dgl
import graph_parser
from dgl.data import DGLDataset
from config import get_config
from utils import preprocess_graph, add_positional_encoding, extract_contigs | 1,513 |
class AssemblyGraphDataset(DGLDataset):
def __init__(self, root, assembler, threads=32, generate=False):
self.root = os.path.abspath(root)
self.assembler = assembler
self.threads = threads
self.assembly_dir = os.path.join(self.root, self.assembler)
# print(self.assembly_dir)
if 'raw' not in os.listdir(self.root):
subprocess.run(f"mkdir 'raw'", shell=True, cwd=self.root)
if 'output' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'output'", shell=True, cwd=self.assembly_dir)
if f'processed' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'processed'", shell=True, cwd=self.assembly_dir)
if f'info' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'info'", shell=True, cwd=self.assembly_dir)
raw_dir = os.path.join(self.root, 'raw')
save_dir = os.path.join(self.assembly_dir, f'processed')
self.output_dir = os.path.join(self.assembly_dir, f'output')
self.info_dir = os.path.join(self.assembly_dir, f'info')
config = get_config()
raven_dir = config['raven_dir']
self.raven_path = os.path.join(raven_dir, f'build/bin/raven')
self.raven_path = os.path.abspath(self.raven_path)
hifiasm_dir = config['hifiasm_dir']
self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm')
self.hifiasm_path = os.path.abspath(self.hifiasm_path)
super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)
self.graph_list = []
if not generate:
for file in os.listdir(self.save_dir):
idx = int(file[:-4])
graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0]
graph = preprocess_graph(graph, self.root, idx)
|
class AssemblyGraphDataset(DGLDataset):
def __init__(self, root, assembler, threads=32, generate=False):
self.root = os.path.abspath(root)
self.assembler = assembler
self.threads = threads
self.assembly_dir = os.path.join(self.root, self.assembler)
# print(self.assembly_dir)
if 'raw' not in os.listdir(self.root):
subprocess.run(f"mkdir 'raw'", shell=True, cwd=self.root)
if 'output' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'output'", shell=True, cwd=self.assembly_dir)
if f'processed' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'processed'", shell=True, cwd=self.assembly_dir)
if f'info' not in os.listdir(self.assembly_dir):
subprocess.run(f"mkdir 'info'", shell=True, cwd=self.assembly_dir)
raw_dir = os.path.join(self.root, 'raw')
save_dir = os.path.join(self.assembly_dir, f'processed')
self.output_dir = os.path.join(self.assembly_dir, f'output')
self.info_dir = os.path.join(self.assembly_dir, f'info')
config = get_config()
raven_dir = config['raven_dir']
self.raven_path = os.path.join(raven_dir, f'build/bin/raven')
self.raven_path = os.path.abspath(self.raven_path)
hifiasm_dir = config['hifiasm_dir']
self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm')
self.hifiasm_path = os.path.abspath(self.hifiasm_path)
super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)
self.graph_list = []
if not generate:
for file in os.listdir(self.save_dir):
idx = int(file[:-4])
graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0]
graph = preprocess_graph(graph, self.root, idx) | graph = add_positional_encoding(graph) | 2 | 2023-12-08 04:45:45+00:00 | 2k |
altfoxie/ha-sberdevices | custom_components/sberdevices/light.py | [
{
"identifier": "DeviceAPI",
"path": "custom_components/sberdevices/api.py",
"snippet": "class DeviceAPI:\n def __init__(self, home: HomeAPI, device_id: str) -> None:\n self._home = home\n self._id = device_id\n\n @property\n def device(self) -> dict[str, any]:\n return sel... | import math
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP_KELVIN,
ATTR_HS_COLOR,
ATTR_WHITE,
ColorMode,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.util.color import brightness_to_value, value_to_brightness
from homeassistant.util.scaling import scale_ranged_value_to_int_range
from .api import DeviceAPI, HomeAPI
from .const import DOMAIN | 1,211 | """Support for Abode Security System lights."""
from __future__ import annotations
# hardcode xd
COLOR_TEMP_MIN = 2700
COLOR_TEMP_MAX = 6500
COLOR_TEMP_RANGE = (COLOR_TEMP_MIN, COLOR_TEMP_MAX)
H_RANGE = (0, 360)
S_RANGE = (0, 100)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
| """Support for Abode Security System lights."""
from __future__ import annotations
# hardcode xd
COLOR_TEMP_MIN = 2700
COLOR_TEMP_MAX = 6500
COLOR_TEMP_RANGE = (COLOR_TEMP_MIN, COLOR_TEMP_MAX)
H_RANGE = (0, 360)
S_RANGE = (0, 100)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None: | home: HomeAPI = hass.data[DOMAIN][entry.entry_id]["home"] | 2 | 2023-12-09 15:27:27+00:00 | 2k |
amadad/agentcy3 | agency_swarm/tools/tool_factory.py | [
{
"identifier": "BaseTool",
"path": "agency_swarm/tools/base_tool.py",
"snippet": "class BaseTool(OpenAISchema, ABC):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @abstractmethod\n def run(self, **kwargs):\n pass"
},
{
"identifier": "reference_schema",
... | import inspect
from typing import Any, Dict, List, Type
from pydantic import create_model, Field
from .base_tool import BaseTool
from ..util.schema import reference_schema
from langchain.tools import format_tool_to_openai_function | 1,523 | except ImportError:
raise ImportError("You must install langchain to use this method.")
if inspect.isclass(tool):
tool = tool()
def callback(self):
tool_input = self.model_dump()
try:
return tool.run(tool_input)
except TypeError:
if len(tool_input) == 1:
return tool.run(list(tool_input.values())[0])
else:
raise TypeError(f"Error parsing input for tool '{tool.__class__.__name__}' Please open an issue "
f"on github.")
return ToolFactory.from_openai_schema(
format_tool_to_openai_function(tool),
callback
)
@staticmethod
def from_openai_schema(schema: Dict[str, Any], callback: Any):
"""
Converts an OpenAI schema into a BaseTool. Nested propoerties without refs are not supported yet.
:param schema:
:param callback:
:return:
"""
def resolve_ref(ref: str, defs: Dict[str, Any]) -> Any:
# Extract the key from the reference
key = ref.split('/')[-1]
if key in defs:
return defs[key]
else:
raise ValueError(f"Reference '{ref}' not found in definitions")
def create_fields(schema: Dict[str, Any], type_mapping: Dict[str, Type[Any]], required_fields: List[str],
defs: Dict[str, Any]) -> Dict[str, Any]:
fields = {}
for prop, details in schema.items():
alias = None
if prop.startswith('_'):
alias = prop
prop = prop.lstrip('_')
json_type = details['type']
if json_type in type_mapping:
field_type = type_mapping[json_type]
field_description = details.get('description', '')
is_required = prop in required_fields
field_default = ... if is_required else None
if json_type == 'array':
items_schema = details.get('items', {})
if 'type' in items_schema:
item_type = type_mapping[items_schema['type']]
field_type = List[item_type]
elif 'properties' in items_schema: # Handling direct nested object in array
nested_properties = items_schema['properties']
nested_required = items_schema.get('required', [])
nested_model_name = items_schema.get('title', f"{prop}Item")
nested_fields = create_fields(nested_properties, type_mapping, nested_required, defs)
nested_model = create_model(nested_model_name, **nested_fields)
field_type = List[nested_model]
elif '$ref' in items_schema:
ref_model = resolve_ref(items_schema['$ref'], defs)
field_type = List[ref_model]
else:
raise ValueError("Array items must have a 'type', 'properties', or '$ref'")
elif json_type == 'object':
if 'properties' in details:
nested_properties = details['properties']
nested_required = details.get('required', [])
nested_model_name = details.get('title', f"{prop}Model")
nested_fields = create_fields(nested_properties, type_mapping, nested_required, defs)
field_type = create_model(nested_model_name, **nested_fields)
elif '$ref' in details:
ref_model = resolve_ref(details['$ref'], defs)
field_type = ref_model
else:
raise ValueError("Object must have 'properties' or '$ref'")
fields[prop] = (
field_type, Field(default=field_default, description=field_description, alias=alias))
else:
raise ValueError(f"Unsupported type '{json_type}' for property '{prop}'")
return fields
type_mapping = {
'string': str,
'integer': int,
'number': float,
'boolean': bool,
'array': List,
'object': dict,
'null': type(None),
}
schema = reference_schema(schema)
name = schema['name']
description = schema['description']
properties = schema['parameters']['properties']
required_fields = schema['parameters'].get('required', [])
# Add definitions ($defs) to type_mapping
defs = {k: create_model(k, **create_fields(v['properties'], type_mapping, v.get('required', []), {})) for k, v
in schema['parameters'].get('$defs', {}).items()}
type_mapping.update(defs)
fields = create_fields(properties, type_mapping, required_fields, defs)
# Dynamically creating the Pydantic model
model = create_model(name, **fields)
|
class ToolFactory:
@staticmethod
def from_langchain_tools(tools: List):
"""
Converts a list of langchain tools into a list of BaseTools.
:param tools: A list of langchain tools.
:return: A list of BaseTools.
"""
converted_tools = []
for tool in tools:
converted_tools.append(ToolFactory.from_langchain_tool(tool))
return converted_tools
@staticmethod
def from_langchain_tool(tool):
"""
Converts a langchain tool into a BaseTool.
:param tool: A langchain tool.
:return: A BaseTool.
"""
try:
except ImportError:
raise ImportError("You must install langchain to use this method.")
if inspect.isclass(tool):
tool = tool()
def callback(self):
tool_input = self.model_dump()
try:
return tool.run(tool_input)
except TypeError:
if len(tool_input) == 1:
return tool.run(list(tool_input.values())[0])
else:
raise TypeError(f"Error parsing input for tool '{tool.__class__.__name__}' Please open an issue "
f"on github.")
return ToolFactory.from_openai_schema(
format_tool_to_openai_function(tool),
callback
)
@staticmethod
def from_openai_schema(schema: Dict[str, Any], callback: Any):
"""
Converts an OpenAI schema into a BaseTool. Nested propoerties without refs are not supported yet.
:param schema:
:param callback:
:return:
"""
def resolve_ref(ref: str, defs: Dict[str, Any]) -> Any:
# Extract the key from the reference
key = ref.split('/')[-1]
if key in defs:
return defs[key]
else:
raise ValueError(f"Reference '{ref}' not found in definitions")
def create_fields(schema: Dict[str, Any], type_mapping: Dict[str, Type[Any]], required_fields: List[str],
defs: Dict[str, Any]) -> Dict[str, Any]:
fields = {}
for prop, details in schema.items():
alias = None
if prop.startswith('_'):
alias = prop
prop = prop.lstrip('_')
json_type = details['type']
if json_type in type_mapping:
field_type = type_mapping[json_type]
field_description = details.get('description', '')
is_required = prop in required_fields
field_default = ... if is_required else None
if json_type == 'array':
items_schema = details.get('items', {})
if 'type' in items_schema:
item_type = type_mapping[items_schema['type']]
field_type = List[item_type]
elif 'properties' in items_schema: # Handling direct nested object in array
nested_properties = items_schema['properties']
nested_required = items_schema.get('required', [])
nested_model_name = items_schema.get('title', f"{prop}Item")
nested_fields = create_fields(nested_properties, type_mapping, nested_required, defs)
nested_model = create_model(nested_model_name, **nested_fields)
field_type = List[nested_model]
elif '$ref' in items_schema:
ref_model = resolve_ref(items_schema['$ref'], defs)
field_type = List[ref_model]
else:
raise ValueError("Array items must have a 'type', 'properties', or '$ref'")
elif json_type == 'object':
if 'properties' in details:
nested_properties = details['properties']
nested_required = details.get('required', [])
nested_model_name = details.get('title', f"{prop}Model")
nested_fields = create_fields(nested_properties, type_mapping, nested_required, defs)
field_type = create_model(nested_model_name, **nested_fields)
elif '$ref' in details:
ref_model = resolve_ref(details['$ref'], defs)
field_type = ref_model
else:
raise ValueError("Object must have 'properties' or '$ref'")
fields[prop] = (
field_type, Field(default=field_default, description=field_description, alias=alias))
else:
raise ValueError(f"Unsupported type '{json_type}' for property '{prop}'")
return fields
type_mapping = {
'string': str,
'integer': int,
'number': float,
'boolean': bool,
'array': List,
'object': dict,
'null': type(None),
}
schema = reference_schema(schema)
name = schema['name']
description = schema['description']
properties = schema['parameters']['properties']
required_fields = schema['parameters'].get('required', [])
# Add definitions ($defs) to type_mapping
defs = {k: create_model(k, **create_fields(v['properties'], type_mapping, v.get('required', []), {})) for k, v
in schema['parameters'].get('$defs', {}).items()}
type_mapping.update(defs)
fields = create_fields(properties, type_mapping, required_fields, defs)
# Dynamically creating the Pydantic model
model = create_model(name, **fields)
| tool = type(name, (BaseTool, model), { | 0 | 2023-12-14 01:40:32+00:00 | 2k |
Deltares/imod-python | imod/tests/test_flow/test_flow_dis.py | [
{
"identifier": "TimeDiscretization",
"path": "imod/flow/dis.py",
"snippet": "class TimeDiscretization(Package):\n \"\"\"\n Time discretisation package class.\n\n Parameters\n ----------\n timestep_duration: xr.DataArray\n is the length of the current stress period (PERLEN). If the... | import cftime
import numpy as np
import pytest
import xarray as xr
from imod.flow import TimeDiscretization
from imod.wq import timeutil | 973 |
@pytest.fixture(scope="module")
def time_discretization(three_days):
times = three_days
|
@pytest.fixture(scope="module")
def time_discretization(three_days):
times = three_days | duration = timeutil.timestep_duration(times, False) | 1 | 2023-12-08 13:57:59+00:00 | 2k |
Dong142857/Live3DPortrait | models/eg3d/volumetric_rendering/renderer.py | [
{
"identifier": "MipRayMarcher2",
"path": "models/eg3d/volumetric_rendering/ray_marcher.py",
"snippet": "class MipRayMarcher2(nn.Module):\n def __init__(self):\n super().__init__()\n\n\n def run_forward(self, colors, densities, depths, rendering_options):\n deltas = depths[:, :, 1:] ... | import math
import torch
import torch.nn as nn
from models.eg3d.volumetric_rendering.ray_marcher import MipRayMarcher2
from models.eg3d.volumetric_rendering import math_utils | 1,563 | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""
The renderer is a module that takes in rays, decides where to sample along each
ray, and computes pixel colors using the volume rendering equation.
"""
def generate_planes():
"""
Defines planes by the three vectors that form the "axes" of the
plane. Should work with arbitrary number of planes and planes of
arbitrary orientation.
"""
return torch.tensor([[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
[[1, 0, 0],
[0, 0, 1],
[0, 1, 0]],
[[0, 0, 1],
[1, 0, 0],
[0, 1, 0]]], dtype=torch.float32)
def project_onto_planes(planes, coordinates):
"""
Does a projection of a 3D point onto a batch of 2D planes,
returning 2D plane coordinates.
Takes plane axes of shape n_planes, 3, 3
# Takes coordinates of shape N, M, 3
# returns projections of shape N*n_planes, M, 2
"""
N, M, C = coordinates.shape
n_planes, _, _ = planes.shape
coordinates = coordinates.unsqueeze(1).expand(-1, n_planes, -1, -1).reshape(N*n_planes, M, 3)
inv_planes = torch.linalg.inv(planes).unsqueeze(0).expand(N, -1, -1, -1).reshape(N*n_planes, 3, 3)
projections = torch.bmm(coordinates, inv_planes)
return projections[..., :2]
def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None):
assert padding_mode == 'zeros'
N, n_planes, C, H, W = plane_features.shape
_, M, _ = coordinates.shape
plane_features = plane_features.view(N*n_planes, C, H, W)
coordinates = (2/box_warp) * coordinates # TODO: add specific box bounds
projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1)
output_features = torch.nn.functional.grid_sample(plane_features, projected_coordinates.float(), mode=mode, padding_mode=padding_mode, align_corners=False).permute(0, 3, 2, 1).reshape(N, n_planes, M, C)
return output_features
def sample_from_3dgrid(grid, coordinates):
"""
Expects coordinates in shape (batch_size, num_points_per_batch, 3)
Expects grid in shape (1, channels, H, W, D)
(Also works if grid has batch size)
Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels)
"""
batch_size, n_coords, n_dims = coordinates.shape
sampled_features = torch.nn.functional.grid_sample(grid.expand(batch_size, -1, -1, -1, -1),
coordinates.reshape(batch_size, 1, 1, -1, n_dims),
mode='bilinear', padding_mode='zeros', align_corners=False)
N, C, H, W, D = sampled_features.shape
sampled_features = sampled_features.permute(0, 4, 3, 2, 1).reshape(N, H*W*D, C)
return sampled_features
class ImportanceRenderer(torch.nn.Module):
def __init__(self):
super().__init__()
| # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""
The renderer is a module that takes in rays, decides where to sample along each
ray, and computes pixel colors using the volume rendering equation.
"""
def generate_planes():
"""
Defines planes by the three vectors that form the "axes" of the
plane. Should work with arbitrary number of planes and planes of
arbitrary orientation.
"""
return torch.tensor([[[1, 0, 0],
[0, 1, 0],
[0, 0, 1]],
[[1, 0, 0],
[0, 0, 1],
[0, 1, 0]],
[[0, 0, 1],
[1, 0, 0],
[0, 1, 0]]], dtype=torch.float32)
def project_onto_planes(planes, coordinates):
"""
Does a projection of a 3D point onto a batch of 2D planes,
returning 2D plane coordinates.
Takes plane axes of shape n_planes, 3, 3
# Takes coordinates of shape N, M, 3
# returns projections of shape N*n_planes, M, 2
"""
N, M, C = coordinates.shape
n_planes, _, _ = planes.shape
coordinates = coordinates.unsqueeze(1).expand(-1, n_planes, -1, -1).reshape(N*n_planes, M, 3)
inv_planes = torch.linalg.inv(planes).unsqueeze(0).expand(N, -1, -1, -1).reshape(N*n_planes, 3, 3)
projections = torch.bmm(coordinates, inv_planes)
return projections[..., :2]
def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None):
assert padding_mode == 'zeros'
N, n_planes, C, H, W = plane_features.shape
_, M, _ = coordinates.shape
plane_features = plane_features.view(N*n_planes, C, H, W)
coordinates = (2/box_warp) * coordinates # TODO: add specific box bounds
projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1)
output_features = torch.nn.functional.grid_sample(plane_features, projected_coordinates.float(), mode=mode, padding_mode=padding_mode, align_corners=False).permute(0, 3, 2, 1).reshape(N, n_planes, M, C)
return output_features
def sample_from_3dgrid(grid, coordinates):
"""
Expects coordinates in shape (batch_size, num_points_per_batch, 3)
Expects grid in shape (1, channels, H, W, D)
(Also works if grid has batch size)
Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels)
"""
batch_size, n_coords, n_dims = coordinates.shape
sampled_features = torch.nn.functional.grid_sample(grid.expand(batch_size, -1, -1, -1, -1),
coordinates.reshape(batch_size, 1, 1, -1, n_dims),
mode='bilinear', padding_mode='zeros', align_corners=False)
N, C, H, W, D = sampled_features.shape
sampled_features = sampled_features.permute(0, 4, 3, 2, 1).reshape(N, H*W*D, C)
return sampled_features
class ImportanceRenderer(torch.nn.Module):
def __init__(self):
super().__init__() | self.ray_marcher = MipRayMarcher2() | 0 | 2023-12-09 15:18:53+00:00 | 2k |
lumi-ua/goit-project2-django-assistant | personal_assistant/app_contacts/views.py | [
{
"identifier": "ContactForm",
"path": "personal_assistant/app_contacts/forms.py",
"snippet": "class ContactForm(ModelForm):\n fullname = CharField(max_length=255, \n widget=forms.TextInput(attrs={'placeholder': 'Name Lastname', \"class\": \"form-control\"}))\n address = CharField(max_lengt... | from datetime import date
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Q
from django.urls import reverse_lazy
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from datetime import date, timedelta
from .forms import ContactForm, PhoneNumberForm, EmailAddressForm
from .models import Contact, PhoneNumber, EmailAddress | 682 | # from django.db.models import Q
# Create your views here.
@login_required
def dashboard(request):
return render(request, 'app_contacts/dashboard.html', {"title": "Dashboard contact operations"})
@login_required
def contact(request):
contact_form = ContactForm()
| # from django.db.models import Q
# Create your views here.
@login_required
def dashboard(request):
return render(request, 'app_contacts/dashboard.html', {"title": "Dashboard contact operations"})
@login_required
def contact(request):
contact_form = ContactForm() | phone_number_form = PhoneNumberForm() | 1 | 2023-12-08 17:26:59+00:00 | 2k |
SubConv/SubConv | modules/convert/converter.py | [
{
"identifier": "RandUserAgent",
"path": "modules/convert/util.py",
"snippet": "def RandUserAgent() -> str:\n return userAgents[random.randint(0, len(userAgents) - 1)]"
},
{
"identifier": "get",
"path": "modules/convert/util.py",
"snippet": "def get(content):\n if content is None:\... | from modules.convert.util import RandUserAgent
from modules.convert.util import get
from modules.convert.util import uniqueName
from modules.convert.util import urlSafe
from modules.convert.util import base64RawStdDecode
from modules.convert.util import base64RawURLDecode
from modules.convert.v import handleVShareLink
import json
import base64
import urllib.parse as urlparse
import distutils.util | 1,548 |
async def ConvertsV2Ray(buf):
try:
data = base64.b64decode(buf).decode("utf-8")
except:
try:
data = buf.decode("utf-8")
except:
data = buf
arr = data.splitlines()
proxies = []
names = {}
for line in arr:
if line == "":
continue
if -1 == line.find("://"):
continue
else:
scheme, body = line.split("://", 1)
scheme = scheme.lower()
if scheme == "hysteria":
try:
urlHysteria = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlHysteria.query))
name = uniqueName(names, urlparse.unquote(urlHysteria.fragment))
hysteria = {}
hysteria["name"] = name
hysteria["type"] = scheme
hysteria["server"] = urlHysteria.hostname
hysteria["port"] = urlHysteria.port
|
async def ConvertsV2Ray(buf):
try:
data = base64.b64decode(buf).decode("utf-8")
except:
try:
data = buf.decode("utf-8")
except:
data = buf
arr = data.splitlines()
proxies = []
names = {}
for line in arr:
if line == "":
continue
if -1 == line.find("://"):
continue
else:
scheme, body = line.split("://", 1)
scheme = scheme.lower()
if scheme == "hysteria":
try:
urlHysteria = urlparse.urlparse(line)
except:
continue
query = dict(urlparse.parse_qsl(urlHysteria.query))
name = uniqueName(names, urlparse.unquote(urlHysteria.fragment))
hysteria = {}
hysteria["name"] = name
hysteria["type"] = scheme
hysteria["server"] = urlHysteria.hostname
hysteria["port"] = urlHysteria.port | hysteria["sni"] = query.get("peer") | 1 | 2023-12-06 12:57:11+00:00 | 2k |
Opt-Mucca/PySCIPOpt-ML | src/pyscipopt_ml/add_predictor.py | [
{
"identifier": "NotRegistered",
"path": "src/pyscipopt_ml/exceptions.py",
"snippet": "class NotRegistered(Exception):\n \"\"\"Predictor is not supported by pyscipopt-ml.\"\"\"\n\n def __init__(self, predictor):\n super().__init__(\n f\"Object of type {predictor} is not registere... | from warnings import warn
from .exceptions import NotRegistered
from .modelling.get_convertor import get_convertor
from .registered_predictors import registered_predictors | 820 |
def add_predictor_constr(
scip_model, predictor, input_vars, output_vars=None, unique_naming_prefix="p_", **kwargs
):
"""Formulate predictor in PySCIPOpt model.
The formulation predicts the values of output_vars using input_vars according to
predictor.
Parameters
----------
scip_model : PySCIPOpt Model
The pyscipopt model where the predictor should be inserted.
predictor:
The predictor to insert.
input_vars : list or np.ndarray
Decision variables used as input for predictor in scip_model.
output_vars : list or np.ndarray, optional
Decision variables used as output for predictor in scip_model.
unique_naming_prefix : str, optional
A unique naming prefix that is used before all variable and constraint names. This parameter is important if
the SCIP model is later printed to file and many predictors are added to the same SCIP model.
Returns
-------
AbstractPredictorConstr
Object containing information about what was added to scip_model to insert the
predictor in it
Note
----
The parameters `input_vars` and `output_vars` can be either
* Lists of variables (List of lists etc. for higher dimensional input)
* np.ndarray of variables
For internal use in the package they are cast into a np.ndarray of variables
They should have dimensions that conform with the input/output of the predictor.
We denote by `n_samples` the number of samples (or objects) that we want to predict with our predictor.
We denote by `n_features` the dimension of the input of the predictor.
We denote by `n_output` the dimension of the output.
The `input_vars` are therefore of shape `(n_samples, n_features)` and the `output_vars` of
shape `(n_samples, n_outputs)`. In the case of `output_vars` not being passed, appropriate variables will
be automatically created.
In the case of `n_samples == 1` the first dimension can simply be removed from the input.
"""
convertors = registered_predictors()
|
def add_predictor_constr(
scip_model, predictor, input_vars, output_vars=None, unique_naming_prefix="p_", **kwargs
):
"""Formulate predictor in PySCIPOpt model.
The formulation predicts the values of output_vars using input_vars according to
predictor.
Parameters
----------
scip_model : PySCIPOpt Model
The pyscipopt model where the predictor should be inserted.
predictor:
The predictor to insert.
input_vars : list or np.ndarray
Decision variables used as input for predictor in scip_model.
output_vars : list or np.ndarray, optional
Decision variables used as output for predictor in scip_model.
unique_naming_prefix : str, optional
A unique naming prefix that is used before all variable and constraint names. This parameter is important if
the SCIP model is later printed to file and many predictors are added to the same SCIP model.
Returns
-------
AbstractPredictorConstr
Object containing information about what was added to scip_model to insert the
predictor in it
Note
----
The parameters `input_vars` and `output_vars` can be either
* Lists of variables (List of lists etc. for higher dimensional input)
* np.ndarray of variables
For internal use in the package they are cast into a np.ndarray of variables
They should have dimensions that conform with the input/output of the predictor.
We denote by `n_samples` the number of samples (or objects) that we want to predict with our predictor.
We denote by `n_features` the dimension of the input of the predictor.
We denote by `n_output` the dimension of the output.
The `input_vars` are therefore of shape `(n_samples, n_features)` and the `output_vars` of
shape `(n_samples, n_outputs)`. In the case of `output_vars` not being passed, appropriate variables will
be automatically created.
In the case of `n_samples == 1` the first dimension can simply be removed from the input.
"""
convertors = registered_predictors() | convertor = get_convertor(predictor, convertors) | 1 | 2023-12-10 20:28:22+00:00 | 2k |
DongqiShen/qwen-fast | generate.py | [
{
"identifier": "Transformer",
"path": "model.py",
"snippet": "class Transformer(nn.Module):\n def __init__(self, config: ModelArgs) -> None:\n super().__init__()\n self.config = config\n\n self.tok_embeddings = nn.Embedding(config.vocab_size, config.dim)\n self.layers = n... | import sys
import time
import itertools
import torch
import torch._inductor.config
import torch._dynamo.config
import contextlib
import argparse
from pathlib import Path
from typing import Optional, Tuple
from model import Transformer
from tp import maybe_init_dist
from sentencepiece import SentencePieceProcessor
from quantize import WeightOnlyInt8QuantHandler
from quantize import WeightOnlyInt4QuantHandler
from tp import apply_tp | 1,107 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True # Experimental feature to reduce compilation times, will be on by default in future
# support running without installing as a package
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
def multinomial_sample_one_no_sync(probs_sort): # Does multinomial sampling without a cuda synchronization
q = torch.empty_like(probs_sort).exponential_(1)
return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
def logits_to_probs(logits, temperature: float = 1.0, top_k: Optional[int] = None):
logits = logits / max(temperature, 1e-5)
if top_k is not None:
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
pivot = v.select(-1, -1).unsqueeze(-1)
logits = torch.where(logits < pivot, -float("Inf"), logits)
probs = torch.nn.functional.softmax(logits, dim=-1)
return probs
def sample(logits, temperature: float = 1.0, top_k: Optional[int] = None):
probs = logits_to_probs(logits[0, -1], temperature, top_k)
idx_next = multinomial_sample_one_no_sync(probs)
return idx_next, probs
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
torch._inductor.config.coordinate_descent_tuning = True
torch._inductor.config.triton.unique_kernel_names = True
torch._inductor.config.fx_graph_cache = True # Experimental feature to reduce compilation times, will be on by default in future
# support running without installing as a package
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
def multinomial_sample_one_no_sync(probs_sort): # Does multinomial sampling without a cuda synchronization
q = torch.empty_like(probs_sort).exponential_(1)
return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int)
def logits_to_probs(logits, temperature: float = 1.0, top_k: Optional[int] = None):
logits = logits / max(temperature, 1e-5)
if top_k is not None:
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
pivot = v.select(-1, -1).unsqueeze(-1)
logits = torch.where(logits < pivot, -float("Inf"), logits)
probs = torch.nn.functional.softmax(logits, dim=-1)
return probs
def sample(logits, temperature: float = 1.0, top_k: Optional[int] = None):
probs = logits_to_probs(logits[0, -1], temperature, top_k)
idx_next = multinomial_sample_one_no_sync(probs)
return idx_next, probs
| def prefill(model: Transformer, x: torch.Tensor, input_pos: torch.Tensor, **sampling_kwargs) -> torch.Tensor: | 0 | 2023-12-05 14:07:19+00:00 | 2k |
Yanyutin753/CowAndPandoraNext | channel/chat_channel.py | [
{
"identifier": "Channel",
"path": "channel/channel.py",
"snippet": "class Channel(object):\n NOT_SUPPORT_REPLYTYPE = [ReplyType.VOICE, ReplyType.IMAGE]\n\n def startup(self):\n \"\"\"\n init channel\n \"\"\"\n raise NotImplementedError\n\n def handle_text(self, msg)... | import os
import re
import threading
import time
from asyncio import CancelledError
from concurrent.futures import Future, ThreadPoolExecutor
from bridge.context import *
from bridge.reply import *
from channel.channel import Channel
from common.dequeue import Dequeue
from common.log import logger
from config import conf
from plugins import *
from voice.audio_convert import any_to_wav | 1,113 |
try:
except Exception as e:
pass
# 抽象类, 它包含了与消息通道无关的通用处理逻辑
class ChatChannel(Channel):
name = None # 登录的用户名
user_id = None # 登录的用户id
futures = {} # 记录每个session_id提交到线程池的future对象, 用于重置会话时把没执行的future取消掉,正在执行的不会被取消
sessions = {} # 用于控制并发,每个session_id同时只能有一个context在处理
lock = threading.Lock() # 用于控制对sessions的访问
handler_pool = ThreadPoolExecutor(max_workers=8) # 处理消息的线程池
def __init__(self):
_thread = threading.Thread(target=self.consume)
_thread.setDaemon(True)
_thread.start()
# 根据消息构造context,消息内容相关的触发项写在这里
def _compose_context(self, ctype: ContextType, content, **kwargs):
context = Context(ctype, content)
context.kwargs = kwargs
# context首次传入时,origin_ctype是None,
# 引入的起因是:当输入语音时,会嵌套生成两个context,第一步语音转文本,第二步通过文本生成文字回复。
# origin_ctype用于第二步文本回复时,判断是否需要匹配前缀,如果是私聊的语音,就不需要匹配前缀
if "origin_ctype" not in context:
context["origin_ctype"] = ctype
# context首次传入时,receiver是None,根据类型设置receiver
first_in = "receiver" not in context
# 群名匹配过程,设置session_id和receiver
if first_in: # context首次传入时,receiver是None,根据类型设置receiver
|
try:
except Exception as e:
pass
# 抽象类, 它包含了与消息通道无关的通用处理逻辑
class ChatChannel(Channel):
name = None # 登录的用户名
user_id = None # 登录的用户id
futures = {} # 记录每个session_id提交到线程池的future对象, 用于重置会话时把没执行的future取消掉,正在执行的不会被取消
sessions = {} # 用于控制并发,每个session_id同时只能有一个context在处理
lock = threading.Lock() # 用于控制对sessions的访问
handler_pool = ThreadPoolExecutor(max_workers=8) # 处理消息的线程池
def __init__(self):
_thread = threading.Thread(target=self.consume)
_thread.setDaemon(True)
_thread.start()
# 根据消息构造context,消息内容相关的触发项写在这里
def _compose_context(self, ctype: ContextType, content, **kwargs):
context = Context(ctype, content)
context.kwargs = kwargs
# context首次传入时,origin_ctype是None,
# 引入的起因是:当输入语音时,会嵌套生成两个context,第一步语音转文本,第二步通过文本生成文字回复。
# origin_ctype用于第二步文本回复时,判断是否需要匹配前缀,如果是私聊的语音,就不需要匹配前缀
if "origin_ctype" not in context:
context["origin_ctype"] = ctype
# context首次传入时,receiver是None,根据类型设置receiver
first_in = "receiver" not in context
# 群名匹配过程,设置session_id和receiver
if first_in: # context首次传入时,receiver是None,根据类型设置receiver | config = conf() | 3 | 2023-12-14 15:21:17+00:00 | 2k |
nerdslab/bams | bams/models/bams.py | [
{
"identifier": "MLP",
"path": "bams/models/mlp.py",
"snippet": "class MLP(nn.Module):\n r\"\"\"Flexible Multi-layer perceptron model, with optional batchnorm layers.\n\n Args:\n hidden_layers (list): List of layer dimensions, from input layer to output\n layer. If first input si... | from collections import OrderedDict
from bams.models import TemporalConvNet, MLP
import torch
import torch.nn as nn | 1,395 |
class BAMS(nn.Module):
r"""BAMS model.
Args:
input_size (int): Number of input features.
predictor (dict): Parameters for the predictor MLP.
encoders (dict[dict]): A dictionnary of encoders, where each key is the name of
the encoder, and each value is a dictionnary of parameters for the encoder.
Each encoder is a TemporalConvNet.
"""
def __init__(
self,
input_size,
*,
predictor=None,
**encoder_kwargs,
):
super().__init__()
self.input_size = input_size
self.representation_size = 0
encoders = dict()
for name, tcn_kwargs in encoder_kwargs.items():
assert "num_inputs" not in tcn_kwargs
encoders[name] = TemporalConvNet(num_inputs=input_size, **tcn_kwargs)
self.representation_size += tcn_kwargs["num_channels"][-1]
self.encoders = torch.nn.ModuleDict(encoders)
# hoa predictor (first layer is a lazy linear layer)
|
class BAMS(nn.Module):
r"""BAMS model.
Args:
input_size (int): Number of input features.
predictor (dict): Parameters for the predictor MLP.
encoders (dict[dict]): A dictionnary of encoders, where each key is the name of
the encoder, and each value is a dictionnary of parameters for the encoder.
Each encoder is a TemporalConvNet.
"""
def __init__(
self,
input_size,
*,
predictor=None,
**encoder_kwargs,
):
super().__init__()
self.input_size = input_size
self.representation_size = 0
encoders = dict()
for name, tcn_kwargs in encoder_kwargs.items():
assert "num_inputs" not in tcn_kwargs
encoders[name] = TemporalConvNet(num_inputs=input_size, **tcn_kwargs)
self.representation_size += tcn_kwargs["num_channels"][-1]
self.encoders = torch.nn.ModuleDict(encoders)
# hoa predictor (first layer is a lazy linear layer) | self.predictor = MLP(**predictor) | 0 | 2023-12-05 16:26:57+00:00 | 2k |
FF14CN/Sarean-arsenal | Utility/sqMall/sqMallDoSign.py | [
{
"identifier": "Daoyu",
"path": "Utility/sdoLogin/Daoyu.py",
"snippet": "def dykey_encrypt(self):\ndef config_handler():\ndef initialize():\ndef get_guid(device_id, manuid):\ndef get_flowid(manuid, deviceid, sessionid, show_username):\ndef get_account_id_list(flowid, deviceid, manuid, sessionid, show_u... | from Utility.sdoLogin import Daoyu
from Utility.sqMall.daoyuBuildinMallSign import daoyumall_sign
from Utility.sqMall.daoyuBuildinMallBalance import daoyu_mall_balance
import Utility.Notifications.push as pusher | 1,368 | """
Author: KuliPoi
Contact: me@pipirapira.com
Created: 2023-12-21
File: sqMailDoSign.py
Version: 2.5.0
Description: Do SQMALL AUTO SIGN, FUCK SQ BY THE WAY
"""
def main():
| """
Author: KuliPoi
Contact: me@pipirapira.com
Created: 2023-12-21
File: sqMailDoSign.py
Version: 2.5.0
Description: Do SQMALL AUTO SIGN, FUCK SQ BY THE WAY
"""
def main(): | if Daoyu.initialize(): | 0 | 2023-12-06 08:48:02+00:00 | 2k |
janmartchouk/vidgen | src/content_getter.py | [
{
"identifier": "SUBREDDITS",
"path": "config/dicts.py",
"snippet": "SUBREDDITS = {\n 'tifu': 'rss',\n 'confession': 'rss',\n 'relationship_advice': 'web',\n 'amitheasshole': 'rss'\n}"
},
{
"identifier": "setup_logger",
"path": "utils/logger.py",
"snippet": "def setup_logger(... | import feedparser
import logging
import time
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from config.dicts import SUBREDDITS
from utils.logger import setup_logger
from models.post import Post | 1,182 |
class ContentGetter:
def __init__(self, loglevel = logging.INFO):
self.logger = setup_logger(__name__, loglevel, emoji='🌍')
# Get a list of Reddit Posts from an RSS feed
def from_subreddit(self, subreddit):
if not subreddit in SUBREDDITS:
self.logger.error(f"{subreddit} is not configured")
exit(1)
if SUBREDDITS[subreddit] == 'rss':
return self.from_rss_subreddit(subreddit)
elif SUBREDDITS[subreddit] == 'web':
return self.from_web(subreddit)
else:
self.logger.error(f"{subreddit} is not configured properly")
exit(1)
def from_rss_subreddit(self, subreddit):
data = feedparser.parse(f'https://reddit.com/r/{subreddit}/top.rss')
posts = []
failed_number = 0
if data.entries:
try:
for entry in data.entries:
paragraphs = BeautifulSoup(entry.content[0].value, 'html.parser').find_all('p')
content = ''.join([p.get_text() for p in paragraphs])
|
class ContentGetter:
def __init__(self, loglevel = logging.INFO):
self.logger = setup_logger(__name__, loglevel, emoji='🌍')
# Get a list of Reddit Posts from an RSS feed
def from_subreddit(self, subreddit):
if not subreddit in SUBREDDITS:
self.logger.error(f"{subreddit} is not configured")
exit(1)
if SUBREDDITS[subreddit] == 'rss':
return self.from_rss_subreddit(subreddit)
elif SUBREDDITS[subreddit] == 'web':
return self.from_web(subreddit)
else:
self.logger.error(f"{subreddit} is not configured properly")
exit(1)
def from_rss_subreddit(self, subreddit):
data = feedparser.parse(f'https://reddit.com/r/{subreddit}/top.rss')
posts = []
failed_number = 0
if data.entries:
try:
for entry in data.entries:
paragraphs = BeautifulSoup(entry.content[0].value, 'html.parser').find_all('p')
content = ''.join([p.get_text() for p in paragraphs]) | post_obj = Post( | 2 | 2023-12-14 13:00:22+00:00 | 2k |
asdfghjil/XMUCourseCheckin | checkin.py | [
{
"identifier": "getCheckinList",
"path": "checkinList.py",
"snippet": "def getCheckinList(session, http_header, userInfo, today=True):\n try:\n url = serverUrl + \"/getQdKbList\"\n data = {\n 'sign': userInfo['sign'],\n 'userType': userInfo['userType'],\n ... | import json
import requests
import sys
import time
import random
from checkinList import getCheckinList, printCheckinList | 1,515 |
serverUrl = "https://tingke.xmu.edu.cn/app"
def getCheckinInfo(session, http_header, userInfo, lesson):
try:
url = serverUrl + "/getXsQdInfo"
data = {
'sign': userInfo['sign'],
'unitCode': userInfo['unitCode'],
'userCode': userInfo['userCode'],
'userName': userInfo['userName'],
'xkKh': lesson['xkKh'],
'qdRq': lesson['qdRq'],
'xqj': lesson['xqj'],
'djj': lesson['djj'],
'djz': lesson['djz'],
'qdId': lesson['qdId'],
'isFz': lesson['isFz'],
'fzMc': lesson['fzMc']
}
res = session.post(url, data=data, headers=http_header)
if res.status_code != 200:
raise Exception('get Checkin info failed')
res = json.loads(res.text)
return res['Rows']
except:
print(json.dumps({
"status": "failed",
"reason": "Get checkin info failed"
}, indent=4))
raise
def checkin(session, http_header, userInfo, lesson, tips=True):
checkinInfo = getCheckinInfo(session, http_header, userInfo, lesson)
print('签到口令:', checkinInfo['klHm'])
# print(lesson['xsQdQk'], lesson['skXs'], lesson['bqMode'], lesson['qdNum'])
if tips:
if lesson['xsQdQk'] != '0' and lesson['skXs'] == '2' and (lesson['bqMode'] != '2' or lesson['qdNum'] != 1):
choice = input('您似乎已经线下签到过了,是否继续签到?(y/n)')
if choice != 'y':
return
if input('是否进行自动签到?(y/n)') != 'y':
return
try:
url = serverUrl + "/saveXsQdInfo"
data = {
'sign': userInfo['sign'],
'unitCode': userInfo['unitCode'],
'userCode': userInfo['userCode'],
'userName': userInfo['userName'],
'bjMc': userInfo['bj'],
'zyMc': userInfo['zy'],
'xyMc': userInfo['xy'],
'wzJd': str(float(checkinInfo['wzJd']) + (random.random() - 0.5) * 2 * 0.0001),
'wzWd': str(float(checkinInfo['wzWd']) + (random.random() - 0.5) * 2 * 0.0001),
'qdId': checkinInfo['uniqueCode'],
'xkKh': checkinInfo['xkKh'],
'skDd': lesson['skDd'],
'xqj': lesson['xqj'],
'djj': lesson['djj'],
'djz': lesson['djz'],
'isFace': None,
# 'isFace': checkinInfo['xsIsFace'],
'wzAcc': 0,
'bqMode': lesson['bqMode'],
'isFz': checkinInfo['isFz'],
'fzMc': lesson['fzMc'],
'djc': lesson['djc'],
'qdJc': lesson['qdJc']
}
# print("**********")
res = session.post(url, data=data, headers=http_header).text
res = json.loads(res)
if res['status'] == 1:
print('签到成功!')
return True
elif res['status'] == 6:
print('签到异常提醒:', res['msg'])
return False
else:
print('签到失败!', res['msg'])
raise Exception('签到失败:' + res['msg'])
except:
print(json.dumps({
"status": "failed",
"reason": "Checkin failed"
}, indent=4))
return False
def courseCheckin(session, http_header, userInfo):
|
serverUrl = "https://tingke.xmu.edu.cn/app"
def getCheckinInfo(session, http_header, userInfo, lesson):
try:
url = serverUrl + "/getXsQdInfo"
data = {
'sign': userInfo['sign'],
'unitCode': userInfo['unitCode'],
'userCode': userInfo['userCode'],
'userName': userInfo['userName'],
'xkKh': lesson['xkKh'],
'qdRq': lesson['qdRq'],
'xqj': lesson['xqj'],
'djj': lesson['djj'],
'djz': lesson['djz'],
'qdId': lesson['qdId'],
'isFz': lesson['isFz'],
'fzMc': lesson['fzMc']
}
res = session.post(url, data=data, headers=http_header)
if res.status_code != 200:
raise Exception('get Checkin info failed')
res = json.loads(res.text)
return res['Rows']
except:
print(json.dumps({
"status": "failed",
"reason": "Get checkin info failed"
}, indent=4))
raise
def checkin(session, http_header, userInfo, lesson, tips=True):
checkinInfo = getCheckinInfo(session, http_header, userInfo, lesson)
print('签到口令:', checkinInfo['klHm'])
# print(lesson['xsQdQk'], lesson['skXs'], lesson['bqMode'], lesson['qdNum'])
if tips:
if lesson['xsQdQk'] != '0' and lesson['skXs'] == '2' and (lesson['bqMode'] != '2' or lesson['qdNum'] != 1):
choice = input('您似乎已经线下签到过了,是否继续签到?(y/n)')
if choice != 'y':
return
if input('是否进行自动签到?(y/n)') != 'y':
return
try:
url = serverUrl + "/saveXsQdInfo"
data = {
'sign': userInfo['sign'],
'unitCode': userInfo['unitCode'],
'userCode': userInfo['userCode'],
'userName': userInfo['userName'],
'bjMc': userInfo['bj'],
'zyMc': userInfo['zy'],
'xyMc': userInfo['xy'],
'wzJd': str(float(checkinInfo['wzJd']) + (random.random() - 0.5) * 2 * 0.0001),
'wzWd': str(float(checkinInfo['wzWd']) + (random.random() - 0.5) * 2 * 0.0001),
'qdId': checkinInfo['uniqueCode'],
'xkKh': checkinInfo['xkKh'],
'skDd': lesson['skDd'],
'xqj': lesson['xqj'],
'djj': lesson['djj'],
'djz': lesson['djz'],
'isFace': None,
# 'isFace': checkinInfo['xsIsFace'],
'wzAcc': 0,
'bqMode': lesson['bqMode'],
'isFz': checkinInfo['isFz'],
'fzMc': lesson['fzMc'],
'djc': lesson['djc'],
'qdJc': lesson['qdJc']
}
# print("**********")
res = session.post(url, data=data, headers=http_header).text
res = json.loads(res)
if res['status'] == 1:
print('签到成功!')
return True
elif res['status'] == 6:
print('签到异常提醒:', res['msg'])
return False
else:
print('签到失败!', res['msg'])
raise Exception('签到失败:' + res['msg'])
except:
print(json.dumps({
"status": "failed",
"reason": "Checkin failed"
}, indent=4))
return False
def courseCheckin(session, http_header, userInfo): | lesson = printCheckinList(session, http_header, userInfo, today=True) | 1 | 2023-12-13 10:42:20+00:00 | 2k |
Kanaries/kanaries-track | kanaries_track/client.py | [
{
"identifier": "config",
"path": "kanaries_track/config.py",
"snippet": "class Config:"
},
{
"identifier": "RequestClient",
"path": "kanaries_track/request.py",
"snippet": "class RequestClient:\n \"\"\"Client for sending events to kanaries-track server\"\"\"\n def __init__(\n ... | from typing import Dict, Any
from datetime import datetime
from threading import Thread
from functools import lru_cache
from dateutil.tz import tzlocal
from .config import config
from .request import RequestClient
import queue
import uuid
import logging
import time
import atexit | 1,388 | self.ruuning = False
def _upload(self):
"""Upload events"""
start_time = time.monotonic()
events = []
while len(events) < self.upload_size:
elapsed_seconds = time.monotonic() - start_time
if elapsed_seconds >= self.upload_interval_seconds:
break
try:
event = self.event_queue.get(block=True, timeout=self.upload_interval_seconds - elapsed_seconds)
events.append(event)
except queue.Empty:
break
except Exception as e:
logger.error("Failed to get event from queue: %s", str(e))
logger.debug("invoke uploading events, event count: %s", len(events))
if events:
self.request_client.track(events)
class Client:
"""Client for sending events to kanaries-track server"""
def __init__(
self,
*,
host: str,
auth_token: str,
debug: bool,
send: bool,
sync_send: bool,
max_queue_size: int,
timeout_seconds: int,
max_retries: int,
proxies: Dict[str, Any],
thread_count: int,
verify: bool,
upload_interval_seconds: int,
upload_size: int
):
self.host = host
self.auth_token = auth_token
self.debug = debug
self.send = send
self.sync_send = sync_send
self.max_queue_size = max_queue_size
self.timeout_seconds = timeout_seconds
self.max_retries = max_retries
self.proxies = proxies
self.thread_count = thread_count
self.verify = verify
self.upload_interval_seconds = upload_interval_seconds
self.upload_size = upload_size
self._consumers = []
self._request_client = RequestClient(
host=self.host,
auth_token=self.auth_token,
max_retries=self.max_retries,
timeout=self.timeout_seconds,
verify=self.verify,
proxy=self.proxies
)
self._event_queue = queue.Queue(self.max_queue_size)
if not self.sync_send and self.send:
for _ in range(self.thread_count):
consumer = _Consumer(
event_queue=self._event_queue,
request_client=self._request_client,
upload_size=self.upload_size,
upload_interval_seconds=self.upload_interval_seconds
)
consumer.start()
self._consumers.append(consumer)
atexit.register(self._end)
if self.debug:
logger.setLevel(logging.DEBUG)
def track(self, event: Dict[str, Any]):
"""Track an event"""
event = self._fill_data(event)
if not self.send:
return
if self.sync_send:
self._request_client.track([event])
else:
self._enqueue(event)
def _fill_data(self, event: Dict[str, Any]) -> Dict[str, Any]:
"""Fill data for an event"""
event["timestamp"] = datetime.now().replace(tzinfo=tzlocal()).isoformat()
event["message_id"] = str(uuid.uuid4())
return event
def _enqueue(self, event: Dict[str, Any]):
"""Enqueue an event"""
logger.debug("enqueue event: %s", event)
try:
self._event_queue.put(event, block=False)
except queue.Full:
logger.warning("Event queue is full, dropping event")
def _end(self):
"""End the client when the main thread exits"""
for consumer in self._consumers:
consumer.pause()
consumer.join()
@lru_cache(maxsize=1)
def get_client():
"""Get a client"""
return Client(
|
logger = logging.getLogger("kanaries_track")
class _Consumer(Thread):
def __init__(
self,
*,
event_queue: queue.Queue,
request_client: RequestClient,
upload_size: int,
upload_interval_seconds: int
) -> None:
super().__init__()
self.event_queue = event_queue
self.request_client = request_client
self.upload_size = upload_size
self.upload_interval_seconds = upload_interval_seconds
self.daemon = True
self.ruuning = True
def run(self):
"""Run the consumer"""
logger.debug("Consumer started")
while self.ruuning:
self._upload()
logger.debug("Consumer stopped")
def pause(self):
"""Pause the consumer"""
self.ruuning = False
def _upload(self):
"""Upload events"""
start_time = time.monotonic()
events = []
while len(events) < self.upload_size:
elapsed_seconds = time.monotonic() - start_time
if elapsed_seconds >= self.upload_interval_seconds:
break
try:
event = self.event_queue.get(block=True, timeout=self.upload_interval_seconds - elapsed_seconds)
events.append(event)
except queue.Empty:
break
except Exception as e:
logger.error("Failed to get event from queue: %s", str(e))
logger.debug("invoke uploading events, event count: %s", len(events))
if events:
self.request_client.track(events)
class Client:
"""Client for sending events to kanaries-track server"""
def __init__(
self,
*,
host: str,
auth_token: str,
debug: bool,
send: bool,
sync_send: bool,
max_queue_size: int,
timeout_seconds: int,
max_retries: int,
proxies: Dict[str, Any],
thread_count: int,
verify: bool,
upload_interval_seconds: int,
upload_size: int
):
self.host = host
self.auth_token = auth_token
self.debug = debug
self.send = send
self.sync_send = sync_send
self.max_queue_size = max_queue_size
self.timeout_seconds = timeout_seconds
self.max_retries = max_retries
self.proxies = proxies
self.thread_count = thread_count
self.verify = verify
self.upload_interval_seconds = upload_interval_seconds
self.upload_size = upload_size
self._consumers = []
self._request_client = RequestClient(
host=self.host,
auth_token=self.auth_token,
max_retries=self.max_retries,
timeout=self.timeout_seconds,
verify=self.verify,
proxy=self.proxies
)
self._event_queue = queue.Queue(self.max_queue_size)
if not self.sync_send and self.send:
for _ in range(self.thread_count):
consumer = _Consumer(
event_queue=self._event_queue,
request_client=self._request_client,
upload_size=self.upload_size,
upload_interval_seconds=self.upload_interval_seconds
)
consumer.start()
self._consumers.append(consumer)
atexit.register(self._end)
if self.debug:
logger.setLevel(logging.DEBUG)
def track(self, event: Dict[str, Any]):
"""Track an event"""
event = self._fill_data(event)
if not self.send:
return
if self.sync_send:
self._request_client.track([event])
else:
self._enqueue(event)
def _fill_data(self, event: Dict[str, Any]) -> Dict[str, Any]:
"""Fill data for an event"""
event["timestamp"] = datetime.now().replace(tzinfo=tzlocal()).isoformat()
event["message_id"] = str(uuid.uuid4())
return event
def _enqueue(self, event: Dict[str, Any]):
"""Enqueue an event"""
logger.debug("enqueue event: %s", event)
try:
self._event_queue.put(event, block=False)
except queue.Full:
logger.warning("Event queue is full, dropping event")
def _end(self):
"""End the client when the main thread exits"""
for consumer in self._consumers:
consumer.pause()
consumer.join()
@lru_cache(maxsize=1)
def get_client():
"""Get a client"""
return Client( | host=config.host, | 0 | 2023-12-06 06:01:32+00:00 | 2k |
Yingyue-L/Mamba-LLaVA | llava/model/llava_arch.py | [
{
"identifier": "build_vision_tower",
"path": "llava/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.p... | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn | 715 | # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"):
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaMetaModel:
def __init__(self, config):
super(LlavaMetaModel, self).__init__(config)
if hasattr(config, "mm_vision_tower"): | self.vision_tower = build_vision_tower(config, delay_load=True) | 0 | 2023-12-09 09:39:13+00:00 | 2k |
Theia-4869/MoSA | src/engine/evaluator.py | [
{
"identifier": "multilabel",
"path": "src/engine/eval/multilabel.py",
"snippet": "def get_continuous_ids(probe_labels: List[int]) -> Dict[int, int]:\ndef multihot(x: List[List[int]], nb_classes: int) -> np.ndarray:\ndef compute_map(\n scores: np.ndarray, multihot_targets: np.ndarray\n) -> Tuple[... | import numpy as np
from collections import defaultdict
from typing import List, Union
from .eval import multilabel
from .eval import singlelabel
from ..utils import logging | 898 | #!/usr/bin/env python3
logger = logging.get_logger("MOSA")
class Evaluator():
"""
An evaluator with below logics:
1. find which eval module to use.
2. store the eval results, pretty print it in log file as well.
"""
def __init__(
self,
) -> None:
self.results = defaultdict(dict)
self.iteration = -1
self.threshold_end = 0.5
def update_iteration(self, iteration: int) -> None:
"""update iteration info"""
self.iteration = iteration
def update_result(self, metric: str, value: Union[float, dict]) -> None:
if self.iteration > -1:
key_name = "epoch_" + str(self.iteration)
else:
key_name = "final"
if isinstance(value, float):
self.results[key_name].update({metric: value})
else:
if metric in self.results[key_name]:
self.results[key_name][metric].update(value)
else:
self.results[key_name].update({metric: value})
def classify(self, probs, targets, test_data, multilabel=False):
"""
Evaluate classification result.
Args:
probs: np.ndarray for num_data x num_class, predicted probabilities
targets: np.ndarray for multilabel, list of integers for single label
test_labels: map test image ids to a list of class labels
"""
if not targets:
raise ValueError(
"When evaluating classification, need at least give targets")
if multilabel:
self._eval_multilabel(probs, targets, test_data)
else:
self._eval_singlelabel(probs, targets, test_data)
def _eval_singlelabel(
self,
scores: np.ndarray,
targets: List[int],
eval_type: str
) -> None:
"""
if number of labels > 2:
top1 and topk (5 by default) accuracy
if number of labels == 2:
top1 and rocauc
"""
| #!/usr/bin/env python3
logger = logging.get_logger("MOSA")
class Evaluator():
"""
An evaluator with below logics:
1. find which eval module to use.
2. store the eval results, pretty print it in log file as well.
"""
def __init__(
self,
) -> None:
self.results = defaultdict(dict)
self.iteration = -1
self.threshold_end = 0.5
def update_iteration(self, iteration: int) -> None:
"""update iteration info"""
self.iteration = iteration
def update_result(self, metric: str, value: Union[float, dict]) -> None:
if self.iteration > -1:
key_name = "epoch_" + str(self.iteration)
else:
key_name = "final"
if isinstance(value, float):
self.results[key_name].update({metric: value})
else:
if metric in self.results[key_name]:
self.results[key_name][metric].update(value)
else:
self.results[key_name].update({metric: value})
def classify(self, probs, targets, test_data, multilabel=False):
"""
Evaluate classification result.
Args:
probs: np.ndarray for num_data x num_class, predicted probabilities
targets: np.ndarray for multilabel, list of integers for single label
test_labels: map test image ids to a list of class labels
"""
if not targets:
raise ValueError(
"When evaluating classification, need at least give targets")
if multilabel:
self._eval_multilabel(probs, targets, test_data)
else:
self._eval_singlelabel(probs, targets, test_data)
def _eval_singlelabel(
self,
scores: np.ndarray,
targets: List[int],
eval_type: str
) -> None:
"""
if number of labels > 2:
top1 and topk (5 by default) accuracy
if number of labels == 2:
top1 and rocauc
""" | acc_dict = singlelabel.compute_acc_auc(scores, targets) | 1 | 2023-12-06 07:50:16+00:00 | 2k |
IBM/AI-assisted-chemical-sensing | src/chemsense/vision/cli/classification_analysis.py | [
{
"identifier": "setup_basic_logging_for_scripts",
"path": "src/chemsense/vision/logging_configuration.py",
"snippet": "def setup_basic_logging_for_scripts() -> None:\n \"\"\"Setup basic stdout logging for scripts.\"\"\"\n logging.basicConfig(\n stream=sys.stdout,\n level=logging.INF... | from pathlib import Path
from chemsense.vision.modeling.classification import (
attach_classification_head_fewshots,
attach_classification_head_kfold,
attach_classification_head_loco,
attach_classification_head_loco_sugars,
)
from ..logging_configuration import setup_basic_logging_for_scripts
from ..modeling.encoders import ENCODERS_REGISTRY
import click
import numpy as np
import pandas as pd
| 1,347 | """Training and testing models with extracted features."""
__copyright__ = """
LICENSED INTERNAL CODE. PROPERTY OF IBM.
IBM Research Licensed Internal Code
(C) Copyright IBM Corp. 2023
ALL RIGHTS RESERVED
"""
@click.command()
@click.option("--task", type=str, default="red_wines", help="Dataset name identifier.")
@click.option(
"--validation",
type=str,
default="kfold",
help="Validation strategy. Supported types are kfold, LOCO, few_shots and Sugar_LOCO.",
)
@click.option(
"--number_of_folds",
type=int,
default=5,
help="number of folds to be used in case of kfold validation.",
)
@click.option(
"--number_of_components",
type=int,
default=30,
help="Max number of principal components to be used.",
)
@click.option(
"--features_path",
required=True,
type=click.Path(path_type=Path, exists=True),
help="Path to directory containing extracted features.",
)
@click.option(
"--output_path",
required=True,
type=click.Path(path_type=Path),
help="Path to save classification model validation results.",
)
def main(
task: str,
validation: str,
number_of_folds: int,
number_of_components: int,
features_path: Path,
output_path: Path,
) -> None:
| """Training and testing models with extracted features."""
__copyright__ = """
LICENSED INTERNAL CODE. PROPERTY OF IBM.
IBM Research Licensed Internal Code
(C) Copyright IBM Corp. 2023
ALL RIGHTS RESERVED
"""
@click.command()
@click.option("--task", type=str, default="red_wines", help="Dataset name identifier.")
@click.option(
"--validation",
type=str,
default="kfold",
help="Validation strategy. Supported types are kfold, LOCO, few_shots and Sugar_LOCO.",
)
@click.option(
"--number_of_folds",
type=int,
default=5,
help="number of folds to be used in case of kfold validation.",
)
@click.option(
"--number_of_components",
type=int,
default=30,
help="Max number of principal components to be used.",
)
@click.option(
"--features_path",
required=True,
type=click.Path(path_type=Path, exists=True),
help="Path to directory containing extracted features.",
)
@click.option(
"--output_path",
required=True,
type=click.Path(path_type=Path),
help="Path to save classification model validation results.",
)
def main(
task: str,
validation: str,
number_of_folds: int,
number_of_components: int,
features_path: Path,
output_path: Path,
) -> None:
| setup_basic_logging_for_scripts()
| 0 | 2023-12-05 15:56:12+00:00 | 2k |
pymike00/tinychat | tests/llms/test_google_handler.py | [
{
"identifier": "GoogleAIHandler",
"path": "tinychat/llms/google.py",
"snippet": "class GoogleAIHandler:\n \"\"\"\n Handler class to interact with the OpenAI models.\n\n Returns chat responses and stores the chat history.\n\n TODO: add chat message dataclass so that we can enforce validation... | import json
import unittest
from unittest.mock import MagicMock, Mock, patch
from tinychat.llms.google import GoogleAIHandler, GoogleAIClient | 1,204 |
class TestGoogleGeminiHandlerStreaming(unittest.TestCase):
@patch.object(GoogleAIClient, "perform_stream_request")
def test_stream_response(self, mock_perform_stream_request):
# Create a mock SSEClient with a mock events method
mock_sse_client = MagicMock()
mock_stream = iter(
[
Mock(
data=json.dumps(
{
"candidates": [
{"content": {"parts": [{"text": "response part 1"}]}}
]
}
)
),
Mock(
data=json.dumps(
{
"candidates": [
{"content": {"parts": [{"text": "response part 2"}]}}
]
}
)
),
Mock(data="[DONE]"),
]
)
mock_sse_client.events.return_value = mock_stream
mock_perform_stream_request.return_value = mock_sse_client
|
class TestGoogleGeminiHandlerStreaming(unittest.TestCase):
@patch.object(GoogleAIClient, "perform_stream_request")
def test_stream_response(self, mock_perform_stream_request):
# Create a mock SSEClient with a mock events method
mock_sse_client = MagicMock()
mock_stream = iter(
[
Mock(
data=json.dumps(
{
"candidates": [
{"content": {"parts": [{"text": "response part 1"}]}}
]
}
)
),
Mock(
data=json.dumps(
{
"candidates": [
{"content": {"parts": [{"text": "response part 2"}]}}
]
}
)
),
Mock(data="[DONE]"),
]
)
mock_sse_client.events.return_value = mock_stream
mock_perform_stream_request.return_value = mock_sse_client
| handler = GoogleAIHandler() | 0 | 2023-12-11 20:40:02+00:00 | 2k |
nickruggeri/hypergraph-message-passing | test/model/test_sampling/test_helper_functions.py | [
{
"identifier": "_community_count_combinations",
"path": "src/model/sampling.py",
"snippet": "def _community_count_combinations(\n n_nodes: int, comm_counts: list[int]\n) -> Iterable[list[int]]:\n r\"\"\"Generate all possible community count vectors :math::`\\#`.\n\n Parameters\n ----------\... | import itertools
import numpy as np
import pytest
from collections import Counter
from typing import Dict, List
from scipy import special
from src.model.sampling import (
_community_count_combinations,
_log_n_sharp,
_sample_hye_from_count,
) | 1,425 |
n_nodes_all = [2, 5, 10, 25, 50, 100]
rng = np.random.default_rng(seed=123)
hye_comm_counts_all = [
rng.integers(low=0, high=max_val, size=q)
for _ in range(10)
for max_val in [5, 10]
for q in [2, 3, 4, 5]
]
comm_counts_all = sum(
(
[
hye_comm_count + rng.integers(low=0, high=high, size=len(hye_comm_count))
for hye_comm_count in hye_comm_counts_all
]
for high in [1, 5, 10]
),
start=[],
)
hye_comm_counts_all = [list(x) for x in hye_comm_counts_all]
comm_counts_all = [list(x) for x in comm_counts_all]
def generate_communities(comm_counts: List[int]) -> Dict[int, np.ndarray]:
N = sum(comm_counts)
K = len(comm_counts)
rng_tmp = np.random.default_rng(seed=21)
all_nodes = np.arange(N)
rng_tmp.shuffle(all_nodes)
cumcount = [0] + list(np.cumsum(comm_counts))
comm_nodes = dict()
for comm in range(K):
comm_nodes[comm] = all_nodes[cumcount[comm] : cumcount[comm + 1]]
return comm_nodes
commm_nodes_all = [generate_communities(comm_counts) for comm_counts in comm_counts_all]
########################################################################################
# Test _community_count_combinations, _log_n_sharp
@pytest.mark.parametrize(
"n_nodes, hye_comm_counts", itertools.product(n_nodes_all, hye_comm_counts_all)
)
def test_community_count_combinations_brute_force(n_nodes, hye_comm_counts):
all_combinations = itertools.product(*(range(a + 1) for a in hye_comm_counts))
all_combinations = [list(comb) for comb in all_combinations if n_nodes == sum(comb)]
assert sorted(all_combinations) == sorted(
_community_count_combinations(n_nodes, hye_comm_counts)
)
@pytest.mark.parametrize(
"comm_counts, hye_comm_counts",
zip(comm_counts_all, hye_comm_counts_all * 3),
)
def test_log_n_sharp_brute_force(comm_counts, hye_comm_counts):
brute_force = [special.binom(a, b) for a, b in zip(comm_counts, hye_comm_counts)]
brute_force = np.sum(np.log(brute_force))
assert np.allclose(brute_force, _log_n_sharp(comm_counts, hye_comm_counts))
########################################################################################
# Test _sample_hye_from_count
@pytest.fixture(
params=(
(comm_nodes, hye_comm_counts, rng)
for comm_nodes, hye_comm_counts in zip(commm_nodes_all, hye_comm_counts_all * 3)
for rgn in [None, np.random.default_rng(seed=34)]
)
)
def sampled_hye_with_info(request):
comm_nodes, hye_comm_counts, rng = request.param
node_to_comm = {node: comm for comm in comm_nodes for node in comm_nodes[comm]}
return (
|
n_nodes_all = [2, 5, 10, 25, 50, 100]
rng = np.random.default_rng(seed=123)
hye_comm_counts_all = [
rng.integers(low=0, high=max_val, size=q)
for _ in range(10)
for max_val in [5, 10]
for q in [2, 3, 4, 5]
]
comm_counts_all = sum(
(
[
hye_comm_count + rng.integers(low=0, high=high, size=len(hye_comm_count))
for hye_comm_count in hye_comm_counts_all
]
for high in [1, 5, 10]
),
start=[],
)
hye_comm_counts_all = [list(x) for x in hye_comm_counts_all]
comm_counts_all = [list(x) for x in comm_counts_all]
def generate_communities(comm_counts: List[int]) -> Dict[int, np.ndarray]:
N = sum(comm_counts)
K = len(comm_counts)
rng_tmp = np.random.default_rng(seed=21)
all_nodes = np.arange(N)
rng_tmp.shuffle(all_nodes)
cumcount = [0] + list(np.cumsum(comm_counts))
comm_nodes = dict()
for comm in range(K):
comm_nodes[comm] = all_nodes[cumcount[comm] : cumcount[comm + 1]]
return comm_nodes
commm_nodes_all = [generate_communities(comm_counts) for comm_counts in comm_counts_all]
########################################################################################
# Test _community_count_combinations, _log_n_sharp
@pytest.mark.parametrize(
"n_nodes, hye_comm_counts", itertools.product(n_nodes_all, hye_comm_counts_all)
)
def test_community_count_combinations_brute_force(n_nodes, hye_comm_counts):
all_combinations = itertools.product(*(range(a + 1) for a in hye_comm_counts))
all_combinations = [list(comb) for comb in all_combinations if n_nodes == sum(comb)]
assert sorted(all_combinations) == sorted(
_community_count_combinations(n_nodes, hye_comm_counts)
)
@pytest.mark.parametrize(
"comm_counts, hye_comm_counts",
zip(comm_counts_all, hye_comm_counts_all * 3),
)
def test_log_n_sharp_brute_force(comm_counts, hye_comm_counts):
brute_force = [special.binom(a, b) for a, b in zip(comm_counts, hye_comm_counts)]
brute_force = np.sum(np.log(brute_force))
assert np.allclose(brute_force, _log_n_sharp(comm_counts, hye_comm_counts))
########################################################################################
# Test _sample_hye_from_count
@pytest.fixture(
params=(
(comm_nodes, hye_comm_counts, rng)
for comm_nodes, hye_comm_counts in zip(commm_nodes_all, hye_comm_counts_all * 3)
for rgn in [None, np.random.default_rng(seed=34)]
)
)
def sampled_hye_with_info(request):
comm_nodes, hye_comm_counts, rng = request.param
node_to_comm = {node: comm for comm in comm_nodes for node in comm_nodes[comm]}
return ( | _sample_hye_from_count(comm_nodes, hye_comm_counts, rng), | 2 | 2023-12-06 22:01:38+00:00 | 2k |
sailfishos-chum/sailfishos-chum.github.io | chumweb/package.py | [
{
"identifier": "CONFIG",
"path": "chumweb/config.py",
"snippet": "CONFIG = init_config()"
},
{
"identifier": "RemoteImage",
"path": "chumweb/remote_image.py",
"snippet": "class RemoteImage:\n \"\"\"\n An image located on a remote computer that can be downloaded locally\n\n Attr... | import logging
import enum
import re
from dataclasses import dataclass, field
from datetime import datetime, UTC
from enum import StrEnum
from types import NoneType
from typing import List, Dict, Self, Set, Optional
from markupsafe import Markup
from . import CONFIG
from .remote_image import RemoteImage
from yaml import safe_load as yaml_load
from yaml.parser import ParserError
from yaml.scanner import ScannerError | 675 | """
Data classes for package metadata. It is also responsible for parsing the metadate of a single package
"""
logger = logging.getLogger(__name__)
class PackageApplicationCategory(StrEnum):
"""
Desktop application categories, from https://specifications.freedesktop.org/menu-spec/latest/apa.html
"""
accessibility = "Accessibility" # Added by Chum?
audio_video = "AudioVideo"
audio = "Audio"
video = "Video"
development = "Development"
education = "Education"
game = "Game"
graphics = "Graphics"
library = "Library" # Added by Chum?
maps = "Maps" # Added by Chum?
network = "Network"
office = "Office"
science = "Science"
settings = "Settings"
system = "System"
utility = "Utility"
other = "Other"
class PackageApplicationType(StrEnum):
"""
Type of the application that the package provides
Enums are based on https://www.freedesktop.org/software/appstream/docs/sect-AppStream-YAML.html#field-dep11-type
"""
generic = enum.auto()
console_application = "console-application"
desktop_application = "desktop-application"
addon = enum.auto()
codec = enum.auto()
inputmethod = enum.auto()
firmware = enum.auto()
@dataclass
class PackageVersion:
epoch: str
ver: str
rel: str
def __init__(self, epoch, ver, rel):
self.epoch = epoch
self.ver = ver
self.rel = rel
def to_short_str(self) -> str:
return self.ver.split('+', 2)[0]
def to_full_str(self) -> str:
return f"{self.ver}-{self.rel}"
@dataclass
class Package:
"""
Metadata of a RPM package with associated Chum metadata
"""
name: str
summary: str | None = None
description: str | Markup | None = None
title: str | None = None
| """
Data classes for package metadata. It is also responsible for parsing the metadate of a single package
"""
logger = logging.getLogger(__name__)
class PackageApplicationCategory(StrEnum):
"""
Desktop application categories, from https://specifications.freedesktop.org/menu-spec/latest/apa.html
"""
accessibility = "Accessibility" # Added by Chum?
audio_video = "AudioVideo"
audio = "Audio"
video = "Video"
development = "Development"
education = "Education"
game = "Game"
graphics = "Graphics"
library = "Library" # Added by Chum?
maps = "Maps" # Added by Chum?
network = "Network"
office = "Office"
science = "Science"
settings = "Settings"
system = "System"
utility = "Utility"
other = "Other"
class PackageApplicationType(StrEnum):
"""
Type of the application that the package provides
Enums are based on https://www.freedesktop.org/software/appstream/docs/sect-AppStream-YAML.html#field-dep11-type
"""
generic = enum.auto()
console_application = "console-application"
desktop_application = "desktop-application"
addon = enum.auto()
codec = enum.auto()
inputmethod = enum.auto()
firmware = enum.auto()
@dataclass
class PackageVersion:
epoch: str
ver: str
rel: str
def __init__(self, epoch, ver, rel):
self.epoch = epoch
self.ver = ver
self.rel = rel
def to_short_str(self) -> str:
return self.ver.split('+', 2)[0]
def to_full_str(self) -> str:
return f"{self.ver}-{self.rel}"
@dataclass
class Package:
"""
Metadata of a RPM package with associated Chum metadata
"""
name: str
summary: str | None = None
description: str | Markup | None = None
title: str | None = None | icon: RemoteImage | None = None | 1 | 2023-12-14 19:25:31+00:00 | 2k |
oVo-HxBots/URLUploadBot | Uploader/youtube.py | [
{
"identifier": "get_file_extension_from_url",
"path": "Uploader/functions/help_ytdl.py",
"snippet": "def get_file_extension_from_url(url):\n url_path = urlparse(url).path\n basename = os.path.basename(url_path)\n return basename.split(\".\")[-1]"
},
{
"identifier": "get_resolution",
... | import os
import wget
import asyncio
from urllib.parse import urlparse
from opencc import OpenCC
from youtube_dl import YoutubeDL
from pyrogram import Client, filters, enums
from pyrogram.types import Message
from pyrogram import Client, filters
from Uploader.config import Config
from sample_config import Config
from Uploader.functions.help_ytdl import get_file_extension_from_url, get_resolution | 979 | # MIT License
# Copyright (c) 2022 Hash Minner
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
if bool(os.environ.get("WEBHOOK")):
else:
YTDL_REGEX = (r"^((?:https?:)?\/\/)")
s2tw = OpenCC('s2tw.json').convert
@Client.on_callback_query(filters.regex("^ytdl_audio$"))
async def callback_query_ytdl_audio(_, callback_query):
try:
url = callback_query.message.reply_to_message.text
ydl_opts = {
'format': 'bestaudio',
'outtmpl': '%(title)s - %(extractor)s-%(id)s.%(ext)s',
'writethumbnail': True
}
with YoutubeDL(ydl_opts) as ydl:
message = callback_query.message
await message.reply_chat_action(enums.ChatAction.TYPING)
info_dict = ydl.extract_info(url, download=False)
# download
await callback_query.edit_message_text("**Downloading audio...**")
ydl.process_info(info_dict)
# upload
audio_file = ydl.prepare_filename(info_dict)
task = asyncio.create_task(send_audio(message, info_dict,
audio_file))
while not task.done():
await asyncio.sleep(3)
await message.reply_chat_action(enums.ChatAction.UPLOAD_DOCUMENT)
await message.reply_chat_action(enums.ChatAction.CANCEL)
await message.delete()
except Exception as e:
await message.reply_text(e)
await callback_query.message.reply_to_message.delete()
await callback_query.message.delete()
async def send_audio(message: Message, info_dict, audio_file):
basename = audio_file.rsplit(".", 1)[-2]
if info_dict['ext'] == 'webm':
audio_file_weba = f"{basename}.weba"
os.rename(audio_file, audio_file_weba)
audio_file = audio_file_weba
thumbnail_url = info_dict['thumbnail']
| # MIT License
# Copyright (c) 2022 Hash Minner
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
if bool(os.environ.get("WEBHOOK")):
else:
YTDL_REGEX = (r"^((?:https?:)?\/\/)")
s2tw = OpenCC('s2tw.json').convert
@Client.on_callback_query(filters.regex("^ytdl_audio$"))
async def callback_query_ytdl_audio(_, callback_query):
try:
url = callback_query.message.reply_to_message.text
ydl_opts = {
'format': 'bestaudio',
'outtmpl': '%(title)s - %(extractor)s-%(id)s.%(ext)s',
'writethumbnail': True
}
with YoutubeDL(ydl_opts) as ydl:
message = callback_query.message
await message.reply_chat_action(enums.ChatAction.TYPING)
info_dict = ydl.extract_info(url, download=False)
# download
await callback_query.edit_message_text("**Downloading audio...**")
ydl.process_info(info_dict)
# upload
audio_file = ydl.prepare_filename(info_dict)
task = asyncio.create_task(send_audio(message, info_dict,
audio_file))
while not task.done():
await asyncio.sleep(3)
await message.reply_chat_action(enums.ChatAction.UPLOAD_DOCUMENT)
await message.reply_chat_action(enums.ChatAction.CANCEL)
await message.delete()
except Exception as e:
await message.reply_text(e)
await callback_query.message.reply_to_message.delete()
await callback_query.message.delete()
async def send_audio(message: Message, info_dict, audio_file):
basename = audio_file.rsplit(".", 1)[-2]
if info_dict['ext'] == 'webm':
audio_file_weba = f"{basename}.weba"
os.rename(audio_file, audio_file_weba)
audio_file = audio_file_weba
thumbnail_url = info_dict['thumbnail'] | thumbnail_file = f"{basename}.{get_file_extension_from_url(thumbnail_url)}" | 0 | 2023-12-09 03:24:55+00:00 | 2k |
Jiawei-Yao0812/PixelFormer_DGR | pixelformer/networks/PQI.py | [
{
"identifier": "resize",
"path": "pixelformer/networks/utils.py",
"snippet": "def resize(input,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None,\n warning=True):\n if warning:\n if size is not None and align_corners:\n... | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from .utils import resize, normal_init | 769 |
class PPM(nn.ModuleList):
"""Pooling Pyramid Module used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict): Config of activation layers.
align_corners (bool): align_corners argument of F.interpolate.
"""
def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg,
act_cfg, align_corners):
super(PPM, self).__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
for pool_scale in pool_scales:
# == if batch size = 1, BN is not supported, change to GN
if pool_scale == 1: norm_cfg = dict(type='GN', requires_grad=True, num_groups=256)
self.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(pool_scale),
ConvModule(
self.in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=norm_cfg,
act_cfg=self.act_cfg)))
def forward(self, x):
"""Forward function."""
ppm_outs = []
for ppm in self:
ppm_out = ppm(x)
|
class PPM(nn.ModuleList):
"""Pooling Pyramid Module used in PSPNet.
Args:
pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
Module.
in_channels (int): Input channels.
channels (int): Channels after modules, before conv_seg.
conv_cfg (dict|None): Config of conv layers.
norm_cfg (dict|None): Config of norm layers.
act_cfg (dict): Config of activation layers.
align_corners (bool): align_corners argument of F.interpolate.
"""
def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg,
act_cfg, align_corners):
super(PPM, self).__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
for pool_scale in pool_scales:
# == if batch size = 1, BN is not supported, change to GN
if pool_scale == 1: norm_cfg = dict(type='GN', requires_grad=True, num_groups=256)
self.append(
nn.Sequential(
nn.AdaptiveAvgPool2d(pool_scale),
ConvModule(
self.in_channels,
self.channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=norm_cfg,
act_cfg=self.act_cfg)))
def forward(self, x):
"""Forward function."""
ppm_outs = []
for ppm in self:
ppm_out = ppm(x) | upsampled_ppm_out = resize( | 0 | 2023-12-13 20:50:32+00:00 | 2k |
kramerlab/PeerLearning | peer.py | [
{
"identifier": "SuggestionBuffer",
"path": "suggestionbuffer.py",
"snippet": "class SuggestionBuffer:\n def __init__(self, capacity):\n self.buffer = deque(maxlen=capacity)\n\n def add(self, *args):\n self.buffer.append(args)\n\n def sample(self, batch_size):\n if len(self... | from abc import ABC
from typing import Type
from suggestionbuffer import SuggestionBuffer
from utils import make_env
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
import itertools as it
import numpy as np
import torch | 1,381 | lr=0.95, switch_ratio=0, use_advantage=False,
max_peer_epochs=1_000_000_000):
"""
:param peers: An iterable of peer agents
:param lr: The learning rate for trust and agent values
:param switch_ratio: switch_ratio == 0 means no switching
:param use_advantage: use advantage instead of value for AV updates
"""
self.peers = peers
self.lr = lr
self.switch_ratio = switch_ratio
self.active_peer = None # index of currently learning peer
self.solo_epoch = False
self.use_advantage = use_advantage
self.max_peer_epochs = max_peer_epochs
if use_agent_values:
self.agent_values = np.full(len(peers), init_agent_values,
dtype=np.float32)
key = "agent_values"
for peer in peers:
peer.n_peers = len(peers)
peer.group = self
# setup agent values
if use_agent_values:
peer.peer_values[key] = self.agent_values # noqa (Eq. 6)
peer.peer_value_functions[key] = self._update_agent_values
def _update_agent_values(self, batch_size=10):
""" Updates the agent values with samples from the peers' buffers"""
targets = np.zeros_like(self.peers, dtype=np.float32)
counts = np.zeros_like(self.peers, dtype=np.float32)
for peer in self.peers:
bs = batch_size // len(self.peers)
# reward, action, peer, new_obs, old_obs
if peer.buffer is not None:
batch = peer.buffer.sample(bs)
if batch is None: # buffer not sufficiently full
return
obs = np.array([b[3] for b in batch]).reshape(bs, -1)
v = peer.value(obs)
if self.use_advantage:
# previous observations
prev_obs = np.array([b[4] for b in batch]).reshape(bs, -1)
prev_v = peer.value(prev_obs)
else:
prev_v = np.zeros_like(v) # no advantage (see Eq. 5)
for i in range(len(batch)): # Eq. 8
target = (batch[i][0] + peer.gamma * v[i]) - prev_v[i]
counts[batch[i][2]] += 1
targets[batch[i][2]] += target
# ensure counts are >= 1, don't change these values
targets[counts == 0] = self.agent_values[counts == 0]
counts[counts == 0] = 1
targets /= counts
self.agent_values += self.lr * (targets - self.agent_values) # Eq. 7
def learn(self, n_epochs, max_epoch_len, callbacks, **kwargs):
""" The outer peer learning routine. """
assert len(callbacks) == len(self.peers)
# more solo epochs
boost_single = 0 < self.switch_ratio < 1
if boost_single:
self.switch_ratio = 1 / self.switch_ratio
self.solo_epoch = False
peer_epochs = 0
for i in range(n_epochs):
# don't do peer learning forever
if peer_epochs < self.max_peer_epochs:
# ratio of 0 never performs a solo episode
if (i % (1 + self.switch_ratio) == 1) ^ boost_single:
self.solo_epoch = True
else:
peer_epochs += 1
else: # budget spent
self.solo_epoch = True
for p, peer, callback in zip(it.count(), self.peers, callbacks):
self.active_peer = p
peer.learn(self.solo_epoch, total_timesteps=max_epoch_len,
callback=callback, tb_log_name=f"Peer{p}",
reset_num_timesteps=False,
log_interval=None, **kwargs)
# update epoch for temperature decay
peer.epoch += 1
self.active_peer = None
def __len__(self):
return len(self.peers)
def make_peer_class(cls: Type[OffPolicyAlgorithm]):
""" Creates a mixin with the corresponding algorithm class.
:param cls: The learning algorithm (needs to have a callable critic).
:return: The mixed in peer agent class.
"""
class Peer(cls, ABC):
""" Abstract Peer class
needs to be mixed with a suitable algorithm. """
def __init__(self, temperature, temp_decay, algo_args, env,
use_trust=False, use_critic=False, init_trust_values=200,
buffer_size=1000, follow_steps=10, seed=None,
use_trust_buffer=True, solo_training=False,
peers_sample_with_noise=False,
sample_random_actions=False, sample_from_suggestions=True,
epsilon=0.0, env_args=None, only_follow_peers=False):
if env_args is None:
env_args = {}
super(Peer, self).__init__(**algo_args,
|
class PeerGroup:
""" A group of peers who train together. """
def __init__(self, peers, use_agent_values=False, init_agent_values=200.,
lr=0.95, switch_ratio=0, use_advantage=False,
max_peer_epochs=1_000_000_000):
"""
:param peers: An iterable of peer agents
:param lr: The learning rate for trust and agent values
:param switch_ratio: switch_ratio == 0 means no switching
:param use_advantage: use advantage instead of value for AV updates
"""
self.peers = peers
self.lr = lr
self.switch_ratio = switch_ratio
self.active_peer = None # index of currently learning peer
self.solo_epoch = False
self.use_advantage = use_advantage
self.max_peer_epochs = max_peer_epochs
if use_agent_values:
self.agent_values = np.full(len(peers), init_agent_values,
dtype=np.float32)
key = "agent_values"
for peer in peers:
peer.n_peers = len(peers)
peer.group = self
# setup agent values
if use_agent_values:
peer.peer_values[key] = self.agent_values # noqa (Eq. 6)
peer.peer_value_functions[key] = self._update_agent_values
def _update_agent_values(self, batch_size=10):
""" Updates the agent values with samples from the peers' buffers"""
targets = np.zeros_like(self.peers, dtype=np.float32)
counts = np.zeros_like(self.peers, dtype=np.float32)
for peer in self.peers:
bs = batch_size // len(self.peers)
# reward, action, peer, new_obs, old_obs
if peer.buffer is not None:
batch = peer.buffer.sample(bs)
if batch is None: # buffer not sufficiently full
return
obs = np.array([b[3] for b in batch]).reshape(bs, -1)
v = peer.value(obs)
if self.use_advantage:
# previous observations
prev_obs = np.array([b[4] for b in batch]).reshape(bs, -1)
prev_v = peer.value(prev_obs)
else:
prev_v = np.zeros_like(v) # no advantage (see Eq. 5)
for i in range(len(batch)): # Eq. 8
target = (batch[i][0] + peer.gamma * v[i]) - prev_v[i]
counts[batch[i][2]] += 1
targets[batch[i][2]] += target
# ensure counts are >= 1, don't change these values
targets[counts == 0] = self.agent_values[counts == 0]
counts[counts == 0] = 1
targets /= counts
self.agent_values += self.lr * (targets - self.agent_values) # Eq. 7
def learn(self, n_epochs, max_epoch_len, callbacks, **kwargs):
""" The outer peer learning routine. """
assert len(callbacks) == len(self.peers)
# more solo epochs
boost_single = 0 < self.switch_ratio < 1
if boost_single:
self.switch_ratio = 1 / self.switch_ratio
self.solo_epoch = False
peer_epochs = 0
for i in range(n_epochs):
# don't do peer learning forever
if peer_epochs < self.max_peer_epochs:
# ratio of 0 never performs a solo episode
if (i % (1 + self.switch_ratio) == 1) ^ boost_single:
self.solo_epoch = True
else:
peer_epochs += 1
else: # budget spent
self.solo_epoch = True
for p, peer, callback in zip(it.count(), self.peers, callbacks):
self.active_peer = p
peer.learn(self.solo_epoch, total_timesteps=max_epoch_len,
callback=callback, tb_log_name=f"Peer{p}",
reset_num_timesteps=False,
log_interval=None, **kwargs)
# update epoch for temperature decay
peer.epoch += 1
self.active_peer = None
def __len__(self):
return len(self.peers)
def make_peer_class(cls: Type[OffPolicyAlgorithm]):
""" Creates a mixin with the corresponding algorithm class.
:param cls: The learning algorithm (needs to have a callable critic).
:return: The mixed in peer agent class.
"""
class Peer(cls, ABC):
""" Abstract Peer class
needs to be mixed with a suitable algorithm. """
def __init__(self, temperature, temp_decay, algo_args, env,
use_trust=False, use_critic=False, init_trust_values=200,
buffer_size=1000, follow_steps=10, seed=None,
use_trust_buffer=True, solo_training=False,
peers_sample_with_noise=False,
sample_random_actions=False, sample_from_suggestions=True,
epsilon=0.0, env_args=None, only_follow_peers=False):
if env_args is None:
env_args = {}
super(Peer, self).__init__(**algo_args, | env=make_env(env, **env_args), | 1 | 2023-12-13 10:40:55+00:00 | 2k |
balewgize/skimmit | url_summary/views.py | [
{
"identifier": "Preference",
"path": "users/models.py",
"snippet": "class Preference(models.Model):\n class AIModels(models.TextChoices):\n GPT_3_5 = \"gpt-3.5-turbo\", \"GPT-3.5\"\n GEMINI_PRO = \"gemini-pro\", \"Gemini Pro\"\n\n SENTENCE_COUNT_CHOICES = tuple(zip(range(3, 11), ran... | import os
import json
import readtime
import google.generativeai as genai
from django.http import JsonResponse
from bs4 import BeautifulSoup
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from openai import OpenAI
from youtube_transcript_api import YouTubeTranscriptApi
from youtube_transcript_api.formatters import TextFormatter
from pytube import YouTube
from users.models import Preference
from .forms import ArticleURLForm, VideoURLForm
from .models import URLSummary
from .utils.downloader import download_page | 1,394 |
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
def home(request):
context = {"article_form": ArticleURLForm(), "video_form": VideoURLForm()}
return render(request, "index.html", context=context)
def article_summary(request):
if request.method == "POST":
form = ArticleURLForm(request.POST)
if form.is_valid():
url = form.cleaned_data["url"]
if request.user.is_authenticated:
user_preference, _ = Preference.objects.get_or_create(user=request.user)
else:
user_preference = None
summary = get_article_summary(url, user_preference)
context = {"result": summary, "article_form": ArticleURLForm()}
else:
context = {"article_form": form}
context["video_form"] = VideoURLForm()
return render(request, "url_summary/article.html", context=context)
else:
return redirect("url_summary:home")
def video_summary(request):
if request.method == "POST":
form = VideoURLForm(request.POST)
if form.is_valid():
url = form.cleaned_data["url"]
if request.user.is_authenticated:
user_preference, _ = Preference.objects.get_or_create(user=request.user)
else:
user_preference = None
summary = get_video_summary(url, user_preference)
context = {"result": summary, "video_form": VideoURLForm()}
else:
context = {"video_form": form}
context["article_form"] = ArticleURLForm()
return render(request, "url_summary/video.html", context=context)
else:
return redirect("url_summary:home")
def get_article_summary(url: str, user_preference: Preference):
"""
Summarize articles by extracting HTML body text.
"""
summary_obj = URLSummary.objects.filter(url=url).first()
if summary_obj:
summary_dict = get_summary_details(summary_obj)
return summary_dict
summary_dict = {}
|
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
def home(request):
context = {"article_form": ArticleURLForm(), "video_form": VideoURLForm()}
return render(request, "index.html", context=context)
def article_summary(request):
if request.method == "POST":
form = ArticleURLForm(request.POST)
if form.is_valid():
url = form.cleaned_data["url"]
if request.user.is_authenticated:
user_preference, _ = Preference.objects.get_or_create(user=request.user)
else:
user_preference = None
summary = get_article_summary(url, user_preference)
context = {"result": summary, "article_form": ArticleURLForm()}
else:
context = {"article_form": form}
context["video_form"] = VideoURLForm()
return render(request, "url_summary/article.html", context=context)
else:
return redirect("url_summary:home")
def video_summary(request):
if request.method == "POST":
form = VideoURLForm(request.POST)
if form.is_valid():
url = form.cleaned_data["url"]
if request.user.is_authenticated:
user_preference, _ = Preference.objects.get_or_create(user=request.user)
else:
user_preference = None
summary = get_video_summary(url, user_preference)
context = {"result": summary, "video_form": VideoURLForm()}
else:
context = {"video_form": form}
context["article_form"] = ArticleURLForm()
return render(request, "url_summary/video.html", context=context)
else:
return redirect("url_summary:home")
def get_article_summary(url: str, user_preference: Preference):
"""
Summarize articles by extracting HTML body text.
"""
summary_obj = URLSummary.objects.filter(url=url).first()
if summary_obj:
summary_dict = get_summary_details(summary_obj)
return summary_dict
summary_dict = {} | response, error = download_page(url) | 4 | 2023-12-13 13:47:20+00:00 | 2k |
ZS-YANG/FemtoDet-v3 | projects/XDecoder/xdecoder/inference/texttoimage_regionretrieval_inferencer.py | [
{
"identifier": "DetInferencer",
"path": "mmdet/apis/det_inferencer.py",
"snippet": " VOID = None\nIMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',\n '.tiff', '.webp')\nclass DetInferencer(BaseInferencer):\n def __init__(self,\n model: Opt... | import copy
import torch
from typing import Iterable, Optional, Union
from mmengine.dataset import Compose
from rich.progress import track
from mmdet.apis.det_inferencer import DetInferencer, InputsType
from mmdet.utils import ConfigType | 1,193 |
class TextToImageRegionRetrievalInferencer(DetInferencer):
def _init_pipeline(self, cfg: ConfigType) -> Compose:
"""Initialize the test pipeline."""
pipeline_cfg = cfg.test_dataloader.dataset.pipeline
# For inference, the key of ``img_id`` is not used.
if 'meta_keys' in pipeline_cfg[-1]:
pipeline_cfg[-1]['meta_keys'] = tuple(
meta_key for meta_key in pipeline_cfg[-1]['meta_keys']
if meta_key != 'img_id')
load_img_idx = self._get_transform_idx(pipeline_cfg,
'LoadImageFromFile')
if load_img_idx == -1:
raise ValueError(
'LoadImageFromFile is not found in the test pipeline')
pipeline_cfg[load_img_idx]['type'] = 'mmdet.InferencerLoader'
retrieval_pipeline = Compose(pipeline_cfg)
grounding_pipeline_cp = copy.deepcopy(pipeline_cfg)
grounding_pipeline_cp[1].scale = cfg.grounding_scale
grounding_pipeline = Compose(grounding_pipeline_cp)
return {
'grounding_pipeline': grounding_pipeline,
'retrieval_pipeline': retrieval_pipeline
}
def _get_chunk_data(self, inputs: Iterable, pipeline, chunk_size: int):
"""Get batch data from inputs.
Args:
inputs (Iterable): An iterable dataset.
chunk_size (int): Equivalent to batch size.
Yields:
list: batch data.
"""
inputs_iter = iter(inputs)
while True:
try:
chunk_data = []
for _ in range(chunk_size):
inputs_ = next(inputs_iter)
chunk_data.append(
(inputs_, pipeline(copy.deepcopy(inputs_))))
yield chunk_data
except StopIteration:
if chunk_data:
yield chunk_data
break
def preprocess(self,
|
class TextToImageRegionRetrievalInferencer(DetInferencer):
def _init_pipeline(self, cfg: ConfigType) -> Compose:
"""Initialize the test pipeline."""
pipeline_cfg = cfg.test_dataloader.dataset.pipeline
# For inference, the key of ``img_id`` is not used.
if 'meta_keys' in pipeline_cfg[-1]:
pipeline_cfg[-1]['meta_keys'] = tuple(
meta_key for meta_key in pipeline_cfg[-1]['meta_keys']
if meta_key != 'img_id')
load_img_idx = self._get_transform_idx(pipeline_cfg,
'LoadImageFromFile')
if load_img_idx == -1:
raise ValueError(
'LoadImageFromFile is not found in the test pipeline')
pipeline_cfg[load_img_idx]['type'] = 'mmdet.InferencerLoader'
retrieval_pipeline = Compose(pipeline_cfg)
grounding_pipeline_cp = copy.deepcopy(pipeline_cfg)
grounding_pipeline_cp[1].scale = cfg.grounding_scale
grounding_pipeline = Compose(grounding_pipeline_cp)
return {
'grounding_pipeline': grounding_pipeline,
'retrieval_pipeline': retrieval_pipeline
}
def _get_chunk_data(self, inputs: Iterable, pipeline, chunk_size: int):
"""Get batch data from inputs.
Args:
inputs (Iterable): An iterable dataset.
chunk_size (int): Equivalent to batch size.
Yields:
list: batch data.
"""
inputs_iter = iter(inputs)
while True:
try:
chunk_data = []
for _ in range(chunk_size):
inputs_ = next(inputs_iter)
chunk_data.append(
(inputs_, pipeline(copy.deepcopy(inputs_))))
yield chunk_data
except StopIteration:
if chunk_data:
yield chunk_data
break
def preprocess(self, | inputs: InputsType, | 0 | 2023-12-11 15:23:03+00:00 | 2k |
mit-ll-ai-technology/maite | src/maite/_internals/interop/huggingface/image_classifier.py | [
{
"identifier": "BaseHFModel",
"path": "src/maite/_internals/interop/huggingface/base.py",
"snippet": "class BaseHFModel(nn.Module, BaseModel):\n def __init__(\n self,\n model_name: str,\n model: Union[HuggingFaceWithLogits, HuggingFaceWithDetection],\n processor: Optional... | from typing import TYPE_CHECKING, Any, List, Optional, Union, cast
from typing_extensions import Self
from maite.errors import InvalidArgument
from maite.protocols import HasDataImage, HasLogits, SupportsArray
from .base import BaseHFModel, InteropModelMetadata
from .typing import (
HuggingFacePredictions,
HuggingFaceProbs,
HuggingFaceProcessor,
HuggingFaceWithLogits,
)
from transformers import AutoFeatureExtractor, AutoModelForImageClassification
import torch as tr | 1,012 | # Copyright 2023, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
__all__ = ["HuggingFaceImageClassifier"]
class HuggingFaceImageClassifier(BaseHFModel):
"""
Wrapper for HuggingFace image classifiation models.
This interface uses `AutoFeatureExtractor` and `AutoModelForImageClassification`
to load the HuggingFace models.
"""
metadata: InteropModelMetadata
def __init__(
self,
model_name: str,
model: HuggingFaceWithLogits,
processor: Optional[HuggingFaceProcessor] = None,
top_k: Optional[int] = None,
) -> None:
"""
Initialize HuggingFaceImageClassifier.
Parameters
----------
model_name: str
A Huggingface model name from model id, e.g. "microsoft/resnet-50"
processor : HuggingFaceProcessor
A HuggingFace feature extractor for a given model.
model : HuggingFaceModel
A HuggingFace image classification model.
Examples
--------
>>> from transformers import AutoFeatureExtractor, AutoModelForImageClassification
>>> processor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-50")
>>> model = AutoModelForImageClassification.from_pretrained("microsoft/resnet-50")
>>> hf_model = HuggingFaceImageClassifier(processor, model)
"""
super().__init__(model_name=model_name, model=model, processor=processor)
self._top_k = top_k
self.metadata = InteropModelMetadata(
model_name=model_name, provider="HuggingFace", task="Image Classification"
)
def preprocessor(
self,
data: SupportsArray,
) -> HasDataImage:
"""
Preprocess images for a HuggingFace object detector.
Parameters
----------
images : Sequence[ArrayLike]
The images to preprocess.
Returns
-------
tr.Tensor
The preprocessed images.
Examples
--------
"""
assert self._processor is not None, "No processor was provided."
assert isinstance(data, (list, tuple))
image_features = self._processor(images=data, return_tensors="pt")[
"pixel_values"
]
assert isinstance(image_features, tr.Tensor)
return {"image": image_features}
def post_processor(
self, outputs: HasLogits
| # Copyright 2023, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
__all__ = ["HuggingFaceImageClassifier"]
class HuggingFaceImageClassifier(BaseHFModel):
"""
Wrapper for HuggingFace image classifiation models.
This interface uses `AutoFeatureExtractor` and `AutoModelForImageClassification`
to load the HuggingFace models.
"""
metadata: InteropModelMetadata
def __init__(
self,
model_name: str,
model: HuggingFaceWithLogits,
processor: Optional[HuggingFaceProcessor] = None,
top_k: Optional[int] = None,
) -> None:
"""
Initialize HuggingFaceImageClassifier.
Parameters
----------
model_name: str
A Huggingface model name from model id, e.g. "microsoft/resnet-50"
processor : HuggingFaceProcessor
A HuggingFace feature extractor for a given model.
model : HuggingFaceModel
A HuggingFace image classification model.
Examples
--------
>>> from transformers import AutoFeatureExtractor, AutoModelForImageClassification
>>> processor = AutoFeatureExtractor.from_pretrained("microsoft/resnet-50")
>>> model = AutoModelForImageClassification.from_pretrained("microsoft/resnet-50")
>>> hf_model = HuggingFaceImageClassifier(processor, model)
"""
super().__init__(model_name=model_name, model=model, processor=processor)
self._top_k = top_k
self.metadata = InteropModelMetadata(
model_name=model_name, provider="HuggingFace", task="Image Classification"
)
def preprocessor(
self,
data: SupportsArray,
) -> HasDataImage:
"""
Preprocess images for a HuggingFace object detector.
Parameters
----------
images : Sequence[ArrayLike]
The images to preprocess.
Returns
-------
tr.Tensor
The preprocessed images.
Examples
--------
"""
assert self._processor is not None, "No processor was provided."
assert isinstance(data, (list, tuple))
image_features = self._processor(images=data, return_tensors="pt")[
"pixel_values"
]
assert isinstance(image_features, tr.Tensor)
return {"image": image_features}
def post_processor(
self, outputs: HasLogits | ) -> Union[HuggingFacePredictions, HuggingFaceProbs]: | 1 | 2023-12-12 15:34:16+00:00 | 2k |
djcopley/ShellOracle | src/shelloracle/providers/ollama.py | [
{
"identifier": "Provider",
"path": "src/shelloracle/provider.py",
"snippet": "class Provider(Protocol):\n \"\"\"\n LLM Provider Protocol\n\n All LLM backends must implement this interface.\n \"\"\"\n name: str\n\n @abstractmethod\n def generate(self, prompt: str) -> AsyncIterator[s... | import json
import httpx
from dataclasses import dataclass, asdict
from typing import Any, AsyncIterator
from ..provider import Provider, ProviderError
from ..config import Setting | 1,256 | from __future__ import annotations
def dataclass_to_json(obj: Any) -> dict[str, Any]:
"""Convert dataclass to a json dict
This function filters out 'None' values.
:param obj: the dataclass to serialize
:return: serialized dataclass
:raises TypeError: if obj is not a dataclass
"""
return {k: v for k, v in asdict(obj).items() if v is not None}
@dataclass
class GenerateRequest:
model: str
"""(required) the model name"""
prompt: str | None = None
"""the prompt to generate a response for"""
images: list[str] | None = None
"""a list of base64-encoded images (for multimodal models such as llava)"""
format: str | None = None
"""the format to return a response in. Currently the only accepted value is json"""
options: dict | None = None
"""additional model parameters listed in the documentation for the Modelfile such as temperature"""
system: str | None = None
"""system prompt to (overrides what is defined in the Modelfile)"""
template: str | None = None
"""the full prompt or prompt template (overrides what is defined in the Modelfile)"""
context: str | None = None
"""the context parameter returned from a previous request to /generate, this can be used to keep a short
conversational memory"""
stream: bool | None = None
"""if false the response will be returned as a single response object, rather than a stream of objects"""
raw: bool | None = None
"""if true no formatting will be applied to the prompt and no context will be returned. You may choose to use
the raw parameter if you are specifying a full templated prompt in your request to the API, and are managing
history yourself. JSON mode"""
class Ollama(Provider):
name = "Ollama"
host = Setting(default="localhost")
port = Setting(default=11434)
model = Setting(default="codellama:13b")
system_prompt = Setting(
default=(
"Based on the following user description, generate a corresponding Bash command. Focus solely "
"on interpreting the requirements and translating them into a single, executable Bash command. "
"Ensure accuracy and relevance to the user's description. The output should be a valid Bash "
"command that directly aligns with the user's intent, ready for execution in a command-line "
"environment. Output nothing except for the command. No code block, no English explanation, "
"no start/end tags."
)
)
@property
def endpoint(self) -> str:
# computed property because python descriptors need to be bound to an instance before access
return f"http://{self.host}:{self.port}/api/generate"
async def generate(self, prompt: str) -> AsyncIterator[str]:
request = GenerateRequest(self.model, prompt, system=self.system_prompt, stream=True)
data = dataclass_to_json(request)
try:
async with httpx.AsyncClient() as client:
async with client.stream("POST", self.endpoint, json=data, timeout=20.0) as stream:
async for line in stream.aiter_lines():
response = json.loads(line)
if "error" in response:
| from __future__ import annotations
def dataclass_to_json(obj: Any) -> dict[str, Any]:
"""Convert dataclass to a json dict
This function filters out 'None' values.
:param obj: the dataclass to serialize
:return: serialized dataclass
:raises TypeError: if obj is not a dataclass
"""
return {k: v for k, v in asdict(obj).items() if v is not None}
@dataclass
class GenerateRequest:
model: str
"""(required) the model name"""
prompt: str | None = None
"""the prompt to generate a response for"""
images: list[str] | None = None
"""a list of base64-encoded images (for multimodal models such as llava)"""
format: str | None = None
"""the format to return a response in. Currently the only accepted value is json"""
options: dict | None = None
"""additional model parameters listed in the documentation for the Modelfile such as temperature"""
system: str | None = None
"""system prompt to (overrides what is defined in the Modelfile)"""
template: str | None = None
"""the full prompt or prompt template (overrides what is defined in the Modelfile)"""
context: str | None = None
"""the context parameter returned from a previous request to /generate, this can be used to keep a short
conversational memory"""
stream: bool | None = None
"""if false the response will be returned as a single response object, rather than a stream of objects"""
raw: bool | None = None
"""if true no formatting will be applied to the prompt and no context will be returned. You may choose to use
the raw parameter if you are specifying a full templated prompt in your request to the API, and are managing
history yourself. JSON mode"""
class Ollama(Provider):
name = "Ollama"
host = Setting(default="localhost")
port = Setting(default=11434)
model = Setting(default="codellama:13b")
system_prompt = Setting(
default=(
"Based on the following user description, generate a corresponding Bash command. Focus solely "
"on interpreting the requirements and translating them into a single, executable Bash command. "
"Ensure accuracy and relevance to the user's description. The output should be a valid Bash "
"command that directly aligns with the user's intent, ready for execution in a command-line "
"environment. Output nothing except for the command. No code block, no English explanation, "
"no start/end tags."
)
)
@property
def endpoint(self) -> str:
# computed property because python descriptors need to be bound to an instance before access
return f"http://{self.host}:{self.port}/api/generate"
async def generate(self, prompt: str) -> AsyncIterator[str]:
request = GenerateRequest(self.model, prompt, system=self.system_prompt, stream=True)
data = dataclass_to_json(request)
try:
async with httpx.AsyncClient() as client:
async with client.stream("POST", self.endpoint, json=data, timeout=20.0) as stream:
async for line in stream.aiter_lines():
response = json.loads(line)
if "error" in response: | raise ProviderError(response["error"]) | 1 | 2023-12-11 20:23:31+00:00 | 2k |
juniberry/PacketIRC | packetirc.py | [
{
"identifier": "LOG_FILE",
"path": "settings.py",
"snippet": "LOG_FILE = \"packetirc.log\""
},
{
"identifier": "LOG_LEVEL",
"path": "settings.py",
"snippet": "LOG_LEVEL = logging.INFO"
},
{
"identifier": "SERVER",
"path": "settings.py",
"snippet": "SERVER = \"\""
},
... | import socket
import threading
import random
import time
import logging
import re
import irc.client
import os
import sys
from settings import LOG_FILE, LOG_LEVEL, SERVER, PORT, PASS, CHANNEL, HIDE_SERVER, MAX_RETRIES, RETRY_DELAY, HELP_INFO, WELCOME_MESSAGE, BAD_WORDS_FILE, BAD_WORDS_FILTER | 666 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
______ _ _____ ______ ______
(_____ \ | | _ (_____|_____ \ / _____)
_____) )___ ____| | _ ____| |_ _ _____) ) /
| ____/ _ |/ ___) | / ) _ ) _) | | (_____ (| |
| | ( ( | ( (___| |< ( (/ /| |__ _| |_ | | \_____
|_| \_||_|\____)_| \_)____)\___|_____) |_|\______)
PacketIRC is a bandwidth-conscious IRC client specifically designed for packet radio communication.
It includes a client-side implementation with simplified IRC functionalities.
File: client.py
Author: Daria Juniper @juniberry
Date: 10-Dec-2023
Changes:
12-Dec-2023 - Initial version 1.0 beta.
"""
# Import settings from an external configuration file.
# Globals
VERSION = 'v1.1b'
BAD_WORDS = []
HOME_PATH = os.path.dirname(os.path.abspath(__file__)) # Grab home path for use with logging et al.
# State
is_running = True
# Initialize logging.
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
______ _ _____ ______ ______
(_____ \ | | _ (_____|_____ \ / _____)
_____) )___ ____| | _ ____| |_ _ _____) ) /
| ____/ _ |/ ___) | / ) _ ) _) | | (_____ (| |
| | ( ( | ( (___| |< ( (/ /| |__ _| |_ | | \_____
|_| \_||_|\____)_| \_)____)\___|_____) |_|\______)
PacketIRC is a bandwidth-conscious IRC client specifically designed for packet radio communication.
It includes a client-side implementation with simplified IRC functionalities.
File: client.py
Author: Daria Juniper @juniberry
Date: 10-Dec-2023
Changes:
12-Dec-2023 - Initial version 1.0 beta.
"""
# Import settings from an external configuration file.
# Globals
VERSION = 'v1.1b'
BAD_WORDS = []
HOME_PATH = os.path.dirname(os.path.abspath(__file__)) # Grab home path for use with logging et al.
# State
is_running = True
# Initialize logging. | logging.basicConfig(filename=os.path.join(HOME_PATH, LOG_FILE), filemode='w', level=LOG_LEVEL, format='%(asctime)s - %(levelname)s - %(message)s') | 0 | 2023-12-13 19:08:48+00:00 | 2k |
Tps-F/rvc-onnx-test | onnxlib/attentions.py | [
{
"identifier": "commons",
"path": "onnxlib/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\ndef get_padding(kernel_size, dilation=1):\ndef kl_divergence(m_p, logs_p, m_q, logs_q):\ndef rand_gumbel(shape):\ndef rand_gumbel_like(x):\ndef slice_segments(x, ids_str, segment_size=4):\ndef ... | import math
import torch
from typing import Optional
from torch import nn
from torch.nn import functional as F
from onnxlib import commons, modules
from onnxlib.modules import LayerNorm | 1,523 |
class Encoder(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
window_size=10,
**kwargs
):
super(Encoder, self).__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = int(n_layers)
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
window_size=window_size,
)
)
|
class Encoder(nn.Module):
def __init__(
self,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size=1,
p_dropout=0.0,
window_size=10,
**kwargs
):
super(Encoder, self).__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = int(n_layers)
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.attn_layers.append(
MultiHeadAttention(
hidden_channels,
hidden_channels,
n_heads,
p_dropout=p_dropout,
window_size=window_size,
)
) | self.norm_layers_1.append(LayerNorm(hidden_channels)) | 2 | 2023-12-09 04:08:04+00:00 | 2k |
zhenqincn/FedKSeed | utils_data/load_data.py | [
{
"identifier": "DefaultToken",
"path": "utils_data/default_tokens.py",
"snippet": "class DefaultToken(Enum):\n PAD_TOKEN = \"[PAD]\"\n EOS_TOKEN = \"</s>\"\n BOS_TOKEN = \"<s>\"\n UNK_TOKEN = \"<unk>\"\n IGNORE_INDEX = -100"
},
{
"identifier": "partition_idx_labeldir",
"path"... | import numpy as np
import torch
from torch.utils.data import DataLoader, Subset
from transformers import AutoTokenizer
from utils_data.default_tokens import DefaultToken
from utils_data.partition_data import partition_idx_labeldir
from collections import Counter
from utils_data.llm_dataset import LLMDataset, LLMDataCollator
from utils_data.natural_instruction_loader import get_instruction_dataset | 862 |
def get_loaders(args, only_eval=False):
"""
Return: list of train_loaders, eval_loader
"""
tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)
tokenizer.model_max_length = args.max_length
special_tokens = dict()
if tokenizer.pad_token is None:
special_tokens["pad_token"] = DefaultToken.PAD_TOKEN.value
if tokenizer.eos_token is None:
special_tokens["eos_token"] = DefaultToken.EOS_TOKEN.value
if tokenizer.bos_token is None:
special_tokens["bos_token"] = DefaultToken.BOS_TOKEN.value
if tokenizer.unk_token is None:
special_tokens["unk_token"] = DefaultToken.UNK_TOKEN.value
tokenizer.add_special_tokens(special_tokens)
# Generation task
if args.dataset == 'dolly':
if args.eval_metric == 'loss':
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=False)
else:
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=True)
data_collator = LLMDataCollator(tokenizer=tokenizer)
# only use a subset of raw dataset
raw_datasets, _ = torch.utils.data.dataset.random_split(raw_datasets, [int(len(raw_datasets) * args.dataset_subsample), len(raw_datasets) - int(len(raw_datasets) * args.dataset_subsample)])
y_all = np.array([item['categories'] for item in raw_datasets])
index_eval = np.where(y_all == args.zerotask)[0]
# delete the indices of eval samples from the all set
index_train = np.delete(np.arange(len(y_all)), index_eval)
raw_datasets = np.array(raw_datasets)
train_set = raw_datasets[index_train]
eval_set = raw_datasets[index_eval]
y_train = np.array([item['categories'] for item in train_set])
counter = Counter(y_train)
noniid = args.iid
if 'dir' in noniid:
|
def get_loaders(args, only_eval=False):
"""
Return: list of train_loaders, eval_loader
"""
tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=True)
tokenizer.model_max_length = args.max_length
special_tokens = dict()
if tokenizer.pad_token is None:
special_tokens["pad_token"] = DefaultToken.PAD_TOKEN.value
if tokenizer.eos_token is None:
special_tokens["eos_token"] = DefaultToken.EOS_TOKEN.value
if tokenizer.bos_token is None:
special_tokens["bos_token"] = DefaultToken.BOS_TOKEN.value
if tokenizer.unk_token is None:
special_tokens["unk_token"] = DefaultToken.UNK_TOKEN.value
tokenizer.add_special_tokens(special_tokens)
# Generation task
if args.dataset == 'dolly':
if args.eval_metric == 'loss':
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=False)
else:
raw_datasets = LLMDataset(args.dataset, tokenizer=tokenizer, generation=True)
data_collator = LLMDataCollator(tokenizer=tokenizer)
# only use a subset of raw dataset
raw_datasets, _ = torch.utils.data.dataset.random_split(raw_datasets, [int(len(raw_datasets) * args.dataset_subsample), len(raw_datasets) - int(len(raw_datasets) * args.dataset_subsample)])
y_all = np.array([item['categories'] for item in raw_datasets])
index_eval = np.where(y_all == args.zerotask)[0]
# delete the indices of eval samples from the all set
index_train = np.delete(np.arange(len(y_all)), index_eval)
raw_datasets = np.array(raw_datasets)
train_set = raw_datasets[index_train]
eval_set = raw_datasets[index_eval]
y_train = np.array([item['categories'] for item in train_set])
counter = Counter(y_train)
noniid = args.iid
if 'dir' in noniid: | split_dic = partition_idx_labeldir(y_train, n_parties=args.num_clients, alpha=float(noniid[3:]), num_classes=len(counter)) | 1 | 2023-12-08 02:58:31+00:00 | 2k |
merlresearch/PixPNet | pixpnet/optim.py | [
{
"identifier": "get_logger",
"path": "pixpnet/utils.py",
"snippet": "def get_logger(name):\n logging.basicConfig(\n format=\"%(asctime)s[%(process)d][%(levelname)s] %(message)s\",\n datefmt=\"%Y-%m-%dT%H:%M:%S\",\n )\n logger = logging.getLogger(name)\n logger.setLevel(os.envi... | import argparse
import inspect
import re
import torch
from typing import Any, Dict, Optional, Set, Tuple, Type
from pytorch_warmup import ExponentialWarmup
from pytorch_warmup.base import BaseWarmup
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiStepLR, StepLR
from pixpnet.utils import get_logger, intersect_func_and_kwargs | 760 | # Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
#
# SPDX-License-Identifier: AGPL-3.0-or-later
logger = get_logger(__name__)
_OPTIMIZER_MAP = {attr: getattr(torch.optim, attr) for attr in dir(torch.optim) if attr != "Optimizer"}
_OPTIMIZER_MAP = {attr: cls for attr, cls in _OPTIMIZER_MAP.items() if inspect.isclass(cls)}
_LOOSE_OPTIMIZER_MAP = {}
for _attr, _cls in _OPTIMIZER_MAP.items():
_attr_split = re.split(r"(?=(?<!^)[A-Z][a-z]|(?<![A-Z])[A-Z]$)", _attr)
_attr_lower = "".join(map(str.lower, _attr_split))
_attr_lower_ = "_".join(map(str.lower, _attr_split))
if _attr_lower in _LOOSE_OPTIMIZER_MAP or _attr_lower_ in _LOOSE_OPTIMIZER_MAP:
_cls_existing = _LOOSE_OPTIMIZER_MAP.get(_attr_lower, _LOOSE_OPTIMIZER_MAP.get(_attr_lower_))
raise RuntimeError(
f"{_attr_lower} already in optimizers! Overlapping class names in "
f"lowercase was unexpected and cannot be resolved: "
f"{_cls_existing} and {_cls}"
)
_LOOSE_OPTIMIZER_MAP[_attr_lower] = _cls
if _attr_lower != _attr_lower_:
_LOOSE_OPTIMIZER_MAP[_attr_lower_] = _cls
def get_optimizer_cls(
config: argparse.Namespace,
ignore: Optional[Set[str]] = None,
) -> Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]:
if ignore is None:
ignore = set()
try:
optimizer_cls = _LOOSE_OPTIMIZER_MAP[config.optimizer.name.lower()]
except KeyError:
raise ValueError(f'No such optimizer "{config.optimizer.name}"')
| # Copyright (c) 2022-2023 Mitsubishi Electric Research Laboratories (MERL)
#
# SPDX-License-Identifier: AGPL-3.0-or-later
logger = get_logger(__name__)
_OPTIMIZER_MAP = {attr: getattr(torch.optim, attr) for attr in dir(torch.optim) if attr != "Optimizer"}
_OPTIMIZER_MAP = {attr: cls for attr, cls in _OPTIMIZER_MAP.items() if inspect.isclass(cls)}
_LOOSE_OPTIMIZER_MAP = {}
for _attr, _cls in _OPTIMIZER_MAP.items():
_attr_split = re.split(r"(?=(?<!^)[A-Z][a-z]|(?<![A-Z])[A-Z]$)", _attr)
_attr_lower = "".join(map(str.lower, _attr_split))
_attr_lower_ = "_".join(map(str.lower, _attr_split))
if _attr_lower in _LOOSE_OPTIMIZER_MAP or _attr_lower_ in _LOOSE_OPTIMIZER_MAP:
_cls_existing = _LOOSE_OPTIMIZER_MAP.get(_attr_lower, _LOOSE_OPTIMIZER_MAP.get(_attr_lower_))
raise RuntimeError(
f"{_attr_lower} already in optimizers! Overlapping class names in "
f"lowercase was unexpected and cannot be resolved: "
f"{_cls_existing} and {_cls}"
)
_LOOSE_OPTIMIZER_MAP[_attr_lower] = _cls
if _attr_lower != _attr_lower_:
_LOOSE_OPTIMIZER_MAP[_attr_lower_] = _cls
def get_optimizer_cls(
config: argparse.Namespace,
ignore: Optional[Set[str]] = None,
) -> Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]:
if ignore is None:
ignore = set()
try:
optimizer_cls = _LOOSE_OPTIMIZER_MAP[config.optimizer.name.lower()]
except KeyError:
raise ValueError(f'No such optimizer "{config.optimizer.name}"') | hparams, invalid_keys = intersect_func_and_kwargs( | 1 | 2023-12-06 23:49:31+00:00 | 2k |
dhh1995/MeGraph | megraph/args_utils.py | [
{
"identifier": "get_default_config",
"path": "megraph/io_utils.py",
"snippet": "def get_default_config(args):\n dataset_name = args.dataset_name\n dataset_subname = args.dataset_subname\n model_name = args.model\n conv_name = args.layer\n\n # Config\n cfg_file = args.config_file\n ... | import git
from .io_utils import get_default_config, get_raw_cmdline | 704 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : args.py
# Author : Honghua Dong
# Email : dhh19951@gmail.com
#
# Distributed under terms of the MIT license.
__all__ = ["ArgsBuilder", "add_git_and_cmd_line_info", "get_args_and_model"]
class ArgsBuilder(object):
"""A meta-class to be inherit that support args register and setup from args"""
__hyperparams__ = []
__parser__ = None
__prefix__ = "--"
@classmethod
def _set_parser_and_prefix(cls, parser, prefix):
cls.__parser__ = parser
if prefix is None:
prefix = "--"
else:
prefix = f"--{prefix}-"
cls.__prefix__ = prefix
@classmethod
def _add_argument(cls, name, *args, **kwargs):
cls.__hyperparams__.append(name)
name = name.replace("_", "-")
cls.__parser__.add_argument(cls.__prefix__ + name, *args, **kwargs)
@classmethod
def from_args(cls, args, prefix=None, **kwargs):
if prefix is None:
prefix = ""
else:
prefix = str(prefix) + "_"
print(f"From Args: {cls.__name__} with {kwargs}")
init_params = {k: getattr(args, prefix + k) for k in cls.__hyperparams__}
init_params.update(kwargs)
return cls(**init_params)
def add_git_and_cmd_line_info(args):
| #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : args.py
# Author : Honghua Dong
# Email : dhh19951@gmail.com
#
# Distributed under terms of the MIT license.
__all__ = ["ArgsBuilder", "add_git_and_cmd_line_info", "get_args_and_model"]
class ArgsBuilder(object):
"""A meta-class to be inherit that support args register and setup from args"""
__hyperparams__ = []
__parser__ = None
__prefix__ = "--"
@classmethod
def _set_parser_and_prefix(cls, parser, prefix):
cls.__parser__ = parser
if prefix is None:
prefix = "--"
else:
prefix = f"--{prefix}-"
cls.__prefix__ = prefix
@classmethod
def _add_argument(cls, name, *args, **kwargs):
cls.__hyperparams__.append(name)
name = name.replace("_", "-")
cls.__parser__.add_argument(cls.__prefix__ + name, *args, **kwargs)
@classmethod
def from_args(cls, args, prefix=None, **kwargs):
if prefix is None:
prefix = ""
else:
prefix = str(prefix) + "_"
print(f"From Args: {cls.__name__} with {kwargs}")
init_params = {k: getattr(args, prefix + k) for k in cls.__hyperparams__}
init_params.update(kwargs)
return cls(**init_params)
def add_git_and_cmd_line_info(args): | args.raw_cmdline = get_raw_cmdline() | 1 | 2023-12-12 04:17:13+00:00 | 2k |
SJTU-Quant/SUNNY-GNN | main.py | [
{
"identifier": "train_baseline",
"path": "train/train_baseline.py",
"snippet": "def train(cfg):\ndef train_explain(cfg):"
},
{
"identifier": "train_gnn",
"path": "train/train_gnn.py",
"snippet": "def train(cfg):"
},
{
"identifier": "train_hgn",
"path": "train/train_hgn.py",
... | import argparse
import yaml
import os
import torch
import random
import copy
import numpy as np
from train import train_baseline, train_gnn, train_hgn
from tools.get_data import get_dataset | 847 |
def parse_args():
parser = argparse.ArgumentParser(description="Self-explainable GNN/HGN")
parser.add_argument('--method', type=str, default='snexgnn',
help='self-explainable GNN/HGN type',
choices=['snexgnn', 'snexhgn', 'gat', 'gcn', 'simplehgn'])
parser.add_argument('--encoder', type=str, default='gat',
help='GNN/HGN encoder type',
choices=['gat', 'gcn', 'simplehgn'])
parser.add_argument('--dataset', type=str, default='citeseer',
help='dataset name',
choices=['citeseer', 'cora', 'pubmed',
'amazon-photo', 'coauthor-physics', 'coauthor-cs',
'imdb', 'dblp', 'acm'])
parser.add_argument('--gpu', type=int, default=0, help='gpu id')
parser.add_argument('--num_seeds', type=int, default=1, help='number of random seeds')
parser.add_argument('--eval_explanation', type=bool, default=False,
help='whether to evaluate explanation fidelity')
return parser.parse_args()
class Config(object):
def __init__(self, args):
abs_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(abs_dir, 'log')
os.makedirs(log_dir, exist_ok=True)
data_dir = os.path.join(abs_dir, 'dataset', args.dataset)
self.method = args.method
self.encoder_type = args.encoder
self.dataset = args.dataset
self.abs_dir = abs_dir
self.data_dir = data_dir
self.gpu = args.gpu
self.index = None
self.graph_path = f'{data_dir}/{args.dataset}_graph.bin'
self.index_path = f'{data_dir}/{args.dataset}_index.bin'
self.check_dataset()
self.ckpt_dir = os.path.join(abs_dir, 'ckpt')
self.hyparams = self.load_hyperparams(args)
self.eval_explanation = args.eval_explanation
def check_dataset(self):
if not os.path.exists(self.graph_path):
get_dataset(self.dataset, self.data_dir)
def load_hyperparams(self, args):
yml_path = os.path.join(self.abs_dir, 'configs', f'{args.dataset}.yml')
with open(yml_path, 'r') as f:
hyperparams = yaml.load(f, Loader=yaml.FullLoader)
return hyperparams
def set_seed(self, seed):
self.seed = seed
self.encoder_path = f'{self.ckpt_dir}/{self.dataset}/{self.encoder_type}-seed-{seed}-pretrain.pt'
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def main():
results = {}
for seed in range(args.num_seeds):
setup_seed(seed)
cfg.set_seed(seed)
print(f'===========seed: {seed}===========')
if cfg.method in ['snexgnn', 'snexhgn']:
print(f"Dataset: {cfg.dataset}, Method: {cfg.method}-{cfg.encoder_type}")
if not os.path.exists(cfg.encoder_path):
print(f"Pretrain {cfg.encoder_type}...")
cfg_cp = copy.deepcopy(cfg)
cfg_cp.method = cfg_cp.encoder_type
|
def parse_args():
parser = argparse.ArgumentParser(description="Self-explainable GNN/HGN")
parser.add_argument('--method', type=str, default='snexgnn',
help='self-explainable GNN/HGN type',
choices=['snexgnn', 'snexhgn', 'gat', 'gcn', 'simplehgn'])
parser.add_argument('--encoder', type=str, default='gat',
help='GNN/HGN encoder type',
choices=['gat', 'gcn', 'simplehgn'])
parser.add_argument('--dataset', type=str, default='citeseer',
help='dataset name',
choices=['citeseer', 'cora', 'pubmed',
'amazon-photo', 'coauthor-physics', 'coauthor-cs',
'imdb', 'dblp', 'acm'])
parser.add_argument('--gpu', type=int, default=0, help='gpu id')
parser.add_argument('--num_seeds', type=int, default=1, help='number of random seeds')
parser.add_argument('--eval_explanation', type=bool, default=False,
help='whether to evaluate explanation fidelity')
return parser.parse_args()
class Config(object):
def __init__(self, args):
abs_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(abs_dir, 'log')
os.makedirs(log_dir, exist_ok=True)
data_dir = os.path.join(abs_dir, 'dataset', args.dataset)
self.method = args.method
self.encoder_type = args.encoder
self.dataset = args.dataset
self.abs_dir = abs_dir
self.data_dir = data_dir
self.gpu = args.gpu
self.index = None
self.graph_path = f'{data_dir}/{args.dataset}_graph.bin'
self.index_path = f'{data_dir}/{args.dataset}_index.bin'
self.check_dataset()
self.ckpt_dir = os.path.join(abs_dir, 'ckpt')
self.hyparams = self.load_hyperparams(args)
self.eval_explanation = args.eval_explanation
def check_dataset(self):
if not os.path.exists(self.graph_path):
get_dataset(self.dataset, self.data_dir)
def load_hyperparams(self, args):
yml_path = os.path.join(self.abs_dir, 'configs', f'{args.dataset}.yml')
with open(yml_path, 'r') as f:
hyperparams = yaml.load(f, Loader=yaml.FullLoader)
return hyperparams
def set_seed(self, seed):
self.seed = seed
self.encoder_path = f'{self.ckpt_dir}/{self.dataset}/{self.encoder_type}-seed-{seed}-pretrain.pt'
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def main():
results = {}
for seed in range(args.num_seeds):
setup_seed(seed)
cfg.set_seed(seed)
print(f'===========seed: {seed}===========')
if cfg.method in ['snexgnn', 'snexhgn']:
print(f"Dataset: {cfg.dataset}, Method: {cfg.method}-{cfg.encoder_type}")
if not os.path.exists(cfg.encoder_path):
print(f"Pretrain {cfg.encoder_type}...")
cfg_cp = copy.deepcopy(cfg)
cfg_cp.method = cfg_cp.encoder_type | train_gnn.train(cfg_cp) | 1 | 2023-12-12 02:46:00+00:00 | 2k |
dvmazur/mixtral-offloading | src/expert_wrapper.py | [
{
"identifier": "nested_flatten",
"path": "src/utils.py",
"snippet": "def nested_flatten(t):\n \"\"\"\n Turn nested list/tuple/dict into a flat iterator.\n \"\"\"\n if isinstance(t, (list, tuple)):\n for x in t:\n yield from nested_flatten(x)\n elif isinstance(t, dict):\... | import typing as tp
import torch
from torch import nn
from .utils import nested_flatten, nested_pack | 742 |
class MixtralExpertWrapper(nn.Module):
def __init__(
self,
expert_module: tp.Any,
device: torch.device,
):
super().__init__()
expert_module, self.storage = self.replace_layer_storage(expert_module, device)
self.expert_module = lambda *args, **kwargs: expert_module(*args, **kwargs)
self._register_state_dict_hook(self._add_storage_to_state_dict_hook)
self._register_load_state_dict_pre_hook(self._load_storage_from_state_dict_hook)
@staticmethod
def _add_storage_to_state_dict_hook(self, state_dict, prefix, local_metadata):
state_dict[prefix + 'storage'] = torch.as_tensor(self.storage, dtype=torch.uint8)
return state_dict
def _load_storage_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
self.storage.copy_(state_dict[prefix + 'storage'].storage().untyped())
del state_dict[prefix + 'storage']
def forward(self, *args, **kwargs):
return self.expert_module(*args, **kwargs)
@staticmethod
def replace_layer_storage(
layer: tp.Any,
device: torch.device,
):
state_dict = {
f"w{i}": {
"W_q": getattr(layer, f"w{i}").W_q,
"meta": getattr(layer, f"w{i}").meta,
"bias": getattr(layer, f"w{i}").bias,
}
for i in range(1, 4)
}
storage_size = 0
offsets = [0]
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
continue
storage_size += x.nbytes
offsets.append(storage_size)
storage = torch.UntypedStorage(storage_size, device=device)
i = 0
new_flattened_states = list()
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
new_flattened_states.append(x)
continue
start = offsets[i]
end = offsets[i + 1]
a_view = torch.as_tensor(storage[start:end], dtype=x.dtype, device=device).view(x.shape)
a_view[...] = x
assert a_view.data_ptr() == storage.data_ptr() + start
i += 1
new_flattened_states.append(a_view)
|
class MixtralExpertWrapper(nn.Module):
def __init__(
self,
expert_module: tp.Any,
device: torch.device,
):
super().__init__()
expert_module, self.storage = self.replace_layer_storage(expert_module, device)
self.expert_module = lambda *args, **kwargs: expert_module(*args, **kwargs)
self._register_state_dict_hook(self._add_storage_to_state_dict_hook)
self._register_load_state_dict_pre_hook(self._load_storage_from_state_dict_hook)
@staticmethod
def _add_storage_to_state_dict_hook(self, state_dict, prefix, local_metadata):
state_dict[prefix + 'storage'] = torch.as_tensor(self.storage, dtype=torch.uint8)
return state_dict
def _load_storage_from_state_dict_hook(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
self.storage.copy_(state_dict[prefix + 'storage'].storage().untyped())
del state_dict[prefix + 'storage']
def forward(self, *args, **kwargs):
return self.expert_module(*args, **kwargs)
@staticmethod
def replace_layer_storage(
layer: tp.Any,
device: torch.device,
):
state_dict = {
f"w{i}": {
"W_q": getattr(layer, f"w{i}").W_q,
"meta": getattr(layer, f"w{i}").meta,
"bias": getattr(layer, f"w{i}").bias,
}
for i in range(1, 4)
}
storage_size = 0
offsets = [0]
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
continue
storage_size += x.nbytes
offsets.append(storage_size)
storage = torch.UntypedStorage(storage_size, device=device)
i = 0
new_flattened_states = list()
for x in nested_flatten(state_dict):
if not isinstance(x, torch.Tensor):
new_flattened_states.append(x)
continue
start = offsets[i]
end = offsets[i + 1]
a_view = torch.as_tensor(storage[start:end], dtype=x.dtype, device=device).view(x.shape)
a_view[...] = x
assert a_view.data_ptr() == storage.data_ptr() + start
i += 1
new_flattened_states.append(a_view)
| state_dict = nested_pack(new_flattened_states, state_dict) | 1 | 2023-12-15 03:32:35+00:00 | 2k |
CircleRadon/Osprey | osprey/datasets/stage2_data.py | [
{
"identifier": "preprocess",
"path": "osprey/train/train.py",
"snippet": "def preprocess(\n sources: Sequence[str],\n tokenizer: transformers.PreTrainedTokenizer,\n has_image: bool = False\n) -> Dict:\n \"\"\"\n Given a list of sources, each is a conversation list. This transform:\n 1... | import copy
import os
import random
import numpy as np
import torch
from osprey.train.train import preprocess, preprocess_multimodal
from torch.utils.data import Dataset
from pycocotools.coco import COCO
from pycocotools import mask as maskUtils
from PIL import Image | 1,598 |
class CustomDataset(Dataset):
def __init__(self,
tokenizer=None,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=20,
):
self.data_args = data_args
self.tokenizer = tokenizer
self.max_gt_per_img = max_gt_per_img
self.img_prefix = img_prefix
self.data_infos = self.load_annotations(ann_file)
super().__init__()
def __len__(self):
return len(self.data_infos)
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.img_ids = self.coco.getImgIds()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
info['height'] = int(info['height'])
info['width'] = int(info['width'])
ann_ids = self.coco.getAnnIds(imgIds=[i])
ann_info = self.coco.loadAnns(ann_ids)
if len(ann_info)==0:
continue
data_infos.append(info)
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return ann_info
def annToMask(self, mask_ann, h, w):
if isinstance(mask_ann, list):
rles = maskUtils.frPyObjects(mask_ann, h, w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, h, w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
def process_text(self, data_item):
image = data_item['img']
ori_labels = data_item['gt_labels']
ori_masks = np.array(data_item['gt_masks'])
ori_masks = torch.from_numpy(ori_masks)
shuffle_ids = torch.randperm(len(ori_labels))
if len(shuffle_ids) > self.max_gt_per_img:
shuffle_ids = shuffle_ids[:self.max_gt_per_img]
ori_masks = ori_masks[shuffle_ids]
ori_labels = [ori_labels[i] for i in shuffle_ids]
sources = dict()
sources['conversations'] = []
# print("num:",len(ori_labels))
for i in range(len(ori_labels)):
question = '<region>'
question = question.replace('<region>', '<mask><pos>')
if i == 0:
question = self.begin_str + question
answer = ori_labels[i]
sources['conversations'].append(
{'from': 'human', 'value': question})
sources['conversations'].append({'from': 'gpt', 'value': answer})
cur_token_len = (image.shape[1] // 16) * (image.shape[2] // 16)
assert image.shape[1] == image.shape[2]
# a hard code [] for sources
sources = preprocess_multimodal(
copy.deepcopy([sources['conversations']]),
self.data_args,
cur_token_len)
# print(sources)
|
class CustomDataset(Dataset):
def __init__(self,
tokenizer=None,
data_args=None,
ann_file=None,
img_prefix=None,
max_gt_per_img=20,
):
self.data_args = data_args
self.tokenizer = tokenizer
self.max_gt_per_img = max_gt_per_img
self.img_prefix = img_prefix
self.data_infos = self.load_annotations(ann_file)
super().__init__()
def __len__(self):
return len(self.data_infos)
def load_annotations(self, ann_file):
self.coco = COCO(ann_file)
self.img_ids = self.coco.getImgIds()
data_infos = []
total_ann_ids = []
for i in self.img_ids:
info = self.coco.loadImgs([i])[0]
info['filename'] = info['file_name']
info['height'] = int(info['height'])
info['width'] = int(info['width'])
ann_ids = self.coco.getAnnIds(imgIds=[i])
ann_info = self.coco.loadAnns(ann_ids)
if len(ann_info)==0:
continue
data_infos.append(info)
total_ann_ids.extend(ann_ids)
assert len(set(total_ann_ids)) == len(
total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx):
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
ann_info = self.coco.loadAnns(ann_ids)
return ann_info
def annToMask(self, mask_ann, h, w):
if isinstance(mask_ann, list):
rles = maskUtils.frPyObjects(mask_ann, h, w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, h, w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
def process_text(self, data_item):
image = data_item['img']
ori_labels = data_item['gt_labels']
ori_masks = np.array(data_item['gt_masks'])
ori_masks = torch.from_numpy(ori_masks)
shuffle_ids = torch.randperm(len(ori_labels))
if len(shuffle_ids) > self.max_gt_per_img:
shuffle_ids = shuffle_ids[:self.max_gt_per_img]
ori_masks = ori_masks[shuffle_ids]
ori_labels = [ori_labels[i] for i in shuffle_ids]
sources = dict()
sources['conversations'] = []
# print("num:",len(ori_labels))
for i in range(len(ori_labels)):
question = '<region>'
question = question.replace('<region>', '<mask><pos>')
if i == 0:
question = self.begin_str + question
answer = ori_labels[i]
sources['conversations'].append(
{'from': 'human', 'value': question})
sources['conversations'].append({'from': 'gpt', 'value': answer})
cur_token_len = (image.shape[1] // 16) * (image.shape[2] // 16)
assert image.shape[1] == image.shape[2]
# a hard code [] for sources
sources = preprocess_multimodal(
copy.deepcopy([sources['conversations']]),
self.data_args,
cur_token_len)
# print(sources)
| data_dict = preprocess( | 0 | 2023-12-17 16:21:45+00:00 | 2k |
open-mmlab/PIA | animatediff/data/dataset.py | [
{
"identifier": "zero_rank_print",
"path": "animatediff/utils/util.py",
"snippet": "def zero_rank_print(s):\n if (not dist.is_initialized()) or (dist.is_initialized() and dist.get_rank() == 0): print(\"### \" + s)"
},
{
"identifier": "detect_edges",
"path": "animatediff/utils/util.py",
... | import os, io, csv, math, random
import numpy as np
import torch
import torchvision.transforms as transforms
import cv2
from einops import rearrange
from decord import VideoReader
from torch.utils.data.dataset import Dataset
from animatediff.utils.util import zero_rank_print, detect_edges | 851 |
def get_score(video_data,
cond_frame_idx,
weight=[1.0, 1.0, 1.0, 1.0],
use_edge=True):
"""
Similar to get_score under utils/util.py/detect_edges
"""
"""
the shape of video_data is f c h w, np.ndarray
"""
h, w = video_data.shape[1], video_data.shape[2]
cond_frame = video_data[cond_frame_idx]
cond_hsv_list = list(
cv2.split(
cv2.cvtColor(cond_frame.astype(np.float32), cv2.COLOR_RGB2HSV)))
if use_edge:
cond_frame_lum = cond_hsv_list[-1]
cond_frame_edge = detect_edges(cond_frame_lum.astype(np.uint8))
cond_hsv_list.append(cond_frame_edge)
score_sum = []
for frame_idx in range(video_data.shape[0]):
frame = video_data[frame_idx]
hsv_list = list(
cv2.split(cv2.cvtColor(frame.astype(np.float32),
cv2.COLOR_RGB2HSV)))
if use_edge:
frame_img_lum = hsv_list[-1]
frame_img_edge = detect_edges(lum=frame_img_lum.astype(np.uint8))
hsv_list.append(frame_img_edge)
hsv_diff = [
np.abs(hsv_list[c] - cond_hsv_list[c]) for c in range(len(weight))
]
hsv_mse = [np.sum(hsv_diff[c]) * weight[c] for c in range(len(weight))]
score_sum.append(sum(hsv_mse) / (h * w) / (sum(weight)))
return score_sum
class WebVid10M(Dataset):
def __init__(
self,
csv_path, video_folder,
sample_size=256, sample_stride=4, sample_n_frames=16,
is_image=False,
):
|
def get_score(video_data,
cond_frame_idx,
weight=[1.0, 1.0, 1.0, 1.0],
use_edge=True):
"""
Similar to get_score under utils/util.py/detect_edges
"""
"""
the shape of video_data is f c h w, np.ndarray
"""
h, w = video_data.shape[1], video_data.shape[2]
cond_frame = video_data[cond_frame_idx]
cond_hsv_list = list(
cv2.split(
cv2.cvtColor(cond_frame.astype(np.float32), cv2.COLOR_RGB2HSV)))
if use_edge:
cond_frame_lum = cond_hsv_list[-1]
cond_frame_edge = detect_edges(cond_frame_lum.astype(np.uint8))
cond_hsv_list.append(cond_frame_edge)
score_sum = []
for frame_idx in range(video_data.shape[0]):
frame = video_data[frame_idx]
hsv_list = list(
cv2.split(cv2.cvtColor(frame.astype(np.float32),
cv2.COLOR_RGB2HSV)))
if use_edge:
frame_img_lum = hsv_list[-1]
frame_img_edge = detect_edges(lum=frame_img_lum.astype(np.uint8))
hsv_list.append(frame_img_edge)
hsv_diff = [
np.abs(hsv_list[c] - cond_hsv_list[c]) for c in range(len(weight))
]
hsv_mse = [np.sum(hsv_diff[c]) * weight[c] for c in range(len(weight))]
score_sum.append(sum(hsv_mse) / (h * w) / (sum(weight)))
return score_sum
class WebVid10M(Dataset):
def __init__(
self,
csv_path, video_folder,
sample_size=256, sample_stride=4, sample_n_frames=16,
is_image=False,
): | zero_rank_print(f"loading annotations from {csv_path} ...") | 0 | 2023-12-21 03:29:34+00:00 | 2k |
VikParuchuri/texify | ocr_image.py | [
{
"identifier": "batch_inference",
"path": "texify/inference.py",
"snippet": "def batch_inference(images, model, processor, temperature=settings.TEMPERATURE, max_tokens=settings.MAX_TOKENS):\n images = [image.convert(\"RGB\") for image in images]\n encodings = processor(images=images, return_tenso... | import argparse
import os.path
import json
from texify.inference import batch_inference
from texify.model.model import load_model
from texify.model.processor import load_processor
from PIL import Image
from texify.output import replace_katex_invalid
from texify.settings import settings
from texify.util import is_valid_image | 1,160 |
def inference_single_image(image_path, json_path, model, processor, katex_compatible=False):
image = Image.open(image_path)
text = batch_inference([image], model, processor)
if katex_compatible:
text = [replace_katex_invalid(t) for t in text]
write_data = [{"image_path": image_path, "text": text[0]}]
with open(json_path, "w+") as f:
json_repr = json.dumps(write_data, indent=4)
f.write(json_repr)
def inference_image_dir(image_dir, json_path, model, processor, max=None, katex_compatible=False):
image_paths = [os.path.join(image_dir, image_name) for image_name in os.listdir(image_dir)]
|
def inference_single_image(image_path, json_path, model, processor, katex_compatible=False):
image = Image.open(image_path)
text = batch_inference([image], model, processor)
if katex_compatible:
text = [replace_katex_invalid(t) for t in text]
write_data = [{"image_path": image_path, "text": text[0]}]
with open(json_path, "w+") as f:
json_repr = json.dumps(write_data, indent=4)
f.write(json_repr)
def inference_image_dir(image_dir, json_path, model, processor, max=None, katex_compatible=False):
image_paths = [os.path.join(image_dir, image_name) for image_name in os.listdir(image_dir)] | image_paths = [ip for ip in image_paths if is_valid_image(ip)] | 5 | 2023-12-18 22:59:58+00:00 | 2k |
dcharatan/pixelsplat | src/visualization/drawing/points.py | [
{
"identifier": "generate_conversions",
"path": "src/visualization/drawing/coordinate_conversion.py",
"snippet": "def generate_conversions(\n shape: tuple[int, int],\n device: torch.device,\n x_range: Optional[Pair] = None,\n y_range: Optional[Pair] = None,\n) -> tuple[\n ConversionFuncti... | from typing import Optional
from einops import repeat
from jaxtyping import Float
from torch import Tensor
from .coordinate_conversion import generate_conversions
from .rendering import render_over_image
from .types import Pair, Scalar, Vector, sanitize_scalar, sanitize_vector
import torch | 839 |
def draw_points(
image: Float[Tensor, "3 height width"],
points: Vector,
color: Vector = [1, 1, 1],
radius: Scalar = 1,
inner_radius: Scalar = 0,
num_msaa_passes: int = 1,
x_range: Optional[Pair] = None,
y_range: Optional[Pair] = None,
) -> Float[Tensor, "3 height width"]:
device = image.device
points = sanitize_vector(points, 2, device)
color = sanitize_vector(color, 3, device)
radius = sanitize_scalar(radius, device)
inner_radius = sanitize_scalar(inner_radius, device)
(num_points,) = torch.broadcast_shapes(
points.shape[0],
color.shape[0],
radius.shape,
inner_radius.shape,
)
# Convert world-space points to pixel space.
_, h, w = image.shape
|
def draw_points(
image: Float[Tensor, "3 height width"],
points: Vector,
color: Vector = [1, 1, 1],
radius: Scalar = 1,
inner_radius: Scalar = 0,
num_msaa_passes: int = 1,
x_range: Optional[Pair] = None,
y_range: Optional[Pair] = None,
) -> Float[Tensor, "3 height width"]:
device = image.device
points = sanitize_vector(points, 2, device)
color = sanitize_vector(color, 3, device)
radius = sanitize_scalar(radius, device)
inner_radius = sanitize_scalar(inner_radius, device)
(num_points,) = torch.broadcast_shapes(
points.shape[0],
color.shape[0],
radius.shape,
inner_radius.shape,
)
# Convert world-space points to pixel space.
_, h, w = image.shape | world_to_pixel, _ = generate_conversions((h, w), device, x_range, y_range) | 0 | 2023-12-20 19:45:59+00:00 | 2k |
nianhua99/PandoraNext-Helper | share/share.py | [
{
"identifier": "db",
"path": "model.py",
"snippet": "class User(db.Model):\n def keys(self):\n def __getitem__(self, item):\n def __repr__(self):"
},
{
"identifier": "share_tools",
"path": "util/share_tools.py",
"snippet": "def get_host():\ndef get_share_token(access_token, uni... | import json
from datetime import datetime
from flask import Blueprint, request
from flask_jwt_extended import jwt_required
from loguru import logger
from sqlalchemy import and_, text
from model import db, User
from util import share_tools
from util.api_response import ApiResponse
from util.pandora_tools import sync_pandora | 680 |
share_bp = Blueprint('share_bp', __name__)
def account2share(accounts):
shares = []
for account in accounts:
_share_list = json.loads(account.share_list)
for share in _share_list:
share['email'] = account.email
share['account_id'] = account.id
shares.append(share)
return shares
@share_bp.route('/list')
@jwt_required()
def share_list():
accounts = db.session.query(User).all()
return ApiResponse.success(account2share(accounts))
@share_bp.route('/search', methods=['POST'])
@jwt_required()
def search():
# 根据email和unique_name模糊搜索
email = request.json.get('email')
unique_name = request.json.get('unique_name')
accounts = db.session.query(User).filter(and_(User.email.like(f'%{email}%') if email else text(''), User.share_list.like(f'%{unique_name}%') if unique_name else text(''))).all()
shares = account2share(accounts)
if unique_name:
shares = list(filter(lambda x: unique_name in x['unique_name'], shares))
return ApiResponse.success(shares)
@share_bp.route('/add', methods=['POST'])
@jwt_required()
def share_add():
account_id = request.json.get('account_id')
unique_name = request.json.get('unique_name')
password = request.json.get('password')
comment = request.form.get('comment')
account = db.session.query(User).filter_by(id=account_id).first()
if account:
if not account.access_token:
return ApiResponse.error('请先登录账号')
else:
try:
|
share_bp = Blueprint('share_bp', __name__)
def account2share(accounts):
shares = []
for account in accounts:
_share_list = json.loads(account.share_list)
for share in _share_list:
share['email'] = account.email
share['account_id'] = account.id
shares.append(share)
return shares
@share_bp.route('/list')
@jwt_required()
def share_list():
accounts = db.session.query(User).all()
return ApiResponse.success(account2share(accounts))
@share_bp.route('/search', methods=['POST'])
@jwt_required()
def search():
# 根据email和unique_name模糊搜索
email = request.json.get('email')
unique_name = request.json.get('unique_name')
accounts = db.session.query(User).filter(and_(User.email.like(f'%{email}%') if email else text(''), User.share_list.like(f'%{unique_name}%') if unique_name else text(''))).all()
shares = account2share(accounts)
if unique_name:
shares = list(filter(lambda x: unique_name in x['unique_name'], shares))
return ApiResponse.success(shares)
@share_bp.route('/add', methods=['POST'])
@jwt_required()
def share_add():
account_id = request.json.get('account_id')
unique_name = request.json.get('unique_name')
password = request.json.get('password')
comment = request.form.get('comment')
account = db.session.query(User).filter_by(id=account_id).first()
if account:
if not account.access_token:
return ApiResponse.error('请先登录账号')
else:
try: | res = share_tools.get_share_token(account.access_token, unique_name) | 1 | 2023-12-18 13:18:50+00:00 | 2k |
shroominic/fastui-chat | src/fastui_chat/chat.py | [
{
"identifier": "ChatInputForm",
"path": "src/fastui_chat/components.py",
"snippet": "class ChatInputForm(c.Form):\n \"\"\"\n Component for displaying a chat input form.\n \"\"\"\n\n fire_page_event: str\n display_mode: str = \"inline\"\n class_name: str = \"row row-cols-lg-3 justify-c... | from typing import Annotated, AsyncIterable
from fastapi import APIRouter, Depends, Form
from fastapi.responses import StreamingResponse
from fastui import AnyComponent, FastUI
from fastui import components as c
from fastui.events import PageEvent
from langchain_core.chat_history import BaseChatMessageHistory
from .components import ChatInputForm, ChatMessage
from .db import get_history, get_session
from .session import ChatSession
import asyncio | 1,428 |
router = APIRouter()
@router.get("/", response_model=FastUI, response_model_exclude_none=True)
async def chat_ui() -> list[AnyComponent]:
"""
Main endpoint for showing the Chat UI and handling user input.
"""
return [
c.Page(
components=[
c.ServerLoad(
path="/chat/history",
load_trigger=PageEvent(name="chat-load"),
components=[],
),
ChatInputForm(
submit_url="/api/chat/generate",
fire_page_event="chat-load",
),
],
)
]
@router.get("/chat/history", response_model=FastUI, response_model_exclude_none=True)
async def chat_history(
history: Annotated[BaseChatMessageHistory, Depends(get_history)],
) -> list[AnyComponent]:
"""
Endpoint for showing the Chat History UI.
"""
return [ChatMessage(msg.type, msg.content) for msg in history.messages]
@router.post("/chat/generate", response_model=FastUI, response_model_exclude_none=True)
async def chat_generate(user_msg: Annotated[str, Form(...)]) -> list[AnyComponent]:
"""
Endpoint for showing the Chat Generate UI.
"""
return [
ChatMessage("human", user_msg),
c.ServerLoad(
path="/chat/generate/response?user_msg=" + user_msg,
load_trigger=PageEvent(name="generate-response"),
components=[c.Text(text="...")],
sse=True,
),
ChatInputForm(
submit_url="/api/chat/generate",
fire_page_event="generate-response",
),
]
@router.get("/chat/generate/response")
async def sse_ai_response(
user_msg: str,
|
router = APIRouter()
@router.get("/", response_model=FastUI, response_model_exclude_none=True)
async def chat_ui() -> list[AnyComponent]:
"""
Main endpoint for showing the Chat UI and handling user input.
"""
return [
c.Page(
components=[
c.ServerLoad(
path="/chat/history",
load_trigger=PageEvent(name="chat-load"),
components=[],
),
ChatInputForm(
submit_url="/api/chat/generate",
fire_page_event="chat-load",
),
],
)
]
@router.get("/chat/history", response_model=FastUI, response_model_exclude_none=True)
async def chat_history(
history: Annotated[BaseChatMessageHistory, Depends(get_history)],
) -> list[AnyComponent]:
"""
Endpoint for showing the Chat History UI.
"""
return [ChatMessage(msg.type, msg.content) for msg in history.messages]
@router.post("/chat/generate", response_model=FastUI, response_model_exclude_none=True)
async def chat_generate(user_msg: Annotated[str, Form(...)]) -> list[AnyComponent]:
"""
Endpoint for showing the Chat Generate UI.
"""
return [
ChatMessage("human", user_msg),
c.ServerLoad(
path="/chat/generate/response?user_msg=" + user_msg,
load_trigger=PageEvent(name="generate-response"),
components=[c.Text(text="...")],
sse=True,
),
ChatInputForm(
submit_url="/api/chat/generate",
fire_page_event="generate-response",
),
]
@router.get("/chat/generate/response")
async def sse_ai_response(
user_msg: str, | session: Annotated[ChatSession, Depends(get_session)], | 3 | 2023-12-17 15:07:48+00:00 | 2k |
SHI-Labs/VCoder | vcoder_llava/model/vcoder_ds_llava_arch.py | [
{
"identifier": "build_vision_tower",
"path": "vcoder_llava/model/multimodal_encoder/builder.py",
"snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists... | from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from .multimodal_adapter.builder import build_seg_projector
from .multimodal_depth_adapter.builder import build_depth_projector
from vcoder_llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, SEG_TOKEN_INDEX, DEPTH_TOKEN_INDEX
import torch
import torch.nn as nn | 1,210 | # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class VCoderDSLlavaMetaModel:
def __init__(self, config):
super(VCoderDSLlavaMetaModel, self).__init__(config)
self.config = config
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True)
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class VCoderDSLlavaMetaModel:
def __init__(self, config):
super(VCoderDSLlavaMetaModel, self).__init__(config)
self.config = config
if hasattr(config, "mm_vision_tower"):
self.vision_tower = build_vision_tower(config, delay_load=True) | self.mm_projector = build_vision_projector(config) | 1 | 2023-12-17 07:46:27+00:00 | 2k |
galatolofederico/microchain | microchain/engine/engine.py | [
{
"identifier": "Function",
"path": "microchain/engine/function.py",
"snippet": "class Function:\n def __init__(self):\n self.call_signature = inspect.signature(self.__call__) \n self.call_parameters = []\n for name, parameter in self.call_signature.parameters.items():\n ... | import ast
from microchain.engine.function import Function, FunctionResult | 801 |
class Engine:
def __init__(self, state=dict()):
self.state = state
self.functions = dict()
self.help_called = False
self.agent = None
def register(self, function):
self.functions[function.name] = function
function.bind(state=self.state, engine=self)
def bind(self, agent):
self.agent = agent
def stop(self):
if self.agent is None:
raise ValueError("You must bind the engine to an agent before stopping")
self.agent.stop()
def execute(self, command):
if self.agent is None:
raise ValueError("You must bind the engine to an agent before executing commands")
if not self.help_called:
raise ValueError("You never accessed the help property. Building a prompt without including the help string is a very bad idea.")
try:
tree = ast.parse(command)
except SyntaxError:
|
class Engine:
def __init__(self, state=dict()):
self.state = state
self.functions = dict()
self.help_called = False
self.agent = None
def register(self, function):
self.functions[function.name] = function
function.bind(state=self.state, engine=self)
def bind(self, agent):
self.agent = agent
def stop(self):
if self.agent is None:
raise ValueError("You must bind the engine to an agent before stopping")
self.agent.stop()
def execute(self, command):
if self.agent is None:
raise ValueError("You must bind the engine to an agent before executing commands")
if not self.help_called:
raise ValueError("You never accessed the help property. Building a prompt without including the help string is a very bad idea.")
try:
tree = ast.parse(command)
except SyntaxError: | return FunctionResult.ERROR, f"Error: syntax error in command {command}. Please try again." | 1 | 2023-12-19 10:57:56+00:00 | 2k |
OSU-NLP-Group/SeeAct | src/data_utils/format_prompt_utils.py | [
{
"identifier": "get_tree_repr",
"path": "src/data_utils/dom_utils.py",
"snippet": "def get_tree_repr(\n tree, max_value_length=5, max_length=20, id_mapping={}, keep_html_brackets=False\n):\n if isinstance(tree, str):\n tree = etree.fromstring(tree)\n else:\n tree = copy.deepc... | import string
import lxml
from .dom_utils import get_tree_repr, data_prune_tree | 1,404 | # -*- coding: utf-8 -*-
# Copyright (c) 2024 OSU Natural Language Processing Group
#
# Licensed under the OpenRAIL-S License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.licenses.ai/ai-pubs-open-rails-vz1
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def data_format_input_multichoice(
sample, candidate_ids, gt=-1, previous_k=5, keep_html_brackets=False
):
# Parse html into a dom tree
dom_tree = lxml.etree.fromstring(sample["cleaned_html"])
dom_tree, node_to_keep = data_prune_tree(dom_tree, candidate_ids)
| # -*- coding: utf-8 -*-
# Copyright (c) 2024 OSU Natural Language Processing Group
#
# Licensed under the OpenRAIL-S License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.licenses.ai/ai-pubs-open-rails-vz1
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def data_format_input_multichoice(
sample, candidate_ids, gt=-1, previous_k=5, keep_html_brackets=False
):
# Parse html into a dom tree
dom_tree = lxml.etree.fromstring(sample["cleaned_html"])
dom_tree, node_to_keep = data_prune_tree(dom_tree, candidate_ids) | tree_repr, id_mapping = get_tree_repr( | 0 | 2023-12-21 18:22:11+00:00 | 2k |
DeepWok/mase | machop/chop/passes/graph/analysis/add_metadata/add_software_metadata.py | [
{
"identifier": "get_mase_op",
"path": "machop/chop/passes/graph/utils.py",
"snippet": "def get_mase_op(node):\n return node.meta[\"mase\"].parameters[\"common\"][\"mase_op\"]"
},
{
"identifier": "get_mase_type",
"path": "machop/chop/passes/graph/utils.py",
"snippet": "def get_mase_ty... | import logging
from ...utils import get_mase_op, get_mase_type
from .software_metadata_layers import SOFTWARE_PARAM_ANALYSIS_LAYERS | 1,107 |
logger = logging.getLogger(__name__)
def add_software_metadata_analysis_pass(graph, pass_args=None):
"""add software metadata
:param graph: a MaseGraph
:type graph: MaseGraph
:param pass_args: this pass does not need any arguments, defaults to None
:type pass_args: _type_, optional
:return: return a tuple of a MaseGraph and an empty dict (no additional info to return)
:rtype: tuple(MaseGraph, Dict)
"""
for node in graph.fx_graph.nodes:
|
logger = logging.getLogger(__name__)
def add_software_metadata_analysis_pass(graph, pass_args=None):
"""add software metadata
:param graph: a MaseGraph
:type graph: MaseGraph
:param pass_args: this pass does not need any arguments, defaults to None
:type pass_args: _type_, optional
:return: return a tuple of a MaseGraph and an empty dict (no additional info to return)
:rtype: tuple(MaseGraph, Dict)
"""
for node in graph.fx_graph.nodes: | mase_op = get_mase_op(node) | 0 | 2023-12-18 12:50:53+00:00 | 2k |
PratikSingh121/ResearchPlot | main.py | [
{
"identifier": "GetPromptTemplates",
"path": "app/prompt_templates.py",
"snippet": "class GetPromptTemplates:\n def __init__(self, topic):\n self.topic = topic\n self.question_parser = CommaSeparatedListOutputParser()\n \n def ResearchPromptTemplate(self, questions = ''):\n if questions != ... | from langchain.output_parsers import CommaSeparatedListOutputParser
from app.prompt_templates import GetPromptTemplates
from app.question_framing import QuestionFraming
from packages.chains import Chains
import subprocess
import os | 818 | #app
#package
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Getting Topic
print('\033[93m' + "Enter the topic. You can add just a keyword or a description.\nTopic : > " + '\033[0m', end="")
topic = input()
print()
#Objects
Chain = Chains()
PromptTemplate = GetPromptTemplates(topic)
QuestionParser = CommaSeparatedListOutputParser()
# Getting Questions
print('\033[92m' + "Do you want to answer some questions? (y/n) \nAnswer : > " + '\033[0m', end="")
questions_allowed = input()
print()
if questions_allowed == 'y':
questions_allowed = True
else:
questions_allowed = False
if questions_allowed:
QuestionsList = Chain.chain(PromptTemplate = PromptTemplate.QuestionPromptTemplate(), parser = QuestionParser)
| #app
#package
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Getting Topic
print('\033[93m' + "Enter the topic. You can add just a keyword or a description.\nTopic : > " + '\033[0m', end="")
topic = input()
print()
#Objects
Chain = Chains()
PromptTemplate = GetPromptTemplates(topic)
QuestionParser = CommaSeparatedListOutputParser()
# Getting Questions
print('\033[92m' + "Do you want to answer some questions? (y/n) \nAnswer : > " + '\033[0m', end="")
questions_allowed = input()
print()
if questions_allowed == 'y':
questions_allowed = True
else:
questions_allowed = False
if questions_allowed:
QuestionsList = Chain.chain(PromptTemplate = PromptTemplate.QuestionPromptTemplate(), parser = QuestionParser) | questionframing = QuestionFraming(QuestionsList) | 1 | 2023-12-17 10:23:00+00:00 | 2k |
yeyt97/AirDropPlus | AirDropPlus.py | [
{
"identifier": "Config",
"path": "config.py",
"snippet": "class Config:\n def __init__(self, config_path):\n self.config = configparser.ConfigParser()\n self.config.read(config_path, encoding='utf-8')\n\n self.config_path = config_path\n self.key = self.config.get('config... | import os
import sys
import utils
from config import Config
from notifier import create_notifier
from server import Server | 1,571 |
if __name__ == '__main__':
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
config_file_path = os.path.join(SCRIPT_DIR, 'config', 'config.ini')
|
if __name__ == '__main__':
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
config_file_path = os.path.join(SCRIPT_DIR, 'config', 'config.ini') | config = Config(config_file_path) | 0 | 2023-12-19 08:16:21+00:00 | 2k |
byeongjun-park/HarmonyView | ldm/thirdp/psp/model_irse.py | [
{
"identifier": "get_blocks",
"path": "ldm/thirdp/psp/helpers.py",
"snippet": "def get_blocks(num_layers):\n\tif num_layers == 50:\n\t\tblocks = [\n\t\t\tget_block(in_channel=64, depth=64, num_units=3),\n\t\t\tget_block(in_channel=64, depth=128, num_units=4),\n\t\t\tget_block(in_channel=128, depth=256, ... | from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
from ldm.thirdp.psp.helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm | 1,154 | # https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
| # https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
super(Backbone, self).__init__()
assert input_size in [112, 224], "input_size should be 112 or 224"
assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se" | blocks = get_blocks(num_layers) | 0 | 2023-12-21 04:44:00+00:00 | 2k |
srlabs/black-basta-buster | extractblock.py | [
{
"identifier": "detect_magic_size",
"path": "decryptblocks.py",
"snippet": "def make_int(i):\ndef make_int_or_percent(i):\ndef xor_blocks(var, key, byteorder=sys.byteorder):\ndef write_block(fd, offset, block):\ndef main():\ndef decrypt_file(f, keyblock, fsize=None, is_dry=True, lower_limit=None, upper... | import argparse
import logging
import sys
import logging
import math
from collections import deque
from itertools import islice
from pathlib import Path
from hexdump import hexdump
from decryptblocks import detect_magic_size, make_int, make_int_or_percent, Percent
from ranges import ranges_for_file
from collections import Counter | 1,455 |
log = logging.getLogger(__name__)
def extract_block(fd, offset, size=64):
#log.debug("Reading %r at %r for %r ", fd, offset, size)
fd.seek(offset)
block = fd.read(size)
log.debug("Read %i bytes at %r for %r:\n%s", len(block), offset, size, hexdump(block, result="return"))
return block
def make_int_or_auto(s):
if s.strip() == "auto":
return "auto"
else:
return make_int(s)
### Entropy taken from https://stackoverflow.com/a/37890790/2015768
def eta(data, unit='natural'):
base = {
'shannon' : 2.,
'natural' : math.exp(1),
'hartley' : 10.
}
if len(data) <= 1:
return 0
counts = Counter()
for d in data:
counts[d] += 1
ent = 0
probs = [float(c) / len(data) for c in counts.values()]
for p in probs:
if p > 0.:
ent -= p * math.log(p, base[unit])
return ent
BLOCKSIZE = 64
NULLBLOCK = b'\x00' * BLOCKSIZE
def auto_detect_key_block(f, fsize=None, lower_limit=None, upper_limit=None):
if fsize is None:
fsize = detect_magic_size(f)
block = None
if lower_limit is None:
# we skip the first few block, unless explicitly requested
lower_limit = next(islice(ranges_for_file(f, fsize), 5, 6))[0]
if upper_limit is None:
upper_limit = fsize
CONFIDENCE = 5
with open(f, "rb") as fd:
confidence_blocks = deque(maxlen=CONFIDENCE)
for n, (offset, length) in enumerate(filter(lambda offset_len: lower_limit <= offset_len[0] < upper_limit, ranges_for_file(f, fsize))):
t = True
for i in (-2, -1, 1, 2):
b = extract_block(fd, offset-i*BLOCKSIZE)
t &= b == NULLBLOCK
log.debug("T is now: %s", t)
#if not t:
# raise
if t:
log.debug("Confidence: %s", confidence_blocks)
b = extract_block(fd, offset)
if b == NULLBLOCK:
log.debug("B is null")
else:
log.debug("Adding confidence at %d %r", offset, b)
confidence_blocks.append((offset, b))
if len(confidence_blocks) == CONFIDENCE:
if all((b == x[1] for x in confidence_blocks)):
log.info ("Found blocks: %r", confidence_blocks)
block = b # Urhgs. This is spaghetti control flow. Sorry.
break
else:
log.info("Not all blocks are equal to %r: %s", b, confidence_blocks)
raise
else:
log.info("only %d blocks: %s", len(confidence_blocks), confidence_blocks)
else:
print ("non found")
raise
return block
def main():
argparser = argparse.ArgumentParser(description="Extracts a 64 byte long chunk out of a file. This can be useful for taking that block as an encryption key.")
argparser.add_argument("--hexdump", action="store_true")
argparser.add_argument("--dry", action="store_true",
help="Do not write anything")
argparser.add_argument("--size", type=int, default=0x40, help="Chunk size")
argparser.add_argument("--start-at", type=make_int_or_percent, default=None, help="Start the automatic determination from here, only")
argparser.add_argument("--output", type=Path, help="Write the chunk to a file rather than stdout")
argparser.add_argument("file", type=Path, help="The file to cut a chunk out of")
argparser.add_argument("offset", type=make_int_or_auto, help="Position to cut the chunk out of the file, or 'auto' to detect encrypted zero bytes")
args = argparser.parse_args()
offset = args.offset
f = args.file
size = args.size
start_at = args.start_at
logging.basicConfig(level=logging.INFO)
fsize = detect_magic_size(f)
| #!/usr/bin/env python3
# Copyright 2023 Tobias Mueller <tobias@srlabs.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
except ModuleNotFoundError:
log = logging.getLogger(__name__)
log.warning("hexdump module not found. Try pip install hexdump")
def hexdump(*args, **kwargs):
log.error("Cannot find the hexdump module. Try pip install hexdump")
log = logging.getLogger(__name__)
def extract_block(fd, offset, size=64):
#log.debug("Reading %r at %r for %r ", fd, offset, size)
fd.seek(offset)
block = fd.read(size)
log.debug("Read %i bytes at %r for %r:\n%s", len(block), offset, size, hexdump(block, result="return"))
return block
def make_int_or_auto(s):
if s.strip() == "auto":
return "auto"
else:
return make_int(s)
### Entropy taken from https://stackoverflow.com/a/37890790/2015768
def eta(data, unit='natural'):
base = {
'shannon' : 2.,
'natural' : math.exp(1),
'hartley' : 10.
}
if len(data) <= 1:
return 0
counts = Counter()
for d in data:
counts[d] += 1
ent = 0
probs = [float(c) / len(data) for c in counts.values()]
for p in probs:
if p > 0.:
ent -= p * math.log(p, base[unit])
return ent
BLOCKSIZE = 64
NULLBLOCK = b'\x00' * BLOCKSIZE
def auto_detect_key_block(f, fsize=None, lower_limit=None, upper_limit=None):
if fsize is None:
fsize = detect_magic_size(f)
block = None
if lower_limit is None:
# we skip the first few block, unless explicitly requested
lower_limit = next(islice(ranges_for_file(f, fsize), 5, 6))[0]
if upper_limit is None:
upper_limit = fsize
CONFIDENCE = 5
with open(f, "rb") as fd:
confidence_blocks = deque(maxlen=CONFIDENCE)
for n, (offset, length) in enumerate(filter(lambda offset_len: lower_limit <= offset_len[0] < upper_limit, ranges_for_file(f, fsize))):
t = True
for i in (-2, -1, 1, 2):
b = extract_block(fd, offset-i*BLOCKSIZE)
t &= b == NULLBLOCK
log.debug("T is now: %s", t)
#if not t:
# raise
if t:
log.debug("Confidence: %s", confidence_blocks)
b = extract_block(fd, offset)
if b == NULLBLOCK:
log.debug("B is null")
else:
log.debug("Adding confidence at %d %r", offset, b)
confidence_blocks.append((offset, b))
if len(confidence_blocks) == CONFIDENCE:
if all((b == x[1] for x in confidence_blocks)):
log.info ("Found blocks: %r", confidence_blocks)
block = b # Urhgs. This is spaghetti control flow. Sorry.
break
else:
log.info("Not all blocks are equal to %r: %s", b, confidence_blocks)
raise
else:
log.info("only %d blocks: %s", len(confidence_blocks), confidence_blocks)
else:
print ("non found")
raise
return block
def main():
argparser = argparse.ArgumentParser(description="Extracts a 64 byte long chunk out of a file. This can be useful for taking that block as an encryption key.")
argparser.add_argument("--hexdump", action="store_true")
argparser.add_argument("--dry", action="store_true",
help="Do not write anything")
argparser.add_argument("--size", type=int, default=0x40, help="Chunk size")
argparser.add_argument("--start-at", type=make_int_or_percent, default=None, help="Start the automatic determination from here, only")
argparser.add_argument("--output", type=Path, help="Write the chunk to a file rather than stdout")
argparser.add_argument("file", type=Path, help="The file to cut a chunk out of")
argparser.add_argument("offset", type=make_int_or_auto, help="Position to cut the chunk out of the file, or 'auto' to detect encrypted zero bytes")
args = argparser.parse_args()
offset = args.offset
f = args.file
size = args.size
start_at = args.start_at
logging.basicConfig(level=logging.INFO)
fsize = detect_magic_size(f) | if isinstance(start_at, Percent): | 0 | 2023-12-20 20:04:51+00:00 | 2k |
EntySec/SeaShell | seashell/core/console.py | [
{
"identifier": "Banner",
"path": "seashell/utils/ui/banner.py",
"snippet": "class Banner(object):\n \"\"\" Subclass of seashell.core module.\n\n This subclass of seashell.core module is intended for\n providing tools for printing banners in UI.\n \"\"\"\n\n def __init__(self) -> None:\n ... | import os
import cmd
import sys
from badges import Badges, Tables
from colorscript import ColorScript
from hatsploit.lib.commands import Commands
from hatsploit.lib.runtime import Runtime
from seashell.utils.ui.banner import Banner
from seashell.utils.ui.tip import Tip
from seashell.lib.config import Config | 1,297 | """
MIT License
Copyright (c) 2020-2024 EntySec
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class Console(cmd.Cmd):
""" Subclass of seashell.core module.
This subclass of seashell.core modules is intended for providing
main SeaShell Framework console interface.
"""
def __init__(self) -> None:
super().__init__()
cmd.Cmd.__init__(self)
self.badges = Badges()
self.tables = Tables()
self.banner = Banner()
| """
MIT License
Copyright (c) 2020-2024 EntySec
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
class Console(cmd.Cmd):
""" Subclass of seashell.core module.
This subclass of seashell.core modules is intended for providing
main SeaShell Framework console interface.
"""
def __init__(self) -> None:
super().__init__()
cmd.Cmd.__init__(self)
self.badges = Badges()
self.tables = Tables()
self.banner = Banner() | self.tip = Tip() | 1 | 2023-12-17 04:14:16+00:00 | 2k |
FlagOpen/TACO | train.py | [
{
"identifier": "Trainer",
"path": "train_utils.py",
"snippet": "class Trainer(transformers.Trainer):\n \"\"\"Use CosineAnnealingLR from pytorch \n \"\"\"\n \n def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):\n \"\"\"\n Setup the sch... | from typing import Optional, Dict
from dataclasses import dataclass, field
from train_utils import Trainer
from datamodule import DEFAULT_PAD_TOKEN, DEFAULT_EOS_TOKEN, DEFAULT_BOS_TOKEN, TacoDataset, DataCollatorForTacoDataset
import transformers | 1,568 | """
Finetune models on TACO-Dataset train split
"""
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="bigcode/tiny_starcoder_py")
@dataclass
class DataArguments:
data_path: str = field(default=None, metadata={"help": "Path to the training data."})
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
adam_beta1: float = field(default=0.9)
adam_beta2: float = field(default=0.95)
use_cosine_anneal_with_warmup: bool = field(default=True)
model_max_length: int = field(
default=2048,
metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
)
resume_from_checkpoint: bool = field(
default=False,
metadata={"help": "load the last checkpoint in args.output_dir as saved by a previous instance of Trainer."}
)
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def make_taco_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
"""Make dataset and collator for fine-tune"""
| """
Finetune models on TACO-Dataset train split
"""
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="bigcode/tiny_starcoder_py")
@dataclass
class DataArguments:
data_path: str = field(default=None, metadata={"help": "Path to the training data."})
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
adam_beta1: float = field(default=0.9)
adam_beta2: float = field(default=0.95)
use_cosine_anneal_with_warmup: bool = field(default=True)
model_max_length: int = field(
default=2048,
metadata={"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."},
)
resume_from_checkpoint: bool = field(
default=False,
metadata={"help": "load the last checkpoint in args.output_dir as saved by a previous instance of Trainer."}
)
def smart_tokenizer_and_embedding_resize(
special_tokens_dict: Dict,
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def make_taco_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
"""Make dataset and collator for fine-tune""" | train_dataset = TacoDataset(data_path=data_args.data_path) | 4 | 2023-12-20 03:12:01+00:00 | 2k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.