Dataset schema (column, type, observed min/max):

  column              type           min    max
  repo_name           stringlengths  7      71
  file_path           stringlengths  5      118
  context             list           -      -
  import_statement    stringlengths  45     12.5k
  token_num           int64          641    99.4k
  cropped_code        stringlengths  44     17k
  all_code            stringlengths  43     754k
  next_line           stringlengths  2      330
  gold_snippet_index  int64          0      68
  created_at          stringlengths  25     25
  level               stringclasses  9 values
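The columns above suggest a repository-level next-line code-completion benchmark: each row pairs a source file cropped at some point (cropped_code) with the single line that follows it (next_line), plus a list of retrieved cross-file snippets (context), one of which (gold_snippet_index) defines the symbol the next line needs. Below is a minimal inspection sketch, assuming the data is hosted on the Hugging Face Hub and loadable with the datasets library; the dataset ID org/repo-level-completion is a placeholder, not a real path, and the snippet keys (identifier, path, snippet) are taken from the context previews in the examples that follow.

    from datasets import load_dataset

    # Placeholder Hub ID (hypothetical) -- substitute this dataset's real path.
    ds = load_dataset("org/repo-level-completion", split="train")

    row = ds[0]
    print(row["repo_name"], row["file_path"], row["token_num"], row["level"])

    # `context` is a list of retrieved cross-file snippets; `gold_snippet_index`
    # points at the one that defines the symbol needed by `next_line`.
    gold = row["context"][row["gold_snippet_index"]]
    print(gold["identifier"], gold["path"])
    print("target:", row["next_line"])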
Example 1
repo_name: Harvard-Ophthalmology-AI-Lab/FairSeg
file_path: SAMed/segment_anything/modeling/image_encoder.py
context: [ { "identifier": "LayerNorm2d", "path": "SAMed/segment_anything/modeling/common.py", "snippet": "class LayerNorm2d(nn.Module):\n def __init__(self, num_channels: int, eps: float = 1e-6) -> None:\n super().__init__()\n self.weight = nn.Parameter(torch.ones(num_channels))\n self.bia...
import_statement:
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    from icecream import ic
    from typing import Optional, Tuple, Type
    from .common import LayerNorm2d, MLPBlock
token_num: 1,147
cropped_code:
    # Copyright (c) Meta Platforms, Inc. and affiliates.
    # All rights reserved.
    # This source code is licensed under the license found in the
    # LICENSE file in the root directory of this source tree.

    # This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa


    class ImageEncoderViT(nn.Module):
        def __init__(
            self,
            img_size: int = 1024,
            patch_size: int = 16,
            in_chans: int = 3,
            embed_dim: int = 768,
            depth: int = 12,
            num_heads: int = 12,
            mlp_ratio: float = 4.0,
            out_chans: int = 256,
            qkv_bias: bool = True,
            norm_layer: Type[nn.Module] = nn.LayerNorm,
            act_layer: Type[nn.Module] = nn.GELU,
            use_abs_pos: bool = True,
            use_rel_pos: bool = False,
            rel_pos_zero_init: bool = True,
            window_size: int = 0,
            global_attn_indexes: Tuple[int, ...] = (),
        ) -> None:
            """
            Args:
                img_size (int): Input image size.
                patch_size (int): Patch size.
                in_chans (int): Number of input image channels.
                embed_dim (int): Patch embedding dimension.
                depth (int): Depth of ViT.
                num_heads (int): Number of attention heads in each ViT block.
                mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
                qkv_bias (bool): If True, add a learnable bias to query, key, value.
                norm_layer (nn.Module): Normalization layer.
                act_layer (nn.Module): Activation layer.
                use_abs_pos (bool): If True, use absolute positional embeddings.
                use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
                rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
                window_size (int): Window size for window attention blocks.
                global_attn_indexes (list): Indexes for blocks using global attention.
            """
            super().__init__()
            self.img_size = img_size

            self.patch_embed = PatchEmbed(
                kernel_size=(patch_size, patch_size),
                stride=(patch_size, patch_size),
                in_chans=in_chans,
                embed_dim=embed_dim,
            )

            self.pos_embed: Optional[nn.Parameter] = None
            if use_abs_pos:
                # Initialize absolute positional embedding with pretrain image size.
                self.pos_embed = nn.Parameter(
                    torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
                )

            self.blocks = nn.ModuleList()
            for i in range(depth):
                block = Block(
                    dim=embed_dim,
                    num_heads=num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    norm_layer=norm_layer,
                    act_layer=act_layer,
                    use_rel_pos=use_rel_pos,
                    rel_pos_zero_init=rel_pos_zero_init,
                    window_size=window_size if i not in global_attn_indexes else 0,
                    input_size=(img_size // patch_size, img_size // patch_size),
                )
                self.blocks.append(block)

            self.neck = nn.Sequential(
                nn.Conv2d(
                    embed_dim,
                    out_chans,
                    kernel_size=1,
                    bias=False,
                ),
all_code: preview identical to cropped_code above
next_line: LayerNorm2d(out_chans),
gold_snippet_index: 0
created_at: 2023-11-03 17:05:40+00:00
level: 2k

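Given the fields of Example 1, a plausible way to consume a row is to assemble a completion prompt from the gold context snippet, the file's imports, and the cropped file body, then score a model's output against next_line. The sketch below is an assumption about usage, not a documented protocol; build_prompt and exact_match are hypothetical helpers and the concatenation order is a guess.

    def build_prompt(row: dict) -> str:
        # Hypothetical prompt assembly: gold cross-file snippet first, then the
        # file's imports, then the cropped body up to the completion point.
        gold = row["context"][row["gold_snippet_index"]]["snippet"]
        return f"{gold}\n\n{row['import_statement']}\n\n{row['cropped_code']}\n"

    def exact_match(prediction: str, row: dict) -> bool:
        # Whitespace-insensitive comparison against the reference next line.
        return prediction.strip() == row["next_line"].strip()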
Example 2
repo_name: anand2312/quill-server
file_path: quill_server/realtime/events.py
context: [ { "identifier": "User", "path": "quill_server/db/models.py", "snippet": "class User(Base):\n __tablename__ = \"user\"\n\n id: Mapped[UUID] = mapped_column(pg_UUID(as_uuid=True), primary_key=True, default=uuid4) # noqa: A003\n username: Mapped[str] = mapped_column(unique=True)\n password: Ma...
import_statement:
    from enum import StrEnum, auto
    from functools import partial
    from typing import Any, Generic, TypeVar
    from collections.abc import Awaitable
    from loguru import logger
    from pydantic import BaseModel
    from redis.asyncio import Redis
    from quill_server.db.models import User
    from quill_server.realtime.room import GameMember, Room, ChatMessage, _db_user_to_game_member
    from quill_server.schema import MessageResponse
    import typing
token_num: 1,548
cropped_code:
    DataT = TypeVar("DataT", bound=BaseModel)

    # the excalidraw element event contains many fields
    # https://github.com/excalidraw/excalidraw/blob/master/src/element/types.ts#L27-L141
    ExcalidrawElement = dict[str, Any]


    class Drawing(BaseModel):
all_code: preview identical to cropped_code above
next_line: user: GameMember
gold_snippet_index: 1
created_at: 2023-11-03 12:43:18+00:00
level: 2k

Example 3
repo_name: OPTML-Group/DeepZero
file_path: algorithm/prune/main.py
context: [ { "identifier": "zoo_grasp_importance_score", "path": "algorithm/prune/importance_scores.py", "snippet": "def zoo_grasp_importance_score(\n model,\n dataloader,\n samples_per_class,\n class_num,\n zoo_rs_size,\n zoo_step_size,\n loss_func = torch.nn.CrossEntropyLoss()\n ):\n\n ...
import_statement:
    import torch
    from torch.nn.utils import prune
    from copy import deepcopy
    from .importance_scores import zoo_grasp_importance_score, grasp_importance_score, random_importance_score
token_num: 963
cropped_code:
    __all__ = ['global_prune', 'check_sparsity', 'check_grad_sparsity', 'custom_prune', 'extract_mask', 'remove_prune', 'layer_sparsity']


    def global_prune(model, ratio, method, class_num=None, dataloader=None, sample_per_classes=25, zoo_sample_size=None, zoo_step_size=None, layer_wise_sparsity=None):
        if method == 'grasp':
            score_dict = grasp_importance_score(model, dataloader, sample_per_classes, class_num)
            prune.global_unstructured(
                parameters=score_dict.keys(),
                pruning_method=prune.L1Unstructured,
                amount=ratio,
                importance_scores=score_dict,
            )
        elif method == 'zo_grasp':
all_code: preview identical to cropped_code above
next_line: score_dict = zoo_grasp_importance_score(model, dataloader, sample_per_classes, class_num, zoo_sample_size, zoo_step_size)
gold_snippet_index: 0
created_at: 2023-11-01 14:47:38+00:00
level: 2k

Example 4
repo_name: S3raphimCS/Hackathon_telehack
file_path: backend/SPO_KROT/metrics/admin.py
context: [ { "identifier": "ExcelFile", "path": "backend/SPO_KROT/metrics/models.py", "snippet": "class ExcelFile(models.Model):\n file = models.FileField(\n upload_to='metrics',\n unique=True,\n blank=True, null=True,\n validators=[FileExtensionValidator(['xlsx', 'xls', 'xlsm'])],\n...
import_statement:
    from django.contrib import admin
    from .models import ExcelFile, Measurements, Operator, Report
token_num: 1,372
cropped_code:
    @admin.register(Operator)
    class OperatorAdmin(admin.ModelAdmin):
        list_display = ('name',)
        list_per_page = 15
        search_fields = ("name",)
        readonly_fields = ('id',)
all_code: preview identical to cropped_code above
next_line: @admin.register(Report)
gold_snippet_index: 3
created_at: 2023-11-09 12:55:04+00:00
level: 2k

Example 5
repo_name: lz1oceani/LLM-As-Hierarchical-Policy
file_path: hlm/utils/metric_utils.py
context: [ { "identifier": "normalize_answer", "path": "hlm/utils/math_answer_utils.py", "snippet": "def normalize_answer(text, answer_type=\"text\"):\n ret = normalize_answer_core(text, answer_type)\n try:\n str(ret)\n except:\n ret = None\n return \"No answer!\" if ret is None else ret"...
import_statement:
    import os, warnings
    import numpy as np, re, time, signal, sympy, scipy
    from sympy.utilities.exceptions import SymPyDeprecationWarning
    from collections import defaultdict
    from numbers import Number
    from IPython import embed
    from copy import deepcopy
    from itertools import chain
    from sympy.parsing.latex import parse_latex
    from sympy.core.expr import Expr
    from sympy import Interval, conjugate, Abs
    from .math_answer_utils import normalize_answer, is_set, is_sympy, is_constant, to_set, is_relation
    from math import *
    from .misc import timeout_call
token_num: 1,417
cropped_code:
    os.environ["USE_SYMENGINE"] = "1"
    warnings.simplefilter("ignore", SyntaxWarning)
    warnings.simplefilter("ignore", RuntimeWarning)
    warnings.filterwarnings("ignore", category=SymPyDeprecationWarning)

    # from sympy import Symbol, Eq, simplify, solve

    NO_ANSWER = "No answer!"

    SKIP_ANSWER_TEMPLATE = [
        "Code cannot be executed!",
        "Code contains infinite loop!",
        "no possible values",
        NO_ANSWER,
    ]
    SKIP_ANSWER_TEMPLATE = SKIP_ANSWER_TEMPLATE + [_.lower() for _ in SKIP_ANSWER_TEMPLATE]

    ZERO_ANSWER_TEMPLATE = [
        "doesn't have any money left",
        "used up all of",
    ]


    def check_basics(source, target):
        if not (isinstance(source, (Expr, Number)) and isinstance(target, (Expr, Number))):
            return True
        source_symbols = source.free_symbols if isinstance(source, Expr) else {}
        target_symbols = target.free_symbols if isinstance(target, Expr) else {}
        if source_symbols != target_symbols:
            return False
        try:
            if len(source_symbols) > 0:
                values = {_: np.random.rand() for _ in source_symbols}
                source = source.subs(values)
                target = target.subs(values)
            else:
                source = source.evalf()
                target = target.evalf()
            if not isinstance(source, Number) or not isinstance(target, Number):
                source = abs(source).simplify() if not isinstance(source, Number) else source
                target = abs(target).simplify() if not isinstance(target, Number) else target
            return bool(np.abs(source - target) < 1e-6)
        except:
            pass
        return True


    def run_sympy_compare(source, target):
        def has_fn(x):
            for name in ["equals", "compare", "intersect"]:
                if hasattr(x, name):
                    return True
            return False

        # print(is_constant(source), is_constant(target))
        # return False
        if is_constant(source) and is_constant(target):
            source = source if isinstance(source, Number) else source.evalf()
            target = target if isinstance(target, Number) else target.evalf()
            try:
                return bool(np.abs(source - target) < 1e-6)
            except:
                return False
        if is_set(source) or is_set(target):
            source = to_set(source)
            target = to_set(target)
        if not has_fn(source):
            source, target = target, source
        assert has_fn(source), [source, target, type(source), type(target)]
        try:
            if hasattr(source, "equals"):  # Work for expressions and points
                if is_relation(source) != is_relation(target):
                    return False
                if not is_relation(source) and not check_basics(source, target):
                    return False
                ret = source.equals(target)
                ret = False if ret is None else bool(ret)
            elif hasattr(source, "intersect"):
                sign1 = source.intersect(target.complement(sympy.S.Reals)).simplify()
                sign1 = sign1.is_empty or (np.abs(sign1.measure) < 1e-6 and sign1.is_open)
                sign2 = target.intersect(source.complement(sympy.S.Reals)).simplify()
                sign2 = sign2.is_empty or (np.abs(sign2.measure) < 1e-6 and sign2.is_open)
                ret = sign1 and sign2
            elif hasattr(source, "compare"):
                ret = source.compare(target) == 0
        except:
            ret = False
        return bool(ret)


    def compare_items(source, target, answer_type="text", need_normalize=True):
        if isinstance(source, (list, tuple)):
            return [compare_items(_, target, answer_type=answer_type, need_normalize=need_normalize) for _ in source]
        if source == "No answer!" or target == "No answer!" or source is None or target is None:
            return False
        if answer_type in ["text", "date", "bool"]:
            return source.lower() == target.lower()
        if isinstance(source, str) and isinstance(target, str):
            if "=" in source and "=" not in target:
                source = source.split("=")[-1]
            if "=" in target and "=" not in source:
                target = target.split("=")[-1]
        if need_normalize:
os.environ["USE_SYMENGINE"] = "1" warnings.simplefilter("ignore", SyntaxWarning) warnings.simplefilter("ignore", RuntimeWarning) warnings.filterwarnings("ignore", category=SymPyDeprecationWarning) # from sympy import Symbol, Eq, simplify, solve NO_ANSWER = "No answer!" SKIP_ANSWER_TEMPLATE = [ "Code cannot be executed!", "Code contains infinite loop!", "no possible values", NO_ANSWER, ] SKIP_ANSWER_TEMPLATE = SKIP_ANSWER_TEMPLATE + [_.lower() for _ in SKIP_ANSWER_TEMPLATE] ZERO_ANSWER_TEMPLATE = [ "doesn't have any money left", "used up all of", ] def check_basics(source, target): if not (isinstance(source, (Expr, Number)) and isinstance(target, (Expr, Number))): return True source_symbols = source.free_symbols if isinstance(source, Expr) else {} target_symbols = target.free_symbols if isinstance(target, Expr) else {} if source_symbols != target_symbols: return False try: if len(source_symbols) > 0: values = {_: np.random.rand() for _ in source_symbols} source = source.subs(values) target = target.subs(values) else: source = source.evalf() target = target.evalf() if not isinstance(source, Number) or not isinstance(target, Number): source = abs(source).simplify() if not isinstance(source, Number) else source target = abs(target).simplify() if not isinstance(target, Number) else target return bool(np.abs(source - target) < 1e-6) except: pass return True def run_sympy_compare(source, target): def has_fn(x): for name in ["equals", "compare", "intersect"]: if hasattr(x, name): return True return False # print(is_constant(source), is_constant(target)) # return False if is_constant(source) and is_constant(target): source = source if isinstance(source, Number) else source.evalf() target = target if isinstance(target, Number) else target.evalf() try: return bool(np.abs(source - target) < 1e-6) except: return False if is_set(source) or is_set(target): source = to_set(source) target = to_set(target) if not has_fn(source): source, target = target, source assert has_fn(source), [source, target, type(source), type(target)] try: if hasattr(source, "equals"): # Work for expressions and points if is_relation(source) != is_relation(target): return False if not is_relation(source) and not check_basics(source, target): return False ret = source.equals(target) ret = False if ret is None else bool(ret) elif hasattr(source, "intersect"): sign1 = source.intersect(target.complement(sympy.S.Reals)).simplify() sign1 = sign1.is_empty or (np.abs(sign1.measure) < 1e-6 and sign1.is_open) sign2 = target.intersect(source.complement(sympy.S.Reals)).simplify() sign2 = sign2.is_empty or (np.abs(sign2.measure) < 1e-6 and sign2.is_open) ret = sign1 and sign2 elif hasattr(source, "compare"): ret = source.compare(target) == 0 except: ret = False return bool(ret) def compare_items(source, target, answer_type="text", need_normalize=True): if isinstance(source, (list, tuple)): return [compare_items(_, target, answer_type=answer_type, need_normalize=need_normalize) for _ in source] if source == "No answer!" or target == "No answer!" or source is None or target is None: return False if answer_type in ["text", "date", "bool"]: return source.lower() == target.lower() if isinstance(source, str) and isinstance(target, str): if "=" in source and "=" not in target: source = source.split("=")[-1] if "=" in target and "=" not in source: target = target.split("=")[-1] if need_normalize:
next_line: source = normalize_answer(source, answer_type)
gold_snippet_index: 0
created_at: 2023-11-01 17:15:42+00:00
level: 2k

Example 6
repo_name: mitre/arlin
file_path: tests/test_dataset/test_collectors/test_sb3_collectors.py
context: [ { "identifier": "SB3DQNDataCollector", "path": "arlin/dataset/collectors/sb3_collectors.py", "snippet": "class SB3DQNDataCollector(BaseDataCollector):\n \"\"\"Data collector for a model trained with DQN in stable-baselines3.\"\"\"\n\n def __init__(self, datapoint_cls: Type[BaseDatapoint], policy: ...
import_statement:
    import pytest
    from stable_baselines3 import DQN
    from arlin.dataset.collectors import SB3DQNDataCollector, SB3PPODataCollector
    from arlin.dataset.collectors.datapoints import SB3DQNDatapoint, SB3PPODatapoint
token_num: 1,031
cropped_code:
    @pytest.fixture
    def dqn_model(env):
        model = DQN("MlpPolicy", env, verbose=1)
        model.learn(total_timesteps=int(100))
        return model


    class TestSB3Collectors:
        def test_sb3_ppo_collector(self, ppo_model, env):
all_code: preview identical to cropped_code above
next_line: collector = SB3PPODataCollector(SB3PPODatapoint, ppo_model.policy)
gold_snippet_index: 1
created_at: 2023-11-08 13:57:45+00:00
level: 2k

Example 7
repo_name: Giftify-Bot/Giftify-Bot
file_path: utils/paginator.py
context: [ { "identifier": "ARROW_BACK_EMOJI", "path": "utils/constants.py", "snippet": "ARROW_BACK_EMOJI = \"<:GiftifyBack:1120372002939744308>\"" }, { "identifier": "ARROW_EMOJI", "path": "utils/constants.py", "snippet": "ARROW_EMOJI = \"<:GiftifyArrow:1117849870678638653>\"" }, { "identi...
import_statement:
    import abc
    import discord
    from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union
    from discord.ext import commands
    from typing import TypeAlias
    from typing_extensions import TypeAlias
    from utils.constants import ARROW_BACK_EMOJI, ARROW_EMOJI, STOP_EMOJI
    from utils.tree import Interaction
    from bot import Giftify
token_num: 1,496
cropped_code:
    @property
    def max_page(self) -> int:
        """The max page count for this paginator."""
        return len(self.pages)

    @property
    def min_page(self) -> int:
        """The min page count for this paginator."""
        return 1

    @property
    def current_page(self) -> int:
        """The current page the user is on."""
        return self._current_page_index + 1

    @property
    def total_pages(self) -> int:
        """Returns the total amount of pages."""
        return len(self.pages)

    @abc.abstractmethod
    def format_page(self, entries: List[T], /) -> discord.Embed:
        """
        Used to make the embed that the user sees. This can be a coroutine
        or a regular function. This must be overwritten by the subclass.

        Parameters
        ----------
        entries: List[Any]
            A list of entries for the current page.

        Returns
        -------
        discord.Embed
            The embed for this page.
        """
        raise NotImplementedError("Subclass did not overwrite format_page coro.")

    async def embed(self) -> discord.Embed:
        """
        A helper function to get the embed for the current page.

        Returns
        -------
        discord.Embed
            The embed for the current page.
        """
        return await discord.utils.maybe_coroutine(
            self.format_page, self.pages[self._current_page_index]
        )

    async def interaction_check(self, interaction: Interaction, /) -> Optional[bool]:
        """
        The base interaction check for the given view.

        This will always return ``True`` if the target is ``None``, otherwise it will check
        that the user invoking the paginator is the same user that is interacting with the
        paginator.

        Parameters
        ----------
        interaction: discord.Interaction
            The interaction to check.

        Returns
        -------
        Optional[bool]
            The result of the interaction check. If this returns ``None`` then the
            interaction was responded to with an error message to the user.
        """
        if self.target is None:
            return True

        assert self.author

        # Ensure this is the correct invoker
        if self.author.id != interaction.user.id:
            return await interaction.response.send_message(
                "Hey, this isn't yours!", ephemeral=True
            )

        # Ensure they invoke it in the correct channel.
        if (
            self.target.channel
            and interaction.channel
            and self.target.channel.id != interaction.channel.id
        ):
            return await interaction.response.send_message(
                "Hey, this isn't in the right channel!", ephemeral=True
            )

        return True

    def _switch_page(self, count: int, /) -> None:
        self._current_page_index += count

        if self.clamp_pages:
            if count < 0:  # Going down
                if self._current_page_index < 0:
                    self._current_page_index = self.max_page - 1
            elif count > 0:  # Going up
                if self._current_page_index > self.max_page - 1:  # - 1 for indexing
                    self._current_page_index = 0

        return

    @discord.ui.button(emoji=ARROW_BACK_EMOJI)
    async def on_arrow_backward(
        self, interaction: Interaction, button: discord.ui.Button[BaseButtonPaginator]
    ) -> discord.InteractionMessage:
        """
        The button to represent going backwards a page.

        Parameters
        ----------
        interaction: discord.Interaction
            The interaction created from the user invoking the button.
        button: discord.ui.Button
            The button that was pressed.
        """
        await interaction.response.defer()
        self._switch_page(-1)

        embed = await self.embed()
        return await interaction.edit_original_response(embed=embed)
all_code:
    from __future__ import annotations

    try:
    except ImportError:

    if TYPE_CHECKING:

    T = TypeVar("T")
    TargetType: TypeAlias = Union[Interaction, commands.Context["Giftify"]]


    class BaseButtonPaginator(Generic[T], discord.ui.View, abc.ABC):
        """The base implementation of a button paginator. This class should be inherited
        then the custom instance defined.

        Parameters
        ----------
        entries: List[Any]
            The entries to paginate.
        per_page: int
            The amount of entries to show per page.
        clamp_pages: bool
            Whether to clamp the pages to the max and min page. This means that when the user
            reaches the max page, it will go back to the first page. Likewise, when the user
            reaches the first page, it will go back to the last page.
        target: Optional[Union[discord.Interaction, commands.Context]]
            The target interaction or context to use for the paginator. This is used to
            ensure that the user invoking the paginator is the same user that is
            interacting with the paginator. If this is ``None`` then the interaction
            check will always return True.
        """

        def __init__(
            self,
            *,
            entries: List[T],
            per_page: int = 6,
            clamp_pages: bool = True,
            target: Optional[TargetType] = None,
            extras: Optional[Dict[Any, Any]] = None,
        ) -> None:
            super().__init__(timeout=180)
            self.entries: List[T] = entries
            self.per_page: int = per_page
            self.clamp_pages: bool = clamp_pages

            self.target: Optional[TargetType] = target
            self.extras = extras

            self.author: Optional[Union[discord.User, discord.Member]] = target and (
                target.user if isinstance(target, discord.Interaction) else target.author
            )
            self.bot: Optional[Giftify] = target and (
                target.client if isinstance(target, discord.Interaction) else target.bot
            )

            self._current_page_index = 0
            self.pages = [
                entries[i : i + per_page] for i in range(0, len(entries), per_page)
            ]

    (from the max_page property onward, the preview is identical to cropped_code above)
next_line: @discord.ui.button(emoji=STOP_EMOJI)
gold_snippet_index: 2
created_at: 2023-11-09 15:00:15+00:00
level: 2k

Example 8
repo_name: Zjy0401/CoCoFormer
file_path: model/rpr.py
context: [ { "identifier": "get_device", "path": "utilities/device.py", "snippet": "def get_device():\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE" }, { "identifier": "parse_train_args", "path": "utilities/argume...
import_statement:
    import torch
    import torch.nn as nn
    from torch.nn import functional as F
    from torch.nn.parameter import Parameter
    from torch.nn import Module
    from torch.nn.modules.transformer import _get_clones
    from torch.nn.modules.linear import Linear
    from torch.nn.modules.dropout import Dropout
    from torch.nn.modules.normalization import LayerNorm
    from torch.nn.init import *
    from torch.nn.modules.activation import MultiheadAttention
    from torch.nn.functional import linear, softmax, dropout
    from utilities.device import get_device
    from utilities.argument_funcs import parse_train_args
token_num: 1,158
cropped_code:
    # TransformerEncoderRPR
    class TransformerEncoderRPR(Module):
        def __init__(self, encoder_layer, num_layers, encoder_past, max_seq, c_max_seq, b_max_seq, norm=None):
            super(TransformerEncoderRPR, self).__init__()
            self.past_layers = _get_clones(encoder_past, 1)
            self.layers = _get_clones(encoder_layer, num_layers)
            self.num_layers = num_layers
            self.norm = norm
            self.max_seq = max_seq
            self.c_max_seq = c_max_seq
            self.b_max_seq = b_max_seq

        def forward(self, src, mask=None, src_key_padding_mask=None):
all_code: preview identical to cropped_code above
next_line: args = parse_train_args()
gold_snippet_index: 1
created_at: 2023-11-01 08:33:08+00:00
level: 2k

Example 9
repo_name: a16z-infra/sunlight
file_path: model/agent.py
context: [ { "identifier": "DiffbotClient", "path": "model/diffbot.py", "snippet": "class DiffbotClient(object):\n\n BASE_API_URL = 'http://api.diffbot.com'\n TIMEOUT_MS = 15000\n\n def request(self, url, token, api, version=3):\n ''' Issue a request to the Diffbot API and return the response if va...
import_statement:
    from datetime import datetime
    from threading import Thread
    from langchain.callbacks.base import BaseCallbackHandler
    from langchain.chains import LLMChain
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts import PromptTemplate
    from .diffbot import DiffbotClient
    from .prompts import BIAS_REPORT, FACTUAL_CLAIMS, SLANT_DESCRIPTION
    import fcntl
    import json
    import logging
    import multiprocessing
    import os
    import tiktoken
token_num: 1,398
cropped_code:
    DIFFBOT_API_KEY = os.environ['DIFFBOT_API_KEY']
    REQUEST_LOG_FILE = os.environ['REQUEST_LOG_FILE']

    MAX_MODEL_CONTEXT = {
        'gpt-3.5-turbo': 4096,
        'text-davinci-003': 4096,
        'gpt-4': 8192,
    }


    class OpenAIStreamHandler(BaseCallbackHandler):
        def __init__(self, stream_queue, *args, **kwargs):
            super(OpenAIStreamHandler, self).__init__(*args, **kwargs)
            self.stream_queue = stream_queue

        def on_llm_new_token(self, token, *args, **kwargs):
            self.stream_queue.put(token)

        def on_llm_end(self, *args, **kwargs):
            self.stream_queue.put(False)


    class Agent(multiprocessing.Process):
        def __init__(self, in_queue, out_queue):
            super(Agent, self).__init__()
            logging.basicConfig(filename='/var/log/build/sunlight.out', level=logging.INFO)
            self.in_queue = in_queue
            self.out_queue = out_queue
            self.fact_prompt = PromptTemplate(input_variables=['headline', 'body'], template=FACTUAL_CLAIMS)
            self.critique_prompt = PromptTemplate(input_variables=['headline', 'body'], template=BIAS_REPORT)
            self.slant_prompt = PromptTemplate(input_variables=['bias_report'], template=SLANT_DESCRIPTION)
            gpt35 = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.0, request_timeout=300)
            davinci = ChatOpenAI(model_name='text-davinci-003', temperature=0.0, request_timeout=300)
            gpt4 = ChatOpenAI(model_name='gpt-4', temperature=0.0, request_timeout=900)
            self.stream_queue = multiprocessing.Queue()
            gpt4_stream = ChatOpenAI(
                model_name='gpt-4',
                temperature=0.0,
                streaming=True,
                callbacks=[OpenAIStreamHandler(stream_queue=self.stream_queue)],
                request_timeout=900,
            )
            self.fact_chains = {
                'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.fact_prompt),
                'text-davinci-003': LLMChain(llm=davinci, prompt=self.fact_prompt),
                'gpt-4': LLMChain(llm=gpt4_stream, prompt=self.fact_prompt),
            }
            self.critique_chains = {
                'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.critique_prompt),
                'text-davinci-003': LLMChain(llm=davinci, prompt=self.critique_prompt),
                'gpt-4': LLMChain(llm=gpt4_stream, prompt=self.critique_prompt),
            }
            self.slant_chains = {
                'gpt-3.5-turbo': LLMChain(llm=gpt35, prompt=self.slant_prompt),
                'text-davinci-003': LLMChain(llm=davinci, prompt=self.slant_prompt),
                'gpt-4': LLMChain(llm=gpt4, prompt=self.slant_prompt),
            }
            self._load_processed_jobs()

        def run(self):
            logging.basicConfig(filename='/var/log/build/sunlight.out', level=logging.INFO)
all_code: preview identical to cropped_code above
next_line: diffbot = DiffbotClient()
gold_snippet_index: 0
created_at: 2023-11-01 17:19:54+00:00
level: 2k

Example 10
repo_name: elenacliu/GraspStudio
file_path: cameras/realsense.py
context: [ { "identifier": "CameraConfig", "path": "cameras/camera.py", "snippet": "class CameraConfig(InstantiateConfig):\n \"\"\"Camera Config\"\"\"\n _target: Type = field(default_factory=lambda : Camera)\n # focal length of x axis\n fx: float = 0.0\n # focal length of y axis\n fy: float = 0.0...
import_statement:
    from dataclasses import dataclass, field
    from typing import Type
    from .camera import CameraConfig, Camera
    import pyrealsense2 as rs
    import numpy as np
    import cv2
token_num: 1,298
cropped_code:
    # Copyright 2023 Chang Liu. All rights reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.


    @dataclass
    class RealSenseCameraConfig(CameraConfig):
        _target: Type = field(default_factory=lambda : RealSenseCamera)
        exposure: float = 500.0
        max_depth_value: float = 800.0
all_code: preview identical to cropped_code above
next_line: class RealSenseCamera(Camera):
gold_snippet_index: 1
created_at: 2023-11-08 09:44:22+00:00
level: 2k

Example 11
repo_name: serl-robot/serl
file_path: serl/wrappers/pixels.py
context: [ { "identifier": "FrameStack", "path": "serl/wrappers/frame_stack.py", "snippet": "class FrameStack(gym.Wrapper):\n def __init__(self, env, num_stack: int, stacking_key: str = \"pixels\"):\n super().__init__(env)\n self._num_stack = num_stack\n self._stacking_key = stacking_key\n\...
import_statement:
    from typing import Optional, Tuple
    from gym.wrappers.pixel_observation import PixelObservationWrapper
    from serl.wrappers.frame_stack import FrameStack
    from serl.wrappers.repeat_action import RepeatAction
    from serl.wrappers.universal_seed import UniversalSeed
    import gym
token_num: 809
cropped_code:
    def wrap_pixels(
        env: gym.Env,
        action_repeat: int,
        image_size: int = 84,
        num_stack: Optional[int] = 3,
        camera_id: int = 0,
        pixel_keys: Tuple[str, ...] = ("pixels",),
    ) -> gym.Env:
        if action_repeat > 1:
            env = RepeatAction(env, action_repeat)
all_code: preview identical to cropped_code above
next_line: env = UniversalSeed(env)
gold_snippet_index: 2
created_at: 2023-11-02 23:32:24+00:00
level: 2k

Example 12
repo_name: daily-demos/ai-meeting-assistant
file_path: server/llm/openai_assistant.py
context: [ { "identifier": "Assistant", "path": "server/llm/assistant.py", "snippet": "class Assistant(ABC):\n \"\"\"Abstract class defining methods that should be implemented by any assistant\"\"\"\n\n @abstractmethod\n def register_new_context(self, new_text: str,\n name: lis...
import_statement:
    import asyncio
    import logging
    import threading
    from collections import deque
    from openai import OpenAI
    from openai.types.beta import Assistant
    from openai.types.chat import ChatCompletionMessageParam, ChatCompletionSystemMessageParam, \
        ChatCompletionUserMessageParam
    from server.llm.assistant import Assistant, NoContextError
token_num: 1,479
cropped_code:
    def probe_api_key(api_key: str) -> bool:
        """Probes the OpenAI API with the provided key to ensure it is valid."""
        try:
            client = OpenAI(api_key=api_key)
            client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[
                    ChatCompletionUserMessageParam(
                        content="This is a test", role="user")],
            )
            return True
        except Exception as e:
            print(f"Failed to probe OpenAI API key: {e}")
            return False


    class OpenAIAssistant(Assistant):
        """Class that implements assistant features using the OpenAI API"""
        _client: OpenAI = None

        _oai_assistant_id: int = None
        _oai_summary_thread_id: int = None
        _model_name: str = None
        _logger: logging.Logger = None

        # For now, just store context in memory.
        _raw_context: deque([ChatCompletionMessageParam]) = None
        _clean_transcript: str = None
        _clean_transcript_running: bool = False
        _summary_context: str = None

        # Process 20 context items at a time.
        _transcript_batch_size: int = 25

        _default_transcript_prompt = ChatCompletionSystemMessageParam(content="""
            Using the exact transcript provided in the previous messages, convert it into a
            cleaned-up, paragraphed format. It is crucial that you strictly adhere to the
            content of the provided transcript without adding or modifying any of the
            original dialogue. Your tasks are to:

            1. Correct punctuation and spelling mistakes.
            2. Merge broken sentences into complete ones.
            3. Remove timestamps and transcript types.
            4. Clearly indicate the speaker's name at the beginning of their dialogue.

            Do not add any new content or dialogue that was not present in the original
            transcript. The focus is on cleaning and reformatting the existing content
            for clarity and readability.
            """, role="system")

        _default_prompt = """
            Primary Instruction:
            Based on the provided meeting transcripts, please create a concise summary.
            Your summary should include:

            1. Key discussion points.
            2. Decisions made.
            3. Action items assigned.

            Keep the summary within six sentences, ensuring it captures the essence of the
            conversation. Structure it in clear, digestible parts for easy understanding.
            Rely solely on information from the transcript; do not infer or add information
            not explicitly mentioned. Exclude any square brackets, tags, or timestamps from
            the summary. Instead of re-parsing the entire context, use previous summaries
            you've generated to inform the completion of each new summary. Each summary should
            be holistic and represent the entire call.
        """

        def __init__(self, api_key: str, model_name: str = None,
                     logger: logging.Logger = None):
            if not api_key:
                raise Exception("OpenAI API key not provided, but required.")

            self._raw_context = deque()
            self._summary_context = ""
            self._clean_transcript = ""
            self._logger = logger
            if not model_name:
                model_name = "gpt-4-1106-preview"
            self._model_name = model_name
            self._client = OpenAI(
                api_key=api_key,
            )
            self._oai_assistant_id = self.get_or_create_assistant(model_name)

        def get_or_create_assistant(self, model_name) -> str:
            """Gets or creates an OpenAI assistant"""
            all_assistants = self._client.beta.assistants.list()
            for assistant in all_assistants.data:
                if assistant.name == _assistant_name and assistant.instructions == self._default_prompt:
                    return assistant.id
            return self._client.beta.assistants.create(name=_assistant_name,
                                                       description="Daily meeting summary assistant",
                                                       instructions=self._default_prompt,
                                                       model=model_name).id

        def destroy(self):
            """Destroys the assistant and relevant resources"""
            self._logger.info(
                "Destroying thread (%s) and assistant (%s)",
                self._oai_summary_thread_id,
                self._oai_assistant_id)
            bc = self._client.beta
            if self._oai_summary_thread_id:
                bc.threads.delete(self._oai_summary_thread_id)
            if self._oai_assistant_id:
                bc.assistants.delete(self._oai_assistant_id)

        def register_new_context(self, new_text: str, metadata: list[str] = None):
            """Registers new context (usually a transcription line)."""
            content = self._compile_ctx_content(new_text, metadata)
            user_msg = ChatCompletionUserMessageParam(content=content, role="user")
            self._raw_context.append(user_msg)

        def get_clean_transcript(self) -> str:
            """Returns latest clean transcript."""
            return self._clean_transcript

        async def cleanup_transcript(self) -> str:
            """Cleans up transcript from raw context."""
            if self._clean_transcript_running:
                raise Exception("Clean transcript process already running")

            # Set this bool to ensure only one cleanup process
            # is running at a time.
            self._clean_transcript_running = True

            if len(self._raw_context) == 0:
                self._clean_transcript_running = False
"""Module that defines an OpenAI assistant.""" _assistant_name = "daily-ai-assistant" def probe_api_key(api_key: str) -> bool: """Probes the OpenAI API with the provided key to ensure it is valid.""" try: client = OpenAI(api_key=api_key) client.chat.completions.create( model="gpt-3.5-turbo", messages=[ ChatCompletionUserMessageParam( content="This is a test", role="user")], ) return True except Exception as e: print(f"Failed to probe OpenAI API key: {e}") return False class OpenAIAssistant(Assistant): """Class that implements assistant features using the OpenAI API""" _client: OpenAI = None _oai_assistant_id: int = None _oai_summary_thread_id: int = None _model_name: str = None _logger: logging.Logger = None # For now, just store context in memory. _raw_context: deque([ChatCompletionMessageParam]) = None _clean_transcript: str = None _clean_transcript_running: bool = False _summary_context: str = None # Process 20 context items at a time. _transcript_batch_size: int = 25 _default_transcript_prompt = ChatCompletionSystemMessageParam(content=""" Using the exact transcript provided in the previous messages, convert it into a cleaned-up, paragraphed format. It is crucial that you strictly adhere to the content of the provided transcript without adding or modifying any of the original dialogue. Your tasks are to: 1. Correct punctuation and spelling mistakes. 2. Merge broken sentences into complete ones. 3. Remove timestamps and transcript types. 4. Clearly indicate the speaker's name at the beginning of their dialogue. Do not add any new content or dialogue that was not present in the original transcript. The focus is on cleaning and reformatting the existing content for clarity and readability. """, role="system") _default_prompt = """ Primary Instruction: Based on the provided meeting transcripts, please create a concise summary. Your summary should include: 1. Key discussion points. 2. Decisions made. 3. Action items assigned. Keep the summary within six sentences, ensuring it captures the essence of the conversation. Structure it in clear, digestible parts for easy understanding. Rely solely on information from the transcript; do not infer or add information not explicitly mentioned. Exclude any square brackets, tags, or timestamps from the summary. Instead of re-parsing the entire context, use previous summaries you've generated to inform the completion of each new summary. Each summary should be holistic and represent the entire call. 
""" def __init__(self, api_key: str, model_name: str = None, logger: logging.Logger = None): if not api_key: raise Exception("OpenAI API key not provided, but required.") self._raw_context = deque() self._summary_context = "" self._clean_transcript = "" self._logger = logger if not model_name: model_name = "gpt-4-1106-preview" self._model_name = model_name self._client = OpenAI( api_key=api_key, ) self._oai_assistant_id = self.get_or_create_assistant(model_name) def get_or_create_assistant(self, model_name) -> str: """Gets or creates an OpenAI assistant""" all_assistants = self._client.beta.assistants.list() for assistant in all_assistants.data: if assistant.name == _assistant_name and assistant.instructions == self._default_prompt: return assistant.id return self._client.beta.assistants.create(name=_assistant_name, description="Daily meeting summary assistant", instructions=self._default_prompt, model=model_name).id def destroy(self): """Destroys the assistant and relevant resources""" self._logger.info( "Destroying thread (%s) and assistant (%s)", self._oai_summary_thread_id, self._oai_assistant_id) bc = self._client.beta if self._oai_summary_thread_id: bc.threads.delete(self._oai_summary_thread_id) if self._oai_assistant_id: bc.assistants.delete(self._oai_assistant_id) def register_new_context(self, new_text: str, metadata: list[str] = None): """Registers new context (usually a transcription line).""" content = self._compile_ctx_content(new_text, metadata) user_msg = ChatCompletionUserMessageParam(content=content, role="user") self._raw_context.append(user_msg) def get_clean_transcript(self) -> str: """Returns latest clean transcript.""" return self._clean_transcript async def cleanup_transcript(self) -> str: """Cleans up transcript from raw context.""" if self._clean_transcript_running: raise Exception("Clean transcript process already running") # Set this bool to ensure only one cleanup process # is running at a time. self._clean_transcript_running = True if len(self._raw_context) == 0: self._clean_transcript_running = False
next_line: raise NoContextError()
gold_snippet_index: 1
created_at: 2023-11-02 11:17:16+00:00
level: 2k

Example 13
repo_name: Kushalhk/AutoFilter
file_path: plugins/inline.py
context: [ { "identifier": "get_search_results", "path": "database/ia_filterdb.py", "snippet": "async def get_search_results(chat_id, query, file_type=None, max_results=10, offset=0, filter=False):\n \"\"\"For given query return (results, next_offset)\"\"\"\n if chat_id is not None:\n settings = await...
import_statement:
    import logging
    from pyrogram import Client, emoji, filters
    from pyrogram.errors.exceptions.bad_request_400 import QueryIdInvalid
    from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, InlineQueryResultCachedDocument, InlineQuery
    from database.ia_filterdb import get_search_results
    from utils import is_subscribed, get_size, temp
    from info import CACHE_TIME, AUTH_USERS, AUTH_CHANNEL, CUSTOM_FILE_CAPTION
    from database.connections_mdb import active_connection
token_num: 1,485
cropped_code:
    logger = logging.getLogger(__name__)

    cache_time = 0 if AUTH_USERS or AUTH_CHANNEL else CACHE_TIME


    async def inline_users(query: InlineQuery):
        if AUTH_USERS:
            if query.from_user and query.from_user.id in AUTH_USERS:
                return True
            else:
                return False
        if query.from_user and query.from_user.id not in temp.BANNED_USERS:
            return True
        return False


    @Client.on_inline_query()
    async def answer(bot, query):
        """Show search results for given inline query"""
        chat_id = await active_connection(str(query.from_user.id))
        if not await inline_users(query):
            await query.answer(results=[], cache_time=0, switch_pm_text='okDa', switch_pm_parameter="hehe")
            return
        if AUTH_CHANNEL and not await is_subscribed(bot, query):
            await query.answer(results=[], cache_time=0, switch_pm_text='You have to subscribe my channel to use the bot', switch_pm_parameter="subscribe")
            return
        results = []
        if '|' in query.query:
            string, file_type = query.query.split('|', maxsplit=1)
            string = string.strip()
            file_type = file_type.strip().lower()
        else:
            string = query.query.strip()
            file_type = None
        offset = int(query.offset or 0)
        reply_markup = get_reply_markup(query=string)
        files, next_offset, total = await get_search_results(
            chat_id, string, file_type=file_type, max_results=10, offset=offset)
        for file in files:
            title=file.file_name
            size=get_size(file.file_size)
            f_caption=file.caption
all_code: preview identical to cropped_code above
next_line: if CUSTOM_FILE_CAPTION:
gold_snippet_index: 7
created_at: 2023-11-03 12:21:26+00:00
level: 2k

Example 14
repo_name: tiendatnguyen-vision/Orbit-symmetrize
file_path: RotatedMNIST/LPS/emlp-pytorch/tests/model_tests.py
context: [ { "identifier": "rel_error", "path": "RotatedMNIST/LPS/emlp-pytorch/tests/equivariance_tests.py", "snippet": "def rel_error(t1, t2):\r\n \"\"\" Computes the relative error of two tensors. \"\"\"\r\n error = torch.sqrt(torch.mean(torch.abs(t1-t2)**2))\r\n scale = torch.sqrt(torch.mean(torch.abs(...
import_statement:
    import torch
    from torch.utils.data import DataLoader
    from oil.utils.utils import FixedNumpySeed, FixedPytorchSeed
    from emlp_pytorch.nn import EMLP
    from emlp_pytorch.groups import S, SO, DirectProduct
    from emlp_pytorch.reps import vis, sparsify_basis, V, Rep, LazyKron, T
    from .equivariance_tests import rel_error, scale_adjusted_rel_error
token_num: 1,384
cropped_code:
    """ Tests for the EMLP model."""


    def equivariance_err(model, mb, repin, repout, group):
        """ Computes the equivariance error of a model on a minibatch mb. """
        x, y = mb
        gs = group.samples(x.size(0))
        rho_gin = torch.vmap(repin(group).rho_dense)(gs)
        rho_gout = torch.vmap(repout(group).rho_dense)(gs)
        y1 = model((rho_gin@x[..., None])[..., 0])
        y2 = (rho_gout@model(x)[..., None])[..., 0]
        return scale_adjusted_rel_error(y1, y2, gs)


    def get_dsmb(dsclass, device='cpu'):
        """ Returns a dataset and minibatch for a given dataset class. """
        seed = 2021
        bs = 50
        with FixedNumpySeed(seed), FixedPytorchSeed(seed):
            ds = dsclass(100)
        ds = ds.to(device)
        dataloader = DataLoader(ds, batch_size=min(bs, len(ds)), num_workers=0, pin_memory=False)
        mb = next(iter(dataloader))
        return ds, mb


    def test_init_forward_and_equivariance(dsclass, device='cpu'):
        """ Tests that the model can be initialized, forward pass is correct,
        and equivariance is correct. """
        network = EMLP
        ds, mb = get_dsmb(dsclass, device)
        model = network(ds.rep_in, ds.rep_out, group=ds.symmetry).to(device)
        assert equivariance_err(model, mb, ds.rep_in, ds.rep_out, ds.symmetry) < 1e-4, \
            "EMLP failed equivariance test"


    def test_utilities(device='cpu'):
        """ Tests that the utilities work. """
        W = V(SO(3).to(device))
        # W = V(DirectProduct(SO(3).to(device), S(6).to(device)))
        vis(W, W)
        Q = (W**2 >> W).equivariant_basis()
        SQ = sparsify_basis(Q)
        A = SQ@(1+torch.arange(SQ.size(-1), device=device)).to(torch.float)
        nunique = len(torch.unique(torch.abs(A)))
        assert nunique in (SQ.size(-1), SQ.size(-1) + 1), "Sparsify failes on SO(3) T3"


    def test_bespoke_representations(device='cpu'):
        """ Tests that bespoke representations work. """
        class ProductSubRep(Rep):
            """ A representation of a product group G = G1 x G2
            as a sum of two subrepresentations """
            def __init__(self, G, subgroup_id, size):
                """ Produces the representation of the subgroup of G = G1 x G2
                with the index subgroup_id in {0,1} specifying G1 or G2.
                Also requires specifying the size of the representation given by G1.d or G2.d """
                super().__init__()
                self.G = G
                self.index = subgroup_id
                self._size = size
                self.device = device

            def __repr__(self):
                return "V_"+str(self.G).split('x')[self.index]

            def __hash__(self):
                return hash((type(self), (self.G, self.index)))

            def size(self):
                return self._size

            def rho(self, M):
                # Given that M is a LazyKron object, we can just get the argument
                return M.Ms[self.index]

            def drho(self, A):
                return A.Ms[self.index]

            def __call__(self, G):
                # adding this will probably not be necessary in a future release,
                # necessary now because rep is __call__ed in nn.EMLP constructor
                assert self.G == G
                return self

        G1, G2 = SO(3).to(device), S(5).to(device)
        G = G1 * G2

        VSO3 = ProductSubRep(G, 0, G1.d)
        VS5 = ProductSubRep(G, 1, G2.d)
        Vin = VS5 + V(G)
        Vout = VSO3
        str(Vin >> Vout)
        model = EMLP(Vin, Vout, group=G)
        model.to(device)
        input_point = torch.randn(Vin.size(), device=device)*10
        lazy_G_sample = LazyKron([G1.sample(), G2.sample()])
        out1 = model(Vin.rho(lazy_G_sample)@input_point)
        out2 = Vout.rho(lazy_G_sample)@model(input_point)
""" Tests for the EMLP model.""" def equivariance_err(model, mb, repin, repout, group): """ Computes the equivariance error of a model on a minibatch mb. """ x, y = mb gs = group.samples(x.size(0)) rho_gin = torch.vmap(repin(group).rho_dense)(gs) rho_gout = torch.vmap(repout(group).rho_dense)(gs) y1 = model((rho_gin@x[..., None])[..., 0]) y2 = (rho_gout@model(x)[..., None])[..., 0] return scale_adjusted_rel_error(y1, y2, gs) def get_dsmb(dsclass, device='cpu'): """ Returns a dataset and minibatch for a given dataset class. """ seed = 2021 bs = 50 with FixedNumpySeed(seed), FixedPytorchSeed(seed): ds = dsclass(100) ds = ds.to(device) dataloader = DataLoader(ds, batch_size=min(bs, len(ds)), num_workers=0, pin_memory=False) mb = next(iter(dataloader)) return ds, mb def test_init_forward_and_equivariance(dsclass, device='cpu'): """ Tests that the model can be initialized, forward pass is correct, and equivariance is correct. """ network = EMLP ds, mb = get_dsmb(dsclass, device) model = network(ds.rep_in, ds.rep_out, group=ds.symmetry).to(device) assert equivariance_err(model, mb, ds.rep_in, ds.rep_out, ds.symmetry) < 1e-4, \ "EMLP failed equivariance test" def test_utilities(device='cpu'): """ Tests that the utilities work. """ W = V(SO(3).to(device)) # W = V(DirectProduct(SO(3).to(device), S(6).to(device))) vis(W, W) Q = (W**2 >> W).equivariant_basis() SQ = sparsify_basis(Q) A = SQ@(1+torch.arange(SQ.size(-1), device=device)).to(torch.float) nunique = len(torch.unique(torch.abs(A))) assert nunique in (SQ.size(-1), SQ.size(-1) + 1), "Sparsify failes on SO(3) T3" def test_bespoke_representations(device='cpu'): """ Tests that bespoke representations work. """ class ProductSubRep(Rep): """ A representation of a product group G = G1 x G2 as a sum of two subrepresentations """ def __init__(self, G, subgroup_id, size): """ Produces the representation of the subgroup of G = G1 x G2 with the index subgroup_id in {0,1} specifying G1 or G2. Also requires specifying the size of the representation given by G1.d or G2.d """ super().__init__() self.G = G self.index = subgroup_id self._size = size self.device = device def __repr__(self): return "V_"+str(self.G).split('x')[self.index] def __hash__(self): return hash((type(self), (self.G, self.index))) def size(self): return self._size def rho(self, M): # Given that M is a LazyKron object, we can just get the argument return M.Ms[self.index] def drho(self, A): return A.Ms[self.index] def __call__(self, G): # adding this will probably not be necessary in a future release, # necessary now because rep is __call__ed in nn.EMLP constructor assert self.G == G return self G1, G2 = SO(3).to(device), S(5).to(device) G = G1 * G2 VSO3 = ProductSubRep(G, 0, G1.d) VS5 = ProductSubRep(G, 1, G2.d) Vin = VS5 + V(G) Vout = VSO3 str(Vin >> Vout) model = EMLP(Vin, Vout, group=G) model.to(device) input_point = torch.randn(Vin.size(), device=device)*10 lazy_G_sample = LazyKron([G1.sample(), G2.sample()]) out1 = model(Vin.rho(lazy_G_sample)@input_point) out2 = Vout.rho(lazy_G_sample)@model(input_point)
assert rel_error(out1, out2) < 1e-4, "EMLP equivariance fails on bespoke productsubrep"
0
2023-11-01 07:19:02+00:00
2k
crizbae/PictoPlan
backend/mongo_api/app/server/routes/item_routes.py
[ { "identifier": "collection", "path": "backend/mongo_api/app/server/database.py", "snippet": "MONGO_URI = config(\"MONGO_URI\")\ndef item_helper(item) -> dict:\ndef ret_link(item) -> dict:\nasync def retrieve_all_items():\nasync def retrieve_item(item_id: str):\nasync def retrieve_links(session_id: str)...
from fastapi import APIRouter, Depends, HTTPException from ..database import collection from ..models.item import Item from ..database import retrieve_all_items, retrieve_item, update_item_in_db, delete_item_from_db, retrieve_links
815
router = APIRouter()


@router.post("/items/")
def create_item(item: Item):
    item_dict = item.dict()
    inserted_item = collection.insert_one(item_dict)
    item_id = str(inserted_item.inserted_id)
    del item_dict["_id"]
    item_dict["id"] = item_id
    return item_dict


@router.get("/items/")
async def get_all_items():
    items = await retrieve_all_items()
    return items


# get by frontend UUID
@router.get("/items/session/{session_id}")
async def get_item_by_session_id(session_id: str):
    item = await retrieve_links(session_id)
    if len(item) == 0:
        raise HTTPException(status_code=404, detail="Items not found")
    return item


# get by link
@router.get("/items/{item_id}")
async def get_item_by_id(item_id: str):
    item = await retrieve_item(item_id)
    if len(item) == 0:
        raise HTTPException(status_code=404, detail="Item not found")
    return item


@router.put("/items/{item_id}")
async def update_item(item_id: str, updated_item: Item):
    updated_item = updated_item.dict()
    success = await update_item_in_db(item_id, updated_item)
    if not success:
        raise HTTPException(status_code=404, detail="Item not found")
    return {**updated_item, "id": item_id}


@router.delete("/items/{item_id}")
async def delete_item(item_id: str):
router = APIRouter()


@router.post("/items/")
def create_item(item: Item):
    item_dict = item.dict()
    inserted_item = collection.insert_one(item_dict)
    item_id = str(inserted_item.inserted_id)
    del item_dict["_id"]
    item_dict["id"] = item_id
    return item_dict


@router.get("/items/")
async def get_all_items():
    items = await retrieve_all_items()
    return items


# get by frontend UUID
@router.get("/items/session/{session_id}")
async def get_item_by_session_id(session_id: str):
    item = await retrieve_links(session_id)
    if len(item) == 0:
        raise HTTPException(status_code=404, detail="Items not found")
    return item


# get by link
@router.get("/items/{item_id}")
async def get_item_by_id(item_id: str):
    item = await retrieve_item(item_id)
    if len(item) == 0:
        raise HTTPException(status_code=404, detail="Item not found")
    return item


@router.put("/items/{item_id}")
async def update_item(item_id: str, updated_item: Item):
    updated_item = updated_item.dict()
    success = await update_item_in_db(item_id, updated_item)
    if not success:
        raise HTTPException(status_code=404, detail="Item not found")
    return {**updated_item, "id": item_id}


@router.delete("/items/{item_id}")
async def delete_item(item_id: str):
success = await delete_item_from_db(item_id)
5
2023-11-04 16:48:55+00:00
2k
xenxxxx/BitPay-Crypto-Signal-Trading-Bot
tests/data/test_btanalysis.py
[ { "identifier": "CURRENT_TEST_STRATEGY", "path": "tests/conftest.py", "snippet": "CURRENT_TEST_STRATEGY = 'StrategyTestV3'" }, { "identifier": "create_mock_trades", "path": "tests/conftest.py", "snippet": "def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True)...
from datetime import datetime, timedelta, timezone from pathlib import Path from unittest.mock import MagicMock from pandas import DataFrame, DateOffset, Timestamp, to_datetime from freqtrade.configuration import TimeRange from freqtrade.constants import LAST_BT_RESULT_FN from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, analyze_trade_parallelism, extract_trades_of_period, get_latest_backtest_filename, get_latest_hyperopt_file, load_backtest_data, load_backtest_metadata, load_trades, load_trades_from_db) from freqtrade.data.history import load_data, load_pair_history from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum, calculate_expectancy, calculate_market_change, calculate_max_drawdown, calculate_sharpe, calculate_sortino, calculate_underwater, combine_dataframes_with_mean, create_cum_profit) from freqtrade.exceptions import OperationalException from freqtrade.util import dt_utc from tests.conftest import CURRENT_TEST_STRATEGY, create_mock_trades from tests.conftest_trades import MOCK_TRADE_COUNT import pytest
1,532
def test_get_latest_backtest_filename(testdatadir, mocker):
    with pytest.raises(ValueError, match=r"Directory .* does not exist\."):
        get_latest_backtest_filename(testdatadir / 'does_not_exist')

    with pytest.raises(ValueError, match=r"Directory .* does not seem to contain .*"):
        get_latest_backtest_filename(testdatadir)

    testdir_bt = testdatadir / "backtest_results"
    res = get_latest_backtest_filename(testdir_bt)
    assert res == 'backtest-result.json'

    res = get_latest_backtest_filename(str(testdir_bt))
    assert res == 'backtest-result.json'

    mocker.patch("freqtrade.data.btanalysis.json_load", return_value={})

    with pytest.raises(ValueError, match=r"Invalid '.last_result.json' format."):
        get_latest_backtest_filename(testdir_bt)


def test_get_latest_hyperopt_file(testdatadir):
    res = get_latest_hyperopt_file(testdatadir / 'does_not_exist', 'testfile.pickle')
    assert res == testdatadir / 'does_not_exist/testfile.pickle'

    res = get_latest_hyperopt_file(testdatadir.parent)
    assert res == testdatadir.parent / "hyperopt_results.pickle"

    res = get_latest_hyperopt_file(str(testdatadir.parent))
    assert res == testdatadir.parent / "hyperopt_results.pickle"

    # Test with absolute path
    with pytest.raises(
            OperationalException,
            match="--hyperopt-filename expects only the filename, not an absolute path."):
        get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent))


def test_load_backtest_metadata(mocker, testdatadir):
    res = load_backtest_metadata(testdatadir / 'nonexistant.file.json')
    assert res == {}

    mocker.patch('freqtrade.data.btanalysis.get_backtest_metadata_filename')
    mocker.patch('freqtrade.data.btanalysis.json_load', side_effect=Exception())
    with pytest.raises(OperationalException,
                       match=r"Unexpected error.*loading backtest metadata\."):
        load_backtest_metadata(testdatadir / 'nonexistant.file.json')


def test_load_backtest_data_old_format(testdatadir, mocker):
    filename = testdatadir / "backtest-result_test222.json"
    mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[])

    with pytest.raises(OperationalException,
                       match=r"Backtest-results with only trades data are no longer supported."):
        load_backtest_data(filename)


def test_load_backtest_data_new_format(testdatadir):
    filename = testdatadir / "backtest_results/backtest-result.json"
    bt_data = load_backtest_data(filename)
    assert isinstance(bt_data, DataFrame)
    assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
    assert len(bt_data) == 179

    # Test loading from string (must yield same result)
    bt_data2 = load_backtest_data(str(filename))
    assert bt_data.equals(bt_data2)

    # Test loading from folder (must yield same result)
    bt_data3 = load_backtest_data(testdatadir / "backtest_results")
    assert bt_data.equals(bt_data3)

    with pytest.raises(ValueError, match=r"File .* does not exist\."):
        load_backtest_data("filename" + "nofile")

    with pytest.raises(ValueError, match=r"Unknown dataformat."):
        load_backtest_data(testdatadir / "backtest_results" / LAST_BT_RESULT_FN)


def test_load_backtest_data_multi(testdatadir):
    filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
    for strategy in ('StrategyTestV2', 'TestStrategy'):
        bt_data = load_backtest_data(filename, strategy=strategy)
        assert isinstance(bt_data, DataFrame)
        assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
        assert len(bt_data) == 179

        # Test loading from string (must yield same result)
        bt_data2 = load_backtest_data(str(filename), strategy=strategy)
        assert bt_data.equals(bt_data2)

    with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."):
        load_backtest_data(filename, strategy='XYZ')

    with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"):
        load_backtest_data(filename)


@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('is_short', [False, True])
def test_load_trades_from_db(default_conf, fee, is_short, mocker):
def test_get_latest_backtest_filename(testdatadir, mocker):
    with pytest.raises(ValueError, match=r"Directory .* does not exist\."):
        get_latest_backtest_filename(testdatadir / 'does_not_exist')

    with pytest.raises(ValueError, match=r"Directory .* does not seem to contain .*"):
        get_latest_backtest_filename(testdatadir)

    testdir_bt = testdatadir / "backtest_results"
    res = get_latest_backtest_filename(testdir_bt)
    assert res == 'backtest-result.json'

    res = get_latest_backtest_filename(str(testdir_bt))
    assert res == 'backtest-result.json'

    mocker.patch("freqtrade.data.btanalysis.json_load", return_value={})

    with pytest.raises(ValueError, match=r"Invalid '.last_result.json' format."):
        get_latest_backtest_filename(testdir_bt)


def test_get_latest_hyperopt_file(testdatadir):
    res = get_latest_hyperopt_file(testdatadir / 'does_not_exist', 'testfile.pickle')
    assert res == testdatadir / 'does_not_exist/testfile.pickle'

    res = get_latest_hyperopt_file(testdatadir.parent)
    assert res == testdatadir.parent / "hyperopt_results.pickle"

    res = get_latest_hyperopt_file(str(testdatadir.parent))
    assert res == testdatadir.parent / "hyperopt_results.pickle"

    # Test with absolute path
    with pytest.raises(
            OperationalException,
            match="--hyperopt-filename expects only the filename, not an absolute path."):
        get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent))


def test_load_backtest_metadata(mocker, testdatadir):
    res = load_backtest_metadata(testdatadir / 'nonexistant.file.json')
    assert res == {}

    mocker.patch('freqtrade.data.btanalysis.get_backtest_metadata_filename')
    mocker.patch('freqtrade.data.btanalysis.json_load', side_effect=Exception())
    with pytest.raises(OperationalException,
                       match=r"Unexpected error.*loading backtest metadata\."):
        load_backtest_metadata(testdatadir / 'nonexistant.file.json')


def test_load_backtest_data_old_format(testdatadir, mocker):
    filename = testdatadir / "backtest-result_test222.json"
    mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[])

    with pytest.raises(OperationalException,
                       match=r"Backtest-results with only trades data are no longer supported."):
        load_backtest_data(filename)


def test_load_backtest_data_new_format(testdatadir):
    filename = testdatadir / "backtest_results/backtest-result.json"
    bt_data = load_backtest_data(filename)
    assert isinstance(bt_data, DataFrame)
    assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
    assert len(bt_data) == 179

    # Test loading from string (must yield same result)
    bt_data2 = load_backtest_data(str(filename))
    assert bt_data.equals(bt_data2)

    # Test loading from folder (must yield same result)
    bt_data3 = load_backtest_data(testdatadir / "backtest_results")
    assert bt_data.equals(bt_data3)

    with pytest.raises(ValueError, match=r"File .* does not exist\."):
        load_backtest_data("filename" + "nofile")

    with pytest.raises(ValueError, match=r"Unknown dataformat."):
        load_backtest_data(testdatadir / "backtest_results" / LAST_BT_RESULT_FN)


def test_load_backtest_data_multi(testdatadir):
    filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
    for strategy in ('StrategyTestV2', 'TestStrategy'):
        bt_data = load_backtest_data(filename, strategy=strategy)
        assert isinstance(bt_data, DataFrame)
        assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
        assert len(bt_data) == 179

        # Test loading from string (must yield same result)
        bt_data2 = load_backtest_data(str(filename), strategy=strategy)
        assert bt_data.equals(bt_data2)

    with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."):
        load_backtest_data(filename, strategy='XYZ')

    with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"):
        load_backtest_data(filename)


@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('is_short', [False, True])
def test_load_trades_from_db(default_conf, fee, is_short, mocker):
create_mock_trades(fee, is_short)
1
2023-11-07 18:46:03+00:00
2k
ssajedi/SAiF-GPT
bin/main.py
[ { "identifier": "anonymize_text", "path": "utils.py", "snippet": "def augment_prompt(prompt,ref_doc):\ndef extract_pdf_text(file):" }, { "identifier": "extract_pdf_text", "path": "utils.py", "snippet": "def extract_pdf_text(file):\n \"\"\"\n Extracts text paragraphs from a PDF file...
import streamlit as st import random import time import openai import openai import streamlit as st from utils import anonymize_text, deanonymize_text, chatbot_response from utils import extract_pdf_text from text_effects import highlight_phrases_in_paragraph from DetectEntity import DetectEntity
815
st.title("AInonymous")

system_prompt="""You are a helpful assistant, your task is to review an uploaded document\
uploaded by a user.\
The user query is delimited by triple asterisks.\
The reference documents in that message are delimited with triple backticks.\
A user might ask follow up questions.
"""

# add a selectbox to the sidebar
st.sidebar.multiselect("Entity list", ["email", "phone", 'location'], ["email", "phone", "location"])

# add a clear button to the sidebar
if st.sidebar.button("Clear"):
    st.session_state.chat_hist = []
    st.session_state.messages = []
    st.session_state.cls = None

# add an upload pdf button to the sidebar
uploaded_file = st.sidebar.file_uploader("Choose a PDF file", accept_multiple_files=False)

if uploaded_file is not None:
st.title("AInonymous")

system_prompt="""You are a helpful assistant, your task is to review an uploaded document\
uploaded by a user.\
The user query is delimited by triple asterisks.\
The reference documents in that message are delimited with triple backticks.\
A user might ask follow up questions.
"""

# add a selectbox to the sidebar
st.sidebar.multiselect("Entity list", ["email", "phone", 'location'], ["email", "phone", "location"])

# add a clear button to the sidebar
if st.sidebar.button("Clear"):
    st.session_state.chat_hist = []
    st.session_state.messages = []
    st.session_state.cls = None

# add an upload pdf button to the sidebar
uploaded_file = st.sidebar.file_uploader("Choose a PDF file", accept_multiple_files=False)

if uploaded_file is not None:
_,chunks = extract_pdf_text(uploaded_file)
1
2023-11-04 18:14:49+00:00
2k
awslabs/optimizing-multitask-training-through-dynamic-pipelines
tests/test_kv_store.py
[ { "identifier": "_get_from_shared_kv_store", "path": "dynapipe/pipe/data_loader.py", "snippet": "def _get_from_shared_kv_store(\n kv_store: RedisKVStore,\n key: str,\n reader_idx: int,\n n_total_readers: int,\n decode: bool = True,\n logger=None,\n):\n reader_count_key = key + \"_rc...
import multiprocessing as mp import time import traceback import traceback from dynapipe.pipe.data_loader import ( _get_from_shared_kv_store, _init_kv_store, _put_to_shared_kv_store, )
1,336
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Note: this test requires torch
# to run this test, exec:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=1 test_kv_store.py


def _producer_process(max_iters, buffer_size=32):
    try:
        kv_store, _, _ = _init_kv_store(is_master=True)
        # set all ack keys
        for i in range(buffer_size):
            kv_store.set(f"key_{i}_ack".format(i), "1")
            kv_store.set(f"key_{i}_r0_ack".format(i), "1")
        for i in range(max_iters):
            key = "key_{}".format(i % buffer_size)
            payload = str(i)
            _put_to_shared_kv_store(kv_store, key, payload)
            print("[producer] put key: {}".format(key), flush=True)
        time.sleep(2)
    except Exception as e:
        traceback.print_exc()
        raise e


def _consumer_process(max_iters, buffer_size=32):
    try:
        kv_store, _, _ = _init_kv_store(is_master=False)
        for i in range(max_iters):
            key = "key_{}".format(i % buffer_size)
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

# Note: this test requires torch
# to run this test, exec:
# DYNAPIPE_DEBUG=DEBUG DYNAPIPE_LOGGING_DEBUG_DIR=./test_debug \
# torchrun --standalone --nnodes=1 --nproc_per_node=1 test_kv_store.py


def _producer_process(max_iters, buffer_size=32):
    try:
        kv_store, _, _ = _init_kv_store(is_master=True)
        # set all ack keys
        for i in range(buffer_size):
            kv_store.set(f"key_{i}_ack".format(i), "1")
            kv_store.set(f"key_{i}_r0_ack".format(i), "1")
        for i in range(max_iters):
            key = "key_{}".format(i % buffer_size)
            payload = str(i)
            _put_to_shared_kv_store(kv_store, key, payload)
            print("[producer] put key: {}".format(key), flush=True)
        time.sleep(2)
    except Exception as e:
        traceback.print_exc()
        raise e


def _consumer_process(max_iters, buffer_size=32):
    try:
        kv_store, _, _ = _init_kv_store(is_master=False)
        for i in range(max_iters):
            key = "key_{}".format(i % buffer_size)
payload = _get_from_shared_kv_store(
0
2023-11-08 07:58:20+00:00
2k
dask-contrib/dask-databricks
dask_databricks/tests/test_databricks.py
[ { "identifier": "DatabricksCluster", "path": "dask_databricks/databrickscluster.py", "snippet": "class DatabricksCluster(Cluster):\n \"\"\"Connect to a Dask cluster deployed via databricks.\"\"\"\n\n def __init__(\n self,\n loop: Optional[IOLoop] = None,\n asynchronous: bool =...
import os import pytest from dask.distributed import Client from distributed.deploy import Cluster, LocalCluster from dask_databricks import DatabricksCluster, get_client
669
@pytest.fixture(scope="session")
def dask_cluster():
    """Start a LocalCluster to simulate the cluster that would be started on Databricks."""
    return LocalCluster(scheduler_port=8786)


@pytest.fixture
def remove_spark_local_ip():
    original_spark_local_ip = os.getenv("SPARK_LOCAL_IP")
    if original_spark_local_ip:
        del os.environ["SPARK_LOCAL_IP"]
    yield None
    if original_spark_local_ip:
        os.environ["SPARK_LOCAL_IP"] = original_spark_local_ip


@pytest.fixture
def set_spark_local_ip():
    original_spark_local_ip = os.getenv("SPARK_LOCAL_IP")
    os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
    yield None
    if original_spark_local_ip:
        os.environ["SPARK_LOCAL_IP"] = original_spark_local_ip
    else:
        del os.environ["SPARK_LOCAL_IP"]


def test_databricks_cluster_raises_key_error_when_initialised_outside_of_databricks(remove_spark_local_ip):
    with pytest.raises(KeyError):
@pytest.fixture(scope="session")
def dask_cluster():
    """Start a LocalCluster to simulate the cluster that would be started on Databricks."""
    return LocalCluster(scheduler_port=8786)


@pytest.fixture
def remove_spark_local_ip():
    original_spark_local_ip = os.getenv("SPARK_LOCAL_IP")
    if original_spark_local_ip:
        del os.environ["SPARK_LOCAL_IP"]
    yield None
    if original_spark_local_ip:
        os.environ["SPARK_LOCAL_IP"] = original_spark_local_ip


@pytest.fixture
def set_spark_local_ip():
    original_spark_local_ip = os.getenv("SPARK_LOCAL_IP")
    os.environ["SPARK_LOCAL_IP"] = "127.0.0.1"
    yield None
    if original_spark_local_ip:
        os.environ["SPARK_LOCAL_IP"] = original_spark_local_ip
    else:
        del os.environ["SPARK_LOCAL_IP"]


def test_databricks_cluster_raises_key_error_when_initialised_outside_of_databricks(remove_spark_local_ip):
    with pytest.raises(KeyError):
DatabricksCluster()
0
2023-11-02 13:49:27+00:00
2k
indiefan/king_smith
custom_components/king_smith/coordinator.py
[ { "identifier": "DOMAIN", "path": "custom_components/king_smith/const.py", "snippet": "DOMAIN = \"king_smith\"" }, { "identifier": "WalkingPadApi", "path": "custom_components/king_smith/walking_pad.py", "snippet": "class WalkingPadApi:\n \"\"\"Walkingpad device.\"\"\"\n\n def __ini...
from datetime import datetime from homeassistant.core import CALLBACK_TYPE, HassJob, HomeAssistant, callback from homeassistant.helpers.event import async_call_later from homeassistant.helpers.update_coordinator import DataUpdateCoordinator from ph4_walkingpad.pad import WalkingPadCurStatus from .const import DOMAIN from .walking_pad import WalkingPadApi import logging import time
1,339
"""The Walking Pad Coordinator."""

_LOGGER = logging.getLogger(__name__)

NEVER_TIME = -86400.0
DEBOUNCE_SECONDS = 1.0


class WalkingPadCoordinator(DataUpdateCoordinator[None]):
    """Data coordinator for receiving Walking Pad updates."""

    def __init__(self, hass: HomeAssistant, walking_pad_api: WalkingPadApi) -> None:
        """Initialise the coordinator."""
        super().__init__(
            hass,
            _LOGGER,
"""The Walking Pad Coordinator."""

_LOGGER = logging.getLogger(__name__)

NEVER_TIME = -86400.0
DEBOUNCE_SECONDS = 1.0


class WalkingPadCoordinator(DataUpdateCoordinator[None]):
    """Data coordinator for receiving Walking Pad updates."""

    def __init__(self, hass: HomeAssistant, walking_pad_api: WalkingPadApi) -> None:
        """Initialise the coordinator."""
        super().__init__(
            hass,
            _LOGGER,
name=DOMAIN,
0
2023-11-03 20:45:03+00:00
2k
ndiamant/spice
spice/conditional_histogram.py
[ { "identifier": "BaseLightning", "path": "spice/utils.py", "snippet": "class BaseLightning(LightningModule):\n def _configure_optimizers(self, parameters: Iterator[torch.nn.Parameter]):\n opt = optim.AdamW(\n parameters, lr=self.hparams.lr, weight_decay=self.hparams.wd,\n )\n...
import copy import math import torch import torch.nn.functional as F import matplotlib.pyplot as plt import matplotlib.patches as mpatches from tqdm import tqdm from torch import nn from spice.utils import ( BaseLightning, MLP, unique_quantile, score_to_q_hat, compute_conformal_metrics, )
1,548
def select_bins(y: torch.Tensor, n_bins: int) -> torch.Tensor:
    return unique_quantile(y, n_bins, first_bin_zero=False)


def discretize(y: torch.Tensor, bins: torch.Tensor) -> torch.Tensor:
    return torch.bucketize(y.clip(max=bins[-1] - 1e-5), boundaries=bins)
def select_bins(y: torch.Tensor, n_bins: int) -> torch.Tensor:
    return unique_quantile(y, n_bins, first_bin_zero=False)


def discretize(y: torch.Tensor, bins: torch.Tensor) -> torch.Tensor:
    return torch.bucketize(y.clip(max=bins[-1] - 1e-5), boundaries=bins)
class ConditionalHist(BaseLightning):
0
2023-11-01 18:04:29+00:00
2k
nik-sm/com-hom-emg
tests/test_data.py
[ { "identifier": "get_datasets", "path": "com_hom_emg/data.py", "snippet": "def get_datasets(\n per_subj_data: dict,\n fold: int,\n n_train_subj: int,\n n_val_subj: int,\n n_test_subj: int,\n use_preprocessed_data: bool,\n return_subj_names: bool = False, # For testing\n) -> Tuple[T...
import torch from com_hom_emg.data import get_datasets, get_per_subj_data
1,309
def test_get_datasets_disjoint_val_test():
    # The subject used for val should be different each time
    # Likewise for test
    per_subj_data = get_per_subj_data()
    all_val_subj = []
    all_test_subj = []

    n_train = 8
    n_val = 1
    n_test = 1

    expected_train_size = 8 * 1224  # 1224 gestures per subject
    expected_val_size = n_val * 1224
    expected_test_size = n_test * 1224

    def check_contents(dataset, N):
        ## Check shapes
        # data = 8-channel EMG, 962 timesteps (= 500ms at 1926 Hz)
        assert dataset.tensors[0].shape == torch.Size([N, 8, 962])
        # labels = 2D labels
        assert dataset.tensors[1].shape == torch.Size([N, 2])
        # is_single = bool labels
        assert dataset.tensors[2].shape == torch.Size([N])
        # subj_ids = 1d labels
        assert dataset.tensors[3].shape == torch.Size([N])

        ## Check dtypes
        assert dataset.tensors[0].dtype == torch.float32
        assert dataset.tensors[1].dtype == torch.int64
        assert dataset.tensors[2].dtype == torch.bool
        assert dataset.tensors[3].dtype == torch.int64

    for i in range(10):
def test_get_datasets_disjoint_val_test():
    # The subject used for val should be different each time
    # Likewise for test
    per_subj_data = get_per_subj_data()
    all_val_subj = []
    all_test_subj = []

    n_train = 8
    n_val = 1
    n_test = 1

    expected_train_size = 8 * 1224  # 1224 gestures per subject
    expected_val_size = n_val * 1224
    expected_test_size = n_test * 1224

    def check_contents(dataset, N):
        ## Check shapes
        # data = 8-channel EMG, 962 timesteps (= 500ms at 1926 Hz)
        assert dataset.tensors[0].shape == torch.Size([N, 8, 962])
        # labels = 2D labels
        assert dataset.tensors[1].shape == torch.Size([N, 2])
        # is_single = bool labels
        assert dataset.tensors[2].shape == torch.Size([N])
        # subj_ids = 1d labels
        assert dataset.tensors[3].shape == torch.Size([N])

        ## Check dtypes
        assert dataset.tensors[0].dtype == torch.float32
        assert dataset.tensors[1].dtype == torch.int64
        assert dataset.tensors[2].dtype == torch.bool
        assert dataset.tensors[3].dtype == torch.int64

    for i in range(10):
train_set, val_set, test_set, train_subj, val_subj, test_subj = get_datasets(
0
2023-11-01 21:12:05+00:00
2k
alengwenus/ha-sma-ev-charger
custom_components/smaev/select.py
[ { "identifier": "DOMAIN", "path": "custom_components/smaev/const.py", "snippet": "DOMAIN = \"smaev\"" }, { "identifier": "SMAEV_COORDINATOR", "path": "custom_components/smaev/const.py", "snippet": "SMAEV_COORDINATOR = \"coordinator\"" }, { "identifier": "SMAEV_DEVICE_INFO", "...
from dataclasses import dataclass, field from datetime import datetime from typing import TYPE_CHECKING from pysmaev.const import SmaEvChargerParameters from pysmaev.helpers import get_parameters_channel from homeassistant.components.select import SelectEntity, SelectEntityDescription from homeassistant.config_entries import ConfigEntry from homeassistant.const import EntityCategory from homeassistant.core import HomeAssistant, callback from homeassistant.helpers.device_registry import DeviceInfo from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.event import async_call_later from homeassistant.helpers.update_coordinator import ( CoordinatorEntity, DataUpdateCoordinator, ) from .const import ( DOMAIN, SMAEV_COORDINATOR, SMAEV_DEVICE_INFO, SMAEV_PARAMETER, SMAEV_POSSIBLE_VALUES, SMAEV_VALUE, ) import logging
1,047
"""Select platform for SMA EV Charger integration."""
from __future__ import annotations

_LOGGER = logging.getLogger(__name__)


@dataclass
class SmaEvChargerSelectEntityDescription(SelectEntityDescription):
    """Describes SMA EV Charger select entities."""

    type: str = ""
    channel: str = ""
    value_mapping: dict = field(default_factory=dict)


SELECT_DESCRIPTIONS: tuple[SmaEvChargerSelectEntityDescription, ...] = (
    SmaEvChargerSelectEntityDescription(
        key="operating_mode_of_charge_session",
        translation_key="operating_mode_of_charge_session",
        type=SMAEV_PARAMETER,
        channel="Parameter.Chrg.ActChaMod",
        value_mapping={
            SmaEvChargerParameters.BOOST_CHARGING: "boost_charging",
            SmaEvChargerParameters.OPTIMIZED_CHARGING: "optimized_charging",
            SmaEvChargerParameters.SETPOINT_CHARGING: "setpoint_charging",
            SmaEvChargerParameters.CHARGE_STOP: "charge_stop",
        },
        entity_registry_enabled_default=True,
    ),
    SmaEvChargerSelectEntityDescription(
        key="led_brightness",
        translation_key="led_brightness",
        type=SMAEV_PARAMETER,
        channel="Parameter.Sys.DevSigBri",
        value_mapping={
            SmaEvChargerParameters.LED_LOW: "low",
            SmaEvChargerParameters.LED_AVERAGE: "average",
            SmaEvChargerParameters.LED_HIGH: "high",
        },
        entity_registry_enabled_default=True,
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
)


async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up SMA EV Charger select entities."""
    data = hass.data[DOMAIN][config_entry.entry_id]
    coordinator = data[SMAEV_COORDINATOR]
    device_info = data[SMAEV_DEVICE_INFO]

    if TYPE_CHECKING:
        assert config_entry.unique_id

    entities = []
    for entity_description in SELECT_DESCRIPTIONS:
        entities.append(
            SmaEvChargerSelect(
                coordinator, config_entry.unique_id, device_info, entity_description
            )
        )

    async_add_entities(entities)


class SmaEvChargerSelect(CoordinatorEntity, SelectEntity):
    """Representation of a SMA EV Charger select entity."""

    entity_description: SmaEvChargerSelectEntityDescription
    _attr_has_entity_name = True

    def __init__(
        self,
        coordinator: DataUpdateCoordinator,
        config_entry_unique_id: str,
        device_info: DeviceInfo,
        entity_description: SmaEvChargerSelectEntityDescription,
    ) -> None:
        """Initialize the sensor."""
        super().__init__(coordinator)
        self.entity_description = entity_description

        self._attr_device_info = device_info
        self._attr_unique_id = f"{config_entry_unique_id}-{self.entity_description.key}"
        self._attr_options = []
        self._attr_current_option = None
        self.inv_value_mapping = {
            value: key for key, value in self.entity_description.value_mapping.items()
        }

    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        channel = get_parameters_channel(
            self.coordinator.data[SMAEV_PARAMETER],
            self.entity_description.channel,
        )

        possible_values = channel[SMAEV_POSSIBLE_VALUES]
"""Select platform for SMA EV Charger integration."""
from __future__ import annotations

_LOGGER = logging.getLogger(__name__)


@dataclass
class SmaEvChargerSelectEntityDescription(SelectEntityDescription):
    """Describes SMA EV Charger select entities."""

    type: str = ""
    channel: str = ""
    value_mapping: dict = field(default_factory=dict)


SELECT_DESCRIPTIONS: tuple[SmaEvChargerSelectEntityDescription, ...] = (
    SmaEvChargerSelectEntityDescription(
        key="operating_mode_of_charge_session",
        translation_key="operating_mode_of_charge_session",
        type=SMAEV_PARAMETER,
        channel="Parameter.Chrg.ActChaMod",
        value_mapping={
            SmaEvChargerParameters.BOOST_CHARGING: "boost_charging",
            SmaEvChargerParameters.OPTIMIZED_CHARGING: "optimized_charging",
            SmaEvChargerParameters.SETPOINT_CHARGING: "setpoint_charging",
            SmaEvChargerParameters.CHARGE_STOP: "charge_stop",
        },
        entity_registry_enabled_default=True,
    ),
    SmaEvChargerSelectEntityDescription(
        key="led_brightness",
        translation_key="led_brightness",
        type=SMAEV_PARAMETER,
        channel="Parameter.Sys.DevSigBri",
        value_mapping={
            SmaEvChargerParameters.LED_LOW: "low",
            SmaEvChargerParameters.LED_AVERAGE: "average",
            SmaEvChargerParameters.LED_HIGH: "high",
        },
        entity_registry_enabled_default=True,
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
)


async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up SMA EV Charger select entities."""
    data = hass.data[DOMAIN][config_entry.entry_id]
    coordinator = data[SMAEV_COORDINATOR]
    device_info = data[SMAEV_DEVICE_INFO]

    if TYPE_CHECKING:
        assert config_entry.unique_id

    entities = []
    for entity_description in SELECT_DESCRIPTIONS:
        entities.append(
            SmaEvChargerSelect(
                coordinator, config_entry.unique_id, device_info, entity_description
            )
        )

    async_add_entities(entities)


class SmaEvChargerSelect(CoordinatorEntity, SelectEntity):
    """Representation of a SMA EV Charger select entity."""

    entity_description: SmaEvChargerSelectEntityDescription
    _attr_has_entity_name = True

    def __init__(
        self,
        coordinator: DataUpdateCoordinator,
        config_entry_unique_id: str,
        device_info: DeviceInfo,
        entity_description: SmaEvChargerSelectEntityDescription,
    ) -> None:
        """Initialize the sensor."""
        super().__init__(coordinator)
        self.entity_description = entity_description

        self._attr_device_info = device_info
        self._attr_unique_id = f"{config_entry_unique_id}-{self.entity_description.key}"
        self._attr_options = []
        self._attr_current_option = None
        self.inv_value_mapping = {
            value: key for key, value in self.entity_description.value_mapping.items()
        }

    @callback
    def _handle_coordinator_update(self) -> None:
        """Handle updated data from the coordinator."""
        channel = get_parameters_channel(
            self.coordinator.data[SMAEV_PARAMETER],
            self.entity_description.channel,
        )

        possible_values = channel[SMAEV_POSSIBLE_VALUES]
value = channel[SMAEV_VALUE]
5
2023-11-04 07:08:41+00:00
2k
microsoft/promptbase
azureml/components/src/shared/jsonl_utils.py
[ { "identifier": "JSONLReader", "path": "azureml/components/src/shared/jsonl_file_utils.py", "snippet": "class JSONLReader:\n \"\"\"Line-by-line iteration over a JSONL file\n\n Can be used in a 'with' statement, and then iterated over.\n The returned value is a decoded JSON object, rather than\n...
import json import pathlib import tempfile import traceback from typing import Any, Callable, Tuple from .jsonl_file_utils import JSONLReader, JSONLWriter from .logging_utils import get_standard_logger_for_file
808
# Copied from Medprompt.... perhaps those utils should go to PyPi?

_logger = get_standard_logger_for_file(__file__)


def line_map(
    *,
    map_func: Callable[[dict[str, Any]], dict[str, Any] | None],
    source_file: pathlib.Path,
    dest_file: pathlib.Path,
    source_encoding: str,
    dest_encoding: str,
    error_file: pathlib.Path | None = None,
    error_encoding: str | None = None,
    max_errors: int = -1,
) -> Tuple[int, int]:
    """Iterate over a JSONL file, applying map_func to each line"""
    assert source_file.exists()

    successful_lines = 0
    error_lines = 0
# Copied from Medprompt.... perhaps those utils should go to PyPi?

_logger = get_standard_logger_for_file(__file__)


def line_map(
    *,
    map_func: Callable[[dict[str, Any]], dict[str, Any] | None],
    source_file: pathlib.Path,
    dest_file: pathlib.Path,
    source_encoding: str,
    dest_encoding: str,
    error_file: pathlib.Path | None = None,
    error_encoding: str | None = None,
    max_errors: int = -1,
) -> Tuple[int, int]:
    """Iterate over a JSONL file, applying map_func to each line"""
    assert source_file.exists()

    successful_lines = 0
    error_lines = 0
with JSONLReader(source_file, source_encoding) as in_file:
0
2023-12-12 08:00:11+00:00
2k
openai/weak-to-strong
weak_to_strong/train.py
[ { "identifier": "clear_mem", "path": "weak_to_strong/common.py", "snippet": "def clear_mem(verbose: bool = False):\n \"\"\"\n This function is used to clear the memory allocated by PyTorch.\n It does so by calling the garbage collector to release unused GPU memory.\n After clearing the memor...
import itertools import os import pickle import time import datasets import numpy as np import torch import torch_optimizer as toptim import weak_to_strong.logger as logger from dataclasses import dataclass from typing import Callable, Optional from transformers.modeling_utils import load_sharded_checkpoint from weak_to_strong.common import clear_mem from weak_to_strong.eval import eval_model_acc from weak_to_strong.loss import xent_loss from weak_to_strong.model import TransformerWithHead
1,558
@dataclass
class ModelConfig:
    name: str
    default_lr: float
    eval_batch_size: int
    custom_kwargs: Optional[dict] = None
    gradient_checkpointing: bool = False
    model_parallel: bool = False
    default_optimizer: str = "adam"


def train_model(
    model: torch.nn.Module,
    ds: datasets.Dataset,
    batch_size: int,
    lr: float = 1e-5,
@dataclass
class ModelConfig:
    name: str
    default_lr: float
    eval_batch_size: int
    custom_kwargs: Optional[dict] = None
    gradient_checkpointing: bool = False
    model_parallel: bool = False
    default_optimizer: str = "adam"


def train_model(
    model: torch.nn.Module,
    ds: datasets.Dataset,
    batch_size: int,
    lr: float = 1e-5,
loss_fn: Callable = xent_loss,
2
2023-12-13 23:53:13+00:00
2k
SqueezeAILab/LLMCompiler
configs/hotpotqa/configs.py
[ { "identifier": "OUTPUT_PROMPT", "path": "configs/hotpotqa/gpt_prompts.py", "snippet": "OUTPUT_PROMPT = (\n \"Solve a question answering task with interleaving Observation, Thought, and Action steps. Here are some guidelines:\\n\"\n \" - You will be given a Question and some Wikipedia passages, w...
from configs.hotpotqa.gpt_prompts import OUTPUT_PROMPT, PLANNER_PROMPT
945
CONFIGS = {
    "default_model": "gpt-3.5-turbo-1106",
    "planner_prompt": PLANNER_PROMPT,
CONFIGS = {
    "default_model": "gpt-3.5-turbo-1106",
    "planner_prompt": PLANNER_PROMPT,
"output_prompt": OUTPUT_PROMPT,
0
2023-12-06 21:12:54+00:00
2k
open-compass/MixtralKit
mixtralkit/layers/attention.py
[ { "identifier": "ModelArgs", "path": "mixtralkit/layers/utils.py", "snippet": "class ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hid...
import math import torch import torch.nn.functional as F import fairscale.nn.model_parallel.initialize as fs_init from typing import Optional, Tuple from torch import nn from .utils import ModelArgs, repeat_kv from .position_embeding import apply_rotary_emb from fairscale.nn.model_parallel.layers import ( ColumnParallelLinear, RowParallelLinear, )
1,488
# Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.


class TorchAttention(nn.Module):
    """Multi-head attention module."""
    def __init__(self, args: ModelArgs):
        """
        Initialize the Attention module.

        Args:
            args (ModelArgs): Model configuration parameters.

        Attributes:
            n_kv_heads (int): Number of key and value heads.
            n_local_heads (int): Number of local query heads.
            n_local_kv_heads (int): Number of local key and value heads.
            n_rep (int): Number of repetitions for local heads.
            head_dim (int): Dimension size of each attention head.
            wq (ColumnParallelLinear): Linear transformation for queries.
            wk (ColumnParallelLinear): Linear transformation for keys.
            wv (ColumnParallelLinear): Linear transformation for values.
            wo (RowParallelLinear): Linear transformation for output.
            cache_k (torch.Tensor): Cached keys for attention.
            cache_v (torch.Tensor): Cached values for attention.
        """
        super().__init__()
        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
        model_parallel_size = 1
        self.n_local_heads = args.n_heads // model_parallel_size
        self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
        self.n_rep = self.n_local_heads // self.n_local_kv_heads
        self.head_dim = args.dim // args.n_heads

        self.wq = nn.Linear(
            args.dim,
            args.n_heads * self.head_dim,
            bias=False,
        )
        self.wk = nn.Linear(
            args.dim,
            self.n_kv_heads * self.head_dim,
            bias=False,
        )
        self.wv = nn.Linear(
            args.dim,
            self.n_kv_heads * self.head_dim,
            bias=False,
        )
        self.wo = nn.Linear(
            args.n_heads * self.head_dim,
            args.dim,
            bias=False,
        )

        self.cache_k = torch.zeros(
            (
                args.max_batch_size,
                args.max_seq_len,
                self.n_local_kv_heads,
                self.head_dim,
            )
        ).cuda()
        self.cache_v = torch.zeros(
            (
                args.max_batch_size,
                args.max_seq_len,
                self.n_local_kv_heads,
                self.head_dim,
            )
        ).cuda()

    def forward(
        self,
        x: torch.Tensor,
        start_pos: int,
        freqs_cis: torch.Tensor,
        mask: Optional[torch.Tensor],
    ):
        """
        Forward pass of the attention module.

        Args:
            x (torch.Tensor): Input tensor.
            start_pos (int): Starting position for caching.
            freqs_cis (torch.Tensor): Precomputed frequency tensor.
            mask (torch.Tensor, optional): Attention mask tensor.

        Returns:
            torch.Tensor: Output tensor after attention.
        """
        bsz, seqlen, _ = x.shape
        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)

        xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
        xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
        xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
# Copyright (c) OpenMMLab. and affiliates.
# Copyright (c) Meta Platforms, Inc. and affiliates.


class TorchAttention(nn.Module):
    """Multi-head attention module."""
    def __init__(self, args: ModelArgs):
        """
        Initialize the Attention module.

        Args:
            args (ModelArgs): Model configuration parameters.

        Attributes:
            n_kv_heads (int): Number of key and value heads.
            n_local_heads (int): Number of local query heads.
            n_local_kv_heads (int): Number of local key and value heads.
            n_rep (int): Number of repetitions for local heads.
            head_dim (int): Dimension size of each attention head.
            wq (ColumnParallelLinear): Linear transformation for queries.
            wk (ColumnParallelLinear): Linear transformation for keys.
            wv (ColumnParallelLinear): Linear transformation for values.
            wo (RowParallelLinear): Linear transformation for output.
            cache_k (torch.Tensor): Cached keys for attention.
            cache_v (torch.Tensor): Cached values for attention.
        """
        super().__init__()
        self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
        model_parallel_size = 1
        self.n_local_heads = args.n_heads // model_parallel_size
        self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
        self.n_rep = self.n_local_heads // self.n_local_kv_heads
        self.head_dim = args.dim // args.n_heads

        self.wq = nn.Linear(
            args.dim,
            args.n_heads * self.head_dim,
            bias=False,
        )
        self.wk = nn.Linear(
            args.dim,
            self.n_kv_heads * self.head_dim,
            bias=False,
        )
        self.wv = nn.Linear(
            args.dim,
            self.n_kv_heads * self.head_dim,
            bias=False,
        )
        self.wo = nn.Linear(
            args.n_heads * self.head_dim,
            args.dim,
            bias=False,
        )

        self.cache_k = torch.zeros(
            (
                args.max_batch_size,
                args.max_seq_len,
                self.n_local_kv_heads,
                self.head_dim,
            )
        ).cuda()
        self.cache_v = torch.zeros(
            (
                args.max_batch_size,
                args.max_seq_len,
                self.n_local_kv_heads,
                self.head_dim,
            )
        ).cuda()

    def forward(
        self,
        x: torch.Tensor,
        start_pos: int,
        freqs_cis: torch.Tensor,
        mask: Optional[torch.Tensor],
    ):
        """
        Forward pass of the attention module.

        Args:
            x (torch.Tensor): Input tensor.
            start_pos (int): Starting position for caching.
            freqs_cis (torch.Tensor): Precomputed frequency tensor.
            mask (torch.Tensor, optional): Attention mask tensor.

        Returns:
            torch.Tensor: Output tensor after attention.
        """
        bsz, seqlen, _ = x.shape
        xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)

        xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
        xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
        xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
2
2023-12-09 15:05:26+00:00
2k
aymenfurter/microagents
gradio_ui/agent_manager.py
[ { "identifier": "MicroAgentManager", "path": "agents/microagent_manager.py", "snippet": "class MicroAgentManager:\n \"\"\"\n Manages the creation and retrieval of micro agents.\n \"\"\"\n\n def __init__(self, api_key: str, max_agents: int = 20, db_filename=\"agents.db\"):\n self.api_k...
import logging from typing import Any, List from agents.microagent_manager import MicroAgentManager from agents.microagent import MicroAgent
1,527
logger = logging.getLogger(__name__)


class GradioAgentManager:
    """
    A wrapper class for interacting with MicroAgentManager in a Gradio interface.
    """

    def __init__(self, api_key: str):
        self.manager = MicroAgentManager(api_key)
        self.manager.create_agents()

    def get_agents_info(self) -> List[dict]:
        """
        Retrieve information about all agents for display in Gradio.
        """
        agents = self.manager.get_agents()
        return [self.format_agent_info(agent) for agent in agents]
logger = logging.getLogger(__name__)


class GradioAgentManager:
    """
    A wrapper class for interacting with MicroAgentManager in a Gradio interface.
    """

    def __init__(self, api_key: str):
        self.manager = MicroAgentManager(api_key)
        self.manager.create_agents()

    def get_agents_info(self) -> List[dict]:
        """
        Retrieve information about all agents for display in Gradio.
        """
        agents = self.manager.get_agents()
        return [self.format_agent_info(agent) for agent in agents]
def format_agent_info(self, agent: MicroAgent) -> dict:
1
2023-12-11 08:17:09+00:00
2k
bytedance/ImageDream
extern/ldm_zero123/thirdp/psp/model_irse.py
[ { "identifier": "Flatten", "path": "extern/ldm_zero123/thirdp/psp/helpers.py", "snippet": "class Flatten(Module):\n def forward(self, input):\n return input.view(input.size(0), -1)" }, { "identifier": "bottleneck_IR", "path": "extern/ldm_zero123/thirdp/psp/helpers.py", "snippet...
from torch.nn import ( BatchNorm1d, BatchNorm2d, Conv2d, Dropout, Linear, Module, PReLU, Sequential, ) from extern.ldm_zero123.thirdp.psp.helpers import ( Flatten, bottleneck_IR, bottleneck_IR_SE, get_blocks, l2_norm, )
1,205
# https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""


class Backbone(Module):
    def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
        super(Backbone, self).__init__()
        assert input_size in [112, 224], "input_size should be 112 or 224"
        assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
        assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
        blocks = get_blocks(num_layers)
        if mode == "ir":
# https://github.com/eladrich/pixel2style2pixel
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""


class Backbone(Module):
    def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True):
        super(Backbone, self).__init__()
        assert input_size in [112, 224], "input_size should be 112 or 224"
        assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
        assert mode in ["ir", "ir_se"], "mode should be ir or ir_se"
        blocks = get_blocks(num_layers)
        if mode == "ir":
unit_module = bottleneck_IR
1
2023-12-13 21:09:37+00:00
2k
TencentARC/MotionCtrl
lvdm/modules/attention_temporal.py
[ { "identifier": "checkpoint", "path": "lvdm/common.py", "snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n :param func: the func...
import math import torch import torch as th import torch.nn.functional as F import xformers import xformers.ops from inspect import isfunction from torch import nn, einsum from einops import rearrange, repeat from lvdm.common import ( checkpoint, exists, uniq, default, max_neg_value, init_ ) from lvdm.basics import ( conv_nd, zero_module, normalization )
842
try:
    import xformers
    import xformers.ops
    XFORMERS_IS_AVAILBLE = True
except:
    XFORMERS_IS_AVAILBLE = False


class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
        super().__init__()
        inner_dim = int(dim * mult)
try:
    import xformers
    import xformers.ops
    XFORMERS_IS_AVAILBLE = True
except:
    XFORMERS_IS_AVAILBLE = False


class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
        super().__init__()
        inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
3
2023-12-06 07:27:45+00:00
2k
s-casci/tinyzero
tictactoe/one_dim/eval.py
[ { "identifier": "LinearNetwork", "path": "models.py", "snippet": "class LinearNetwork(nn.Module):\n def __init__(self, input_shape, action_space, first_layer_size=512, second_layer_size=256):\n super().__init__()\n self.first_layer = nn.Linear(input_shape[0], first_layer_size)\n self.second_la...
from game import TicTacToe from train import OUT_DIR, SEARCH_ITERATIONS from tqdm import tqdm from models import LinearNetwork # noqa: E402 from agents import AlphaZeroAgent, ClassicMCTSAgent # noqa: E402 from mcts import pit # noqa: E402 import torch import os import sys
948
sys.path.append(os.getcwd())

EVAL_GAMES = 100

if __name__ == "__main__":
    game = TicTacToe()

    model = LinearNetwork(game.observation_shape, game.action_space)
    model.load_state_dict(torch.load(f"{OUT_DIR}/model.pth"))
    agent = AlphaZeroAgent(model)
    agent_play_kwargs = {"search_iterations": SEARCH_ITERATIONS * 2, "c_puct": 1.0, "dirichlet_alpha": None}

    print(f"Playing {EVAL_GAMES} games against itself")
    results = {0: 0, 1: 0, -1: 0}
    for _ in tqdm(range(EVAL_GAMES)):
        game.reset()
sys.path.append(os.getcwd())

EVAL_GAMES = 100

if __name__ == "__main__":
    game = TicTacToe()

    model = LinearNetwork(game.observation_shape, game.action_space)
    model.load_state_dict(torch.load(f"{OUT_DIR}/model.pth"))
    agent = AlphaZeroAgent(model)
    agent_play_kwargs = {"search_iterations": SEARCH_ITERATIONS * 2, "c_puct": 1.0, "dirichlet_alpha": None}

    print(f"Playing {EVAL_GAMES} games against itself")
    results = {0: 0, 1: 0, -1: 0}
    for _ in tqdm(range(EVAL_GAMES)):
        game.reset()
result = pit(
3
2023-12-14 11:36:50+00:00
2k
facebookresearch/PurpleLlama
CybersecurityBenchmarks/insecure_code_detector/tests/test_python_insecure_code_detector.py
[ { "identifier": "Language", "path": "CybersecurityBenchmarks/insecure_code_detector/languages.py", "snippet": "class Language(str, enum.Enum):\n C = \"c\"\n CPP = \"cpp\"\n CSHARP = \"csharp\"\n HACK = \"hack\"\n JAVA = \"java\"\n JAVASCRIPT = \"javascript\"\n KOTLIN = \"kotlin\"\n ...
from ..languages import Language from .insecure_code_detector_test import InsecureCodeDetectorTest
716
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# the following test cases contain an input string, and the corresponding number of expected insecure pattern matches
PYTHON_TEST_CASES = [
    ("import os", 0),
    ("import hashlib", 0),
    ("from hashlib import sha256", 0),
    ("hashlib.new('sha1')", 1),
    ("hashlib.md5()", 1),
]
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# the following test cases contain an input string, and the corresponding number of expected insecure pattern matches
PYTHON_TEST_CASES = [
    ("import os", 0),
    ("import hashlib", 0),
    ("from hashlib import sha256", 0),
    ("hashlib.new('sha1')", 1),
    ("hashlib.md5()", 1),
]
class TestPythonInsecureCodeDetector(InsecureCodeDetectorTest):
1
2023-12-06 21:29:41+00:00
2k
allenai/unified-io-2
t5x/examples/unified_io/modality_processing.py
[ { "identifier": "AudioEncoder", "path": "t5x/examples/unified_io/audio_encoder.py", "snippet": "class AudioEncoder(nn.Module):\n \"\"\"Encodes raw audio spectrograms as features\"\"\"\n config: Union[ImageVitFeatureConfig, AudioVitFeatureConfig]\n\n def setup(self):\n cfg = self.config\n # `vis...
from collections import OrderedDict from typing import Mapping from flax import traverse_util from seqio import TaskRegistry, FeatureConverter from t5x.examples.unified_io.audio_encoder import AudioEncoder from t5x.examples.unified_io.image_encoder import ImageEncoder from t5x.examples.unified_io.input_modalities import * from t5x.examples.unified_io.target_modalities import *
890
"""Code for handling modalities"""


@gin.configurable
def get_target_modalities(
    target_modality=['text', 'image', 'audio'],
    image_vae_config: ImageViTVQGANConfig=VAEConfig(),
    audio_vae_config: AudioViTVQGANConfig=AudioViTVQGANConfig(),
) -> Dict[str, ModalityEncoder]:
  """Return the encoders to use for target modalities"""

  out = {}
  if 'text' in target_modality:
    out['text'] = TargetTextEncoder()
  if 'image' in target_modality:
    out['image'] = TargetImageDVAEEmbedder(image_vae_config)
  if 'audio' in target_modality:
    out['audio'] = TargetAudioDVAEEmbedder(audio_vae_config)
  return out


@gin.configurable
def get_input_modalities(
    input_modality=('text', 'image', 'image_history', 'audio', 'audio_history'),
    image_vit_cfg: ImageVitFeatureConfig=ImageVitFeatureConfig(),
    audio_vit_cfg: AudioVitFeatureConfig=AudioVitFeatureConfig(),
    image_history_cfg: ImageResamplerConfig=ImageResamplerConfig(),
    audio_history_cfg: AudioResamplerConfig=AudioResamplerConfig(),
    max_img_history=None,
    max_audio_history=None,
    use_image_vit = False,
    use_audio_vit = False,
    freeze_vit=False,
    use_image_history_vit = False,
    use_audio_history_vit = False,
) -> Dict[str, ModalityEncoder]:
  """Returns the ModalityEncoder for the input modalities"""
  out = dict()
  if 'text' in input_modality:
    out["text"] = InputTextEncoder()

  image_encoder = None
  if use_image_vit or use_image_history_vit:
    image_encoder = ImageEncoder(image_vit_cfg)

  audio_encoder = None
  if use_audio_vit or use_audio_history_vit:
"""Code for handling modalities"""


@gin.configurable
def get_target_modalities(
    target_modality=['text', 'image', 'audio'],
    image_vae_config: ImageViTVQGANConfig=VAEConfig(),
    audio_vae_config: AudioViTVQGANConfig=AudioViTVQGANConfig(),
) -> Dict[str, ModalityEncoder]:
  """Return the encoders to use for target modalities"""

  out = {}
  if 'text' in target_modality:
    out['text'] = TargetTextEncoder()
  if 'image' in target_modality:
    out['image'] = TargetImageDVAEEmbedder(image_vae_config)
  if 'audio' in target_modality:
    out['audio'] = TargetAudioDVAEEmbedder(audio_vae_config)
  return out


@gin.configurable
def get_input_modalities(
    input_modality=('text', 'image', 'image_history', 'audio', 'audio_history'),
    image_vit_cfg: ImageVitFeatureConfig=ImageVitFeatureConfig(),
    audio_vit_cfg: AudioVitFeatureConfig=AudioVitFeatureConfig(),
    image_history_cfg: ImageResamplerConfig=ImageResamplerConfig(),
    audio_history_cfg: AudioResamplerConfig=AudioResamplerConfig(),
    max_img_history=None,
    max_audio_history=None,
    use_image_vit = False,
    use_audio_vit = False,
    freeze_vit=False,
    use_image_history_vit = False,
    use_audio_history_vit = False,
) -> Dict[str, ModalityEncoder]:
  """Returns the ModalityEncoder for the input modalities"""
  out = dict()
  if 'text' in input_modality:
    out["text"] = InputTextEncoder()

  image_encoder = None
  if use_image_vit or use_image_history_vit:
    image_encoder = ImageEncoder(image_vit_cfg)

  audio_encoder = None
  if use_audio_vit or use_audio_history_vit:
audio_encoder = AudioEncoder(audio_vit_cfg)
0
2023-12-12 20:23:33+00:00
2k
zju3dv/EasyVolcap
scripts/gaussian/merge_pcd.py
[ { "identifier": "load_pts", "path": "easyvolcap/utils/data_utils.py", "snippet": "def load_pts(filename: str):\n from pyntcloud import PyntCloud\n cloud = PyntCloud.from_file(filename)\n verts = cloud.xyz\n if 'red' in cloud.points and 'green' in cloud.points and 'blue' in cloud.points:\n ...
from easyvolcap.utils.console_utils import * from easyvolcap.utils.data_utils import load_pts, export_pts from os.path import join import argparse import numpy as np
1,196
"""
This script will load and convert a .ply visual hull to a points3D file
"""


@catch_throw
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--data_root', default='data/enerf_outdoor/actor2_3')
    parser.add_argument('--vhulls_dir', default='merged')
    parser.add_argument('--vhulls_dirs', default=['vhulls', 'bkgd/boost'])
    parser.add_argument('--pcd_file', default='000000.ply')
    args = parser.parse_args()

    vs = []
    out = join(args.data_root, args.vhulls_dir, args.pcd_file)
    for vhull_dir in args.vhulls_dirs:
        vhull = join(args.data_root, vhull_dir, args.pcd_file)
"""
This script will load and convert a .ply visual hull to a points3D file
"""


@catch_throw
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--data_root', default='data/enerf_outdoor/actor2_3')
    parser.add_argument('--vhulls_dir', default='merged')
    parser.add_argument('--vhulls_dirs', default=['vhulls', 'bkgd/boost'])
    parser.add_argument('--pcd_file', default='000000.ply')
    args = parser.parse_args()

    vs = []
    out = join(args.data_root, args.vhulls_dir, args.pcd_file)
    for vhull_dir in args.vhulls_dirs:
        vhull = join(args.data_root, vhull_dir, args.pcd_file)
v, c, n, s = load_pts(vhull)
0
2023-12-07 08:53:42+00:00
2k
minghanqin/LangSplat
scene/cameras.py
[ { "identifier": "getWorld2View2", "path": "utils/graphics_utils.py", "snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:...
import os import pickle import torch import numpy as np from torch import nn from utils.graphics_utils import getWorld2View2, getProjectionMatrix
922
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#


class Camera(nn.Module):
    def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
                 image_name, uid,
                 trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device="cuda"
                 ):
        super(Camera, self).__init__()

        self.uid = uid
        self.colmap_id = colmap_id
        self.R = R
        self.T = T
        self.FoVx = FoVx
        self.FoVy = FoVy
        self.image_name = image_name

        try:
            self.data_device = torch.device(data_device)
        except Exception as e:
            print(e)
            print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device")
            self.data_device = torch.device("cuda")

        self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
        self.image_width = self.original_image.shape[2]
        self.image_height = self.original_image.shape[1]

        if gt_alpha_mask is not None:
            self.original_image *= gt_alpha_mask.to(self.data_device)
        else:
            self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)

        self.zfar = 100.0
        self.znear = 0.01

        self.trans = trans
        self.scale = scale

        self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#


class Camera(nn.Module):
    def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
                 image_name, uid,
                 trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device="cuda"
                 ):
        super(Camera, self).__init__()

        self.uid = uid
        self.colmap_id = colmap_id
        self.R = R
        self.T = T
        self.FoVx = FoVx
        self.FoVy = FoVy
        self.image_name = image_name

        try:
            self.data_device = torch.device(data_device)
        except Exception as e:
            print(e)
            print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device")
            self.data_device = torch.device("cuda")

        self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
        self.image_width = self.original_image.shape[2]
        self.image_height = self.original_image.shape[1]

        if gt_alpha_mask is not None:
            self.original_image *= gt_alpha_mask.to(self.data_device)
        else:
            self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)

        self.zfar = 100.0
        self.znear = 0.01

        self.trans = trans
        self.scale = scale

        self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
next_line: self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy).transpose(0,1).cuda()
gold_snippet_index: 1
created_at: 2023-12-11 06:33:35+00:00
level: 2k
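
The next_line here builds the camera's projection matrix. A short sketch of how the two 4x4 transforms typically compose afterwards in Gaussian-splatting cameras, using identity matrices as stand-ins (illustrative only, not this repo's confirmed code):

import torch

world_view = torch.eye(4)    # stand-in for world_view_transform
projection = torch.eye(4)    # stand-in for projection_matrix
# Batched matmul of the two row-major-transposed matrices gives the full
# projection used downstream; the last row of the inverse view holds the
# camera center.
full_proj = world_view.unsqueeze(0).bmm(projection.unsqueeze(0)).squeeze(0)
camera_center = world_view.inverse()[3, :3]
print(full_proj.shape, camera_center.shape)   # torch.Size([4, 4]) torch.Size([3])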

repo_name: SciPhi-AI/agent-search
file_path: agent_search/search/base.py
[ { "identifier": "AgentSearchResult", "path": "agent_search/core/search_types.py", "snippet": "class AgentSearchResult(BaseModel):\n \"\"\"A dataclass to store the search result\"\"\"\n\n score: float\n url: str\n title: Optional[str]\n dataset: Optional[str]\n # TODO - Add dict(str, [s...
import csv import json import logging import os import numpy as np import psycopg2 from typing import List from qdrant_client import QdrantClient from transformers import AutoModel from agent_search.core import AgentSearchResult from agent_search.core.utils import ( cosine_similarity, get_data_path, load_config, )
token_num: 650
logger = logging.getLogger(__name__) class WebSearchEngine: """A simple search client for the OpenSearch collection""" def __init__( self, ): try: import psycopg2 except ImportError as e: raise ImportError( f"Error {e} while importing psycopg2. Please install it with `pip install psycopg2` to run a WebSearchEngine instance." ) # Load config
logger = logging.getLogger(__name__) class WebSearchEngine: """A simple search client for the OpenSearch collection""" def __init__( self, ): try: import psycopg2 except ImportError as e: raise ImportError( f"Error {e} while importing psycopg2. Please install it with `pip install psycopg2` to run a WebSearchEngine instance." ) # Load config
next_line: self.config = load_config()["agent_search"]
gold_snippet_index: 3
created_at: 2023-12-11 17:41:03+00:00
level: 2k

repo_name: yohanshin/WHAM
file_path: lib/data/_dataset.py
[ { "identifier": "constants", "path": "configs/constants.py", "snippet": "IMG_FEAT_DIM = {\n 'resnet': 2048,\n 'vit': 1024\n}\nN_JOINTS = 17\n PARSED_DATA = f'{root}/parsed_data'\n THREEDPW_PTH = f'{root}/3DPW'\n RICH_PTH = f'{root}/RICH'\n EMDB_PTH = f'{root}/EMDB'\n NUM_JOINTS = N_...
import torch import numpy as np from skimage.util.shape import view_as_windows from configs import constants as _C from .normalizer import Normalizer from lib.utils.imutils import transform
token_num: 1499
from __future__ import absolute_import from __future__ import print_function from __future__ import division class BaseDataset(torch.utils.data.Dataset): def __init__(self, cfg, training=True): super(BaseDataset, self).__init__() self.n_joints = _C.KEYPOINTS.NUM_JOINTS self.epoch = 0 self.n_frames = cfg.DATASET.SEQLEN + 1 self.training = training
from __future__ import absolute_import from __future__ import print_function from __future__ import division class BaseDataset(torch.utils.data.Dataset): def __init__(self, cfg, training=True): super(BaseDataset, self).__init__() self.n_joints = _C.KEYPOINTS.NUM_JOINTS self.epoch = 0 self.n_frames = cfg.DATASET.SEQLEN + 1 self.training = training
next_line: self.keypoints_normalizer = Normalizer(cfg)
gold_snippet_index: 1
created_at: 2023-12-08 09:17:54+00:00
level: 2k
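
The imports above pull in skimage's view_as_windows, which WHAM-style datasets use to cut motion sequences into fixed-length chunks of n_frames indices. A small sketch of that slicing, with the SEQLEN value assumed:

import numpy as np
from skimage.util.shape import view_as_windows

n_frames = 81 + 1                          # cfg.DATASET.SEQLEN + 1 (value assumed)
frame_ids = np.arange(1000)                # stand-in for per-frame indices
windows = view_as_windows(frame_ids, (n_frames,), step=n_frames // 2)
print(windows.shape)                       # (num_windows, 82)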

repo_name: octo-models/octo
file_path: octo/data/oxe/oxe_standardization_transforms.py
[ { "identifier": "binarize_gripper_actions", "path": "octo/data/utils/data_utils.py", "snippet": "def binarize_gripper_actions(actions: tf.Tensor) -> tf.Tensor:\n \"\"\"Converts gripper actions from continous to binary values (0 and 1).\n\n We exploit that fact that most of the time, the gripper is...
from typing import Any, Dict from octo.data.utils.data_utils import ( binarize_gripper_actions, invert_gripper_actions, rel2abs_gripper_actions, relabel_actions, ) import tensorflow as tf import tensorflow_graphics.geometry.transformation as tft
token_num: 1251
"""Open X-Embodiment Dataset Transforms input: dict of features, each is batched, i.e. has leading time dimension expected output: step = { 'observation': { <image_keys, depth_image_keys> state in chosen state representation }, 'action': action in chosen action representation, 'language_instruction': str, } """ def bridge_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: # NOTE: this is not actually the official OXE copy of bridge, it is our own more up-to-date copy that you # can find at https://rail.eecs.berkeley.edu/datasets/bridge_release/data/tfds/ trajectory["action"] = tf.concat( [ trajectory["action"][:, :6],
"""Open X-Embodiment Dataset Transforms input: dict of features, each is batched, i.e. has leading time dimension expected output: step = { 'observation': { <image_keys, depth_image_keys> state in chosen state representation }, 'action': action in chosen action representation, 'language_instruction': str, } """ def bridge_dataset_transform(trajectory: Dict[str, Any]) -> Dict[str, Any]: # NOTE: this is not actually the official OXE copy of bridge, it is our own more up-to-date copy that you # can find at https://rail.eecs.berkeley.edu/datasets/bridge_release/data/tfds/ trajectory["action"] = tf.concat( [ trajectory["action"][:, :6],
next_line: binarize_gripper_actions(trajectory["action"][:, -1])[:, None],
gold_snippet_index: 0
created_at: 2023-12-13 09:58:56+00:00
level: 2k
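
The transform keeps the six pose dimensions and re-appends a processed gripper channel, as the next_line shows. A runnable sketch of the same concat pattern, where a plain threshold stands in for binarize_gripper_actions (an assumption, not the imported function's actual logic):

import tensorflow as tf

actions = tf.random.uniform((10, 7))                     # (T, 6 pose dims + gripper)
gripper = tf.cast(actions[:, -1] > 0.5, actions.dtype)   # stand-in binarization
actions = tf.concat([actions[:, :6], gripper[:, None]], axis=1)
print(actions.shape)                                     # (10, 7)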

repo_name: mistralai/client-python
file_path: tests/test_chat.py
[ { "identifier": "mock_chat_response_payload", "path": "tests/utils.py", "snippet": "def mock_chat_response_payload():\n return orjson.dumps(\n {\n \"id\": \"chat-98c8c60e3fbf4fc49658eddaf447357c\",\n \"object\": \"chat.completion\",\n \"created\": 1703165682,\n...
import unittest.mock as mock import pytest from mistralai.client import MistralClient from mistralai.models.chat_completion import ( ChatCompletionResponse, ChatCompletionStreamResponse, ChatMessage, ) from .utils import ( mock_chat_response_payload, mock_chat_response_streaming_payload, mock_response, mock_stream_response, )
token_num: 1007
@pytest.fixture() def client(): client = MistralClient() client._client = mock.MagicMock() return client class TestChat: def test_chat(self, client): client._client.request.return_value = mock_response( 200, mock_chat_response_payload(), ) result = client.chat( model="mistral-small", messages=[ ChatMessage(role="user", content="What is the best French cheese?") ], ) client._client.request.assert_called_once_with( "post", "https://api.mistral.ai/v1/chat/completions", headers={ "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", "Authorization": "Bearer None", "Content-Type": "application/json", }, json={ "model": "mistral-small", "messages": [ {"role": "user", "content": "What is the best French cheese?"} ], "safe_prompt": False, "stream": False, }, ) assert isinstance( result, ChatCompletionResponse ), "Should return an ChatCompletionResponse" assert len(result.choices) == 1 assert result.choices[0].index == 0 assert result.object == "chat.completion" def test_chat_streaming(self, client):
@pytest.fixture() def client(): client = MistralClient() client._client = mock.MagicMock() return client class TestChat: def test_chat(self, client): client._client.request.return_value = mock_response( 200, mock_chat_response_payload(), ) result = client.chat( model="mistral-small", messages=[ ChatMessage(role="user", content="What is the best French cheese?") ], ) client._client.request.assert_called_once_with( "post", "https://api.mistral.ai/v1/chat/completions", headers={ "User-Agent": f"mistral-client-python/{client._version}", "Accept": "application/json", "Authorization": "Bearer None", "Content-Type": "application/json", }, json={ "model": "mistral-small", "messages": [ {"role": "user", "content": "What is the best French cheese?"} ], "safe_prompt": False, "stream": False, }, ) assert isinstance( result, ChatCompletionResponse ), "Should return an ChatCompletionResponse" assert len(result.choices) == 1 assert result.choices[0].index == 0 assert result.object == "chat.completion" def test_chat_streaming(self, client):
next_line: client._client.stream.return_value = mock_stream_response(
gold_snippet_index: 3
created_at: 2023-12-07 10:09:51+00:00
level: 2k

repo_name: kijai/ComfyUI-Marigold
file_path: marigold/model/marigold_pipeline.py
[ { "identifier": "RGBEncoder", "path": "marigold/model/rgb_encoder.py", "snippet": "class RGBEncoder(nn.Module):\n \"\"\"\n The encoder of pretrained Stable Diffusion VAE\n \"\"\"\n \n def __init__(self, pretrained_path, subfolder=None) -> None:\n super().__init__()\n \n ...
import logging import numpy as np import torch from typing import Dict from diffusers import ( DDIMScheduler, DDPMScheduler, PNDMScheduler, DEISMultistepScheduler, SchedulerMixin, UNet2DConditionModel, ) from torch import nn from torch.nn import Conv2d from torch.nn.parameter import Parameter from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from .rgb_encoder import RGBEncoder from .stacked_depth_AE import StackedDepthAE
token_num: 1225
# Author: Bingxin Ke # Last modified: 2023-12-11 class MarigoldPipeline(nn.Module): """ Marigold monocular depth estimator. """ def __init__( self, unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx} rgb_encoder_pretrained_path: Dict, depht_ae_pretrained_path: Dict, noise_scheduler_pretrained_path: Dict, tokenizer_pretrained_path: Dict, text_encoder_pretrained_path: Dict, empty_text_embed=None, trainable_unet=False, rgb_latent_scale_factor=0.18215, depth_latent_scale_factor=0.18215, noise_scheduler_type=None, enable_gradient_checkpointing=False, enable_xformers=True, ) -> None: super().__init__() self.rgb_latent_scale_factor = rgb_latent_scale_factor self.depth_latent_scale_factor = depth_latent_scale_factor self.device = "cpu" # ******* Initialize modules ******* # Trainable modules self.trainable_module_dic: Dict[str, nn.Module] = {} self.trainable_unet = trainable_unet # Denoising UNet self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained( unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"] ) logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}") if 8 != self.unet.config["in_channels"]: self._replace_unet_conv_in() logging.warning("Unet conv_in layer is replaced") if enable_xformers: self.unet.enable_xformers_memory_efficient_attention() else: self.unet.disable_xformers_memory_efficient_attention() # Image encoder
# Author: Bingxin Ke # Last modified: 2023-12-11 class MarigoldPipeline(nn.Module): """ Marigold monocular depth estimator. """ def __init__( self, unet_pretrained_path: Dict, # {path: xxx, subfolder: xxx} rgb_encoder_pretrained_path: Dict, depht_ae_pretrained_path: Dict, noise_scheduler_pretrained_path: Dict, tokenizer_pretrained_path: Dict, text_encoder_pretrained_path: Dict, empty_text_embed=None, trainable_unet=False, rgb_latent_scale_factor=0.18215, depth_latent_scale_factor=0.18215, noise_scheduler_type=None, enable_gradient_checkpointing=False, enable_xformers=True, ) -> None: super().__init__() self.rgb_latent_scale_factor = rgb_latent_scale_factor self.depth_latent_scale_factor = depth_latent_scale_factor self.device = "cpu" # ******* Initialize modules ******* # Trainable modules self.trainable_module_dic: Dict[str, nn.Module] = {} self.trainable_unet = trainable_unet # Denoising UNet self.unet: UNet2DConditionModel = UNet2DConditionModel.from_pretrained( unet_pretrained_path["path"], subfolder=unet_pretrained_path["subfolder"] ) logging.info(f"pretrained UNet loaded from: {unet_pretrained_path}") if 8 != self.unet.config["in_channels"]: self._replace_unet_conv_in() logging.warning("Unet conv_in layer is replaced") if enable_xformers: self.unet.enable_xformers_memory_efficient_attention() else: self.unet.disable_xformers_memory_efficient_attention() # Image encoder
next_line: self.rgb_encoder = RGBEncoder(
gold_snippet_index: 0
created_at: 2023-12-12 12:25:52+00:00
level: 2k
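
The `if 8 != self.unet.config["in_channels"]` branch above swaps conv_in so the UNet can take concatenated RGB and depth latents. A sketch of the usual Marigold-style widening trick: repeat the pretrained 4-channel kernels and halve them so initial activations keep their scale (this mirrors the published Marigold approach, not necessarily this fork's exact code):

import torch
from torch import nn

old = nn.Conv2d(4, 320, kernel_size=3, padding=1)   # pretrained conv_in shape
new = nn.Conv2d(8, 320, kernel_size=3, padding=1)
with torch.no_grad():
    new.weight.copy_(old.weight.repeat(1, 2, 1, 1) * 0.5)
    new.bias.copy_(old.bias)
print(new(torch.randn(1, 8, 64, 64)).shape)         # torch.Size([1, 320, 64, 64])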

repo_name: modelscope/richdreamer
file_path: extern/ldm_zero123/thirdp/psp/model_irse.py
[ { "identifier": "Flatten", "path": "extern/ldm_zero123/thirdp/psp/helpers.py", "snippet": "class Flatten(Module):\n def forward(self, input):\n return input.view(input.size(0), -1)" }, { "identifier": "bottleneck_IR", "path": "extern/ldm_zero123/thirdp/psp/helpers.py", "snippet...
from torch.nn import (BatchNorm1d, BatchNorm2d, Conv2d, Dropout, Linear, Module, PReLU, Sequential,) from extern.ldm_zero123.thirdp.psp.helpers import (Flatten, bottleneck_IR, bottleneck_IR_SE, get_blocks, l2_norm,)
token_num: 1210
# https://github.com/eladrich/pixel2style2pixel """ Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) """ class Backbone(Module): def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True): super(Backbone, self).__init__() assert input_size in [112, 224], "input_size should be 112 or 224" assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152" assert mode in ["ir", "ir_se"], "mode should be ir or ir_se" blocks = get_blocks(num_layers) if mode == "ir": unit_module = bottleneck_IR elif mode == "ir_se":
# https://github.com/eladrich/pixel2style2pixel """ Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) """ class Backbone(Module): def __init__(self, input_size, num_layers, mode="ir", drop_ratio=0.4, affine=True): super(Backbone, self).__init__() assert input_size in [112, 224], "input_size should be 112 or 224" assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152" assert mode in ["ir", "ir_se"], "mode should be ir or ir_se" blocks = get_blocks(num_layers) if mode == "ir": unit_module = bottleneck_IR elif mode == "ir_se":
next_line: unit_module = bottleneck_IR_SE
gold_snippet_index: 2
created_at: 2023-12-06 07:53:11+00:00
level: 2k

repo_name: rehg-lab/RAVE
file_path: annotator/mmpkg/mmcv/runner/base_module.py
[ { "identifier": "master_only", "path": "annotator/mmpkg/mmcv/runner/dist_utils.py", "snippet": "def master_only(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrappe...
import copy import warnings import torch.nn as nn from abc import ABCMeta from collections import defaultdict from logging import FileHandler from annotator.mmpkg.mmcv.runner.dist_utils import master_only from annotator.mmpkg.mmcv.utils.logging import get_logger, logger_initialized, print_log from ..cnn import initialize from ..cnn.utils.weight_init import update_init_info
token_num: 999
# Copyright (c) OpenMMLab. All rights reserved. class BaseModule(nn.Module, metaclass=ABCMeta): """Base module for all modules in openmmlab. ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional functionality of parameter initialization. Compared with ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes. - ``init_cfg``: the config to control the initialization. - ``init_weights``: The function of parameter initialization and recording initialization information. - ``_params_init_info``: Used to track the parameter initialization information. This attribute only exists during executing the ``init_weights``. Args: init_cfg (dict, optional): Initialization config dict. """ def __init__(self, init_cfg=None): """Initialize BaseModule, inherited from `torch.nn.Module`""" # NOTE init_cfg can be defined in different levels, but init_cfg # in low levels has a higher priority. super(BaseModule, self).__init__() # define default value of init_cfg instead of hard code # in init_weights() function self._is_init = False self.init_cfg = copy.deepcopy(init_cfg) # Backward compatibility in derived classes # if pretrained is not None: # warnings.warn('DeprecationWarning: pretrained is a deprecated \ # key, please consider using init_cfg') # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) @property def is_init(self): return self._is_init def init_weights(self): """Initialize the weights.""" is_top_level_module = False # check if it is top-level module if not hasattr(self, '_params_init_info'): # The `_params_init_info` is used to record the initialization # information of the parameters # the key should be the obj:`nn.Parameter` of model and the value # should be a dict containing # - init_info (str): The string that describes the initialization. # - tmp_mean_value (FloatTensor): The mean of the parameter, # which indicates whether the parameter has been modified. # this attribute would be deleted after all parameters # is initialized. self._params_init_info = defaultdict(dict) is_top_level_module = True # Initialize the `_params_init_info`, # When detecting the `tmp_mean_value` of # the corresponding parameter is changed, update related # initialization information for name, param in self.named_parameters(): self._params_init_info[param][ 'init_info'] = f'The value is the same before and ' \ f'after calling `init_weights` ' \ f'of {self.__class__.__name__} ' self._params_init_info[param][ 'tmp_mean_value'] = param.data.mean() # pass `params_init_info` to all submodules # All submodules share the same `params_init_info`, # so it will be updated when parameters are # modified at any level of the model. for sub_module in self.modules(): sub_module._params_init_info = self._params_init_info # Get the initialized logger, if not exist, # create a logger named `mmcv` logger_names = list(logger_initialized.keys()) logger_name = logger_names[0] if logger_names else 'mmcv' module_name = self.__class__.__name__ if not self._is_init: if self.init_cfg:
# Copyright (c) OpenMMLab. All rights reserved. class BaseModule(nn.Module, metaclass=ABCMeta): """Base module for all modules in openmmlab. ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional functionality of parameter initialization. Compared with ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes. - ``init_cfg``: the config to control the initialization. - ``init_weights``: The function of parameter initialization and recording initialization information. - ``_params_init_info``: Used to track the parameter initialization information. This attribute only exists during executing the ``init_weights``. Args: init_cfg (dict, optional): Initialization config dict. """ def __init__(self, init_cfg=None): """Initialize BaseModule, inherited from `torch.nn.Module`""" # NOTE init_cfg can be defined in different levels, but init_cfg # in low levels has a higher priority. super(BaseModule, self).__init__() # define default value of init_cfg instead of hard code # in init_weights() function self._is_init = False self.init_cfg = copy.deepcopy(init_cfg) # Backward compatibility in derived classes # if pretrained is not None: # warnings.warn('DeprecationWarning: pretrained is a deprecated \ # key, please consider using init_cfg') # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) @property def is_init(self): return self._is_init def init_weights(self): """Initialize the weights.""" is_top_level_module = False # check if it is top-level module if not hasattr(self, '_params_init_info'): # The `_params_init_info` is used to record the initialization # information of the parameters # the key should be the obj:`nn.Parameter` of model and the value # should be a dict containing # - init_info (str): The string that describes the initialization. # - tmp_mean_value (FloatTensor): The mean of the parameter, # which indicates whether the parameter has been modified. # this attribute would be deleted after all parameters # is initialized. self._params_init_info = defaultdict(dict) is_top_level_module = True # Initialize the `_params_init_info`, # When detecting the `tmp_mean_value` of # the corresponding parameter is changed, update related # initialization information for name, param in self.named_parameters(): self._params_init_info[param][ 'init_info'] = f'The value is the same before and ' \ f'after calling `init_weights` ' \ f'of {self.__class__.__name__} ' self._params_init_info[param][ 'tmp_mean_value'] = param.data.mean() # pass `params_init_info` to all submodules # All submodules share the same `params_init_info`, # so it will be updated when parameters are # modified at any level of the model. for sub_module in self.modules(): sub_module._params_init_info = self._params_init_info # Get the initialized logger, if not exist, # create a logger named `mmcv` logger_names = list(logger_initialized.keys()) logger_name = logger_names[0] if logger_names else 'mmcv' module_name = self.__class__.__name__ if not self._is_init: if self.init_cfg:
next_line: print_log(
gold_snippet_index: 1
created_at: 2023-12-05 02:51:53+00:00
level: 2k
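
A minimal usage sketch for the init_cfg mechanism described in the docstring above, assuming the vendored mmcv package is importable; `TinyHead` is a hypothetical subclass:

import torch.nn as nn
from annotator.mmpkg.mmcv.runner.base_module import BaseModule

class TinyHead(BaseModule):
    def __init__(self):
        # init_cfg at this level takes priority over any parent-level cfg
        super().__init__(init_cfg=dict(type='Kaiming', layer='Conv2d'))
        self.conv = nn.Conv2d(16, 16, 3, padding=1)

head = TinyHead()
head.init_weights()   # applies the Kaiming init and records per-parameter info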

repo_name: worldcoin/open-iris
file_path: tests/e2e_tests/pipelines/test_e2e_iris_pipeline.py
[ { "identifier": "compare_debug_pipeline_outputs", "path": "tests/e2e_tests/utils.py", "snippet": "def compare_debug_pipeline_outputs(pipeline_output_1: Dict[str, Any], pipeline_output_2: Dict[str, Any]):\n \"\"\"Compare two IRISPipeline outputs for debugging.\n\n Args:\n pipeline_output_1 (...
import os import pickle import cv2 import numpy as np import pytest from typing import Any, Dict from iris.pipelines.iris_pipeline import IRISPipeline from tests.e2e_tests.utils import compare_debug_pipeline_outputs, compare_iris_pipeline_outputs
token_num: 906
@pytest.fixture def ir_image() -> np.ndarray: ir_image_path = os.path.join(os.path.dirname(__file__), "mocks", "inputs", "anonymized.png") img_data = cv2.imread(ir_image_path, cv2.IMREAD_GRAYSCALE) return img_data @pytest.fixture def expected_iris_pipeline_output() -> Dict[str, Any]: expected_iris_code_path = os.path.join( os.path.dirname(__file__), "mocks", "outputs", "expected_iris_orb_pipeline_output.pickle" ) return pickle.load(open(expected_iris_code_path, "rb")) @pytest.fixture def expected_debug_pipeline_output() -> Dict[str, Any]: expected_iris_code_path = os.path.join( os.path.dirname(__file__), "mocks", "outputs", "expected_iris_debug_pipeline_output.pickle" ) return pickle.load(open(expected_iris_code_path, "rb")) def test_e2e_iris_pipeline(ir_image: np.ndarray, expected_iris_pipeline_output: Dict[str, Any]) -> None: """End-to-end test of the IRISPipeline in the Orb setup""" iris_pipeline = IRISPipeline() computed_pipeline_output = iris_pipeline(img_data=ir_image, eye_side="right") compare_iris_pipeline_outputs(computed_pipeline_output, expected_iris_pipeline_output) def test_e2e_debug_pipeline(ir_image: np.ndarray, expected_debug_pipeline_output: Dict[str, Any]) -> None: """End-to-end test of the IRISPipeline in the debug setup""" iris_pipeline = IRISPipeline(env=IRISPipeline.DEBUGGING_ENVIRONMENT) computed_pipeline_output = iris_pipeline(img_data=ir_image, eye_side="right")
@pytest.fixture def ir_image() -> np.ndarray: ir_image_path = os.path.join(os.path.dirname(__file__), "mocks", "inputs", "anonymized.png") img_data = cv2.imread(ir_image_path, cv2.IMREAD_GRAYSCALE) return img_data @pytest.fixture def expected_iris_pipeline_output() -> Dict[str, Any]: expected_iris_code_path = os.path.join( os.path.dirname(__file__), "mocks", "outputs", "expected_iris_orb_pipeline_output.pickle" ) return pickle.load(open(expected_iris_code_path, "rb")) @pytest.fixture def expected_debug_pipeline_output() -> Dict[str, Any]: expected_iris_code_path = os.path.join( os.path.dirname(__file__), "mocks", "outputs", "expected_iris_debug_pipeline_output.pickle" ) return pickle.load(open(expected_iris_code_path, "rb")) def test_e2e_iris_pipeline(ir_image: np.ndarray, expected_iris_pipeline_output: Dict[str, Any]) -> None: """End-to-end test of the IRISPipeline in the Orb setup""" iris_pipeline = IRISPipeline() computed_pipeline_output = iris_pipeline(img_data=ir_image, eye_side="right") compare_iris_pipeline_outputs(computed_pipeline_output, expected_iris_pipeline_output) def test_e2e_debug_pipeline(ir_image: np.ndarray, expected_debug_pipeline_output: Dict[str, Any]) -> None: """End-to-end test of the IRISPipeline in the debug setup""" iris_pipeline = IRISPipeline(env=IRISPipeline.DEBUGGING_ENVIRONMENT) computed_pipeline_output = iris_pipeline(img_data=ir_image, eye_side="right")
next_line: compare_debug_pipeline_outputs(computed_pipeline_output, expected_debug_pipeline_output)
gold_snippet_index: 0
created_at: 2023-12-09 22:43:09+00:00
level: 2k

repo_name: laixintao/mactop
file_path: mactop/panels/cpu_percpu_usage.py
[ { "identifier": "LabeledColorBar", "path": "mactop/widgets/labeled_colorbar.py", "snippet": "class LabeledColorBar(Static):\n percentages = reactive(None)\n\n DEFAULT_CSS = \"\"\"\n LabeledColorBar {\n layout: horizontal;\n }\n LabeledColorBar > ColorBar {\n width: 1fr;\n ...
import logging from functools import partial from textual.app import ComposeResult from mactop.widgets import LabeledColorBar from mactop.metrics_store import metrics from mactop.utils.formatting import render_cpu_percentage_100 from ._base import BaseStatic from mactop import const
token_num: 1272
logger = logging.getLogger(__name__) def get_percpu_percent(index): cpus = metrics.psutilmetrics.cpu_percent_percpu if not cpus: return [0, 0, 0, 0] cpu_percent = cpus[index] return [ cpu_percent.user, cpu_percent.nice, cpu_percent.system, cpu_percent.idle, ] class CPUUsageBarPanel(BaseStatic): BORDER_TITLE = "CPU" DEFAULT_CSS = """ CPUUsageBarPanel { layout: grid; grid-gutter: 0 1; } """ def __init__( self, color_user=const.COLOR_USER, color_nice=const.COLOR_NICE, color_system=const.COLOR_SYSTEM, color_idle=const.COLOR_IDLE, columns=4, *args, **kwargs, ) -> None: super().__init__(*args, **kwargs) self.color_user = color_user self.color_nice = color_nice self.color_system = color_system self.color_idle = color_idle self.columns = int(columns) def compose(self) -> ComposeResult: self.styles.grid_size_columns = self.columns cpu_count = metrics.psutilmetrics.cpu_count for index in range(cpu_count): yield LabeledColorBar( prefix_label=f"[#FFFFE0]{index:>2}[/#FFFFE0]", color_choices=[ self.color_user, self.color_nice, self.color_system, self.color_idle, ], percentages_update_fn=partial(get_percpu_percent, index=index),
logger = logging.getLogger(__name__) def get_percpu_percent(index): cpus = metrics.psutilmetrics.cpu_percent_percpu if not cpus: return [0, 0, 0, 0] cpu_percent = cpus[index] return [ cpu_percent.user, cpu_percent.nice, cpu_percent.system, cpu_percent.idle, ] class CPUUsageBarPanel(BaseStatic): BORDER_TITLE = "CPU" DEFAULT_CSS = """ CPUUsageBarPanel { layout: grid; grid-gutter: 0 1; } """ def __init__( self, color_user=const.COLOR_USER, color_nice=const.COLOR_NICE, color_system=const.COLOR_SYSTEM, color_idle=const.COLOR_IDLE, columns=4, *args, **kwargs, ) -> None: super().__init__(*args, **kwargs) self.color_user = color_user self.color_nice = color_nice self.color_system = color_system self.color_idle = color_idle self.columns = int(columns) def compose(self) -> ComposeResult: self.styles.grid_size_columns = self.columns cpu_count = metrics.psutilmetrics.cpu_count for index in range(cpu_count): yield LabeledColorBar( prefix_label=f"[#FFFFE0]{index:>2}[/#FFFFE0]", color_choices=[ self.color_user, self.color_nice, self.color_system, self.color_idle, ], percentages_update_fn=partial(get_percpu_percent, index=index),
next_line: value_render_fn=render_cpu_percentage_100,
gold_snippet_index: 2
created_at: 2023-12-05 09:12:42+00:00
level: 2k
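
Each bar widget above receives a zero-argument callable built with functools.partial, binding the CPU index at construction time so the widget can refresh itself later. The pattern in isolation, with a toy percentage function:

from functools import partial

def get_percpu_percent(index):
    return [index, 0, 0, 100 - index]      # stand-in user/nice/system/idle

update_fn = partial(get_percpu_percent, index=3)
print(update_fn())                         # [3, 0, 0, 97]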

repo_name: geopavlakos/hamer
file_path: hamer/datasets/vitdet_dataset.py
[ { "identifier": "convert_cvimg_to_tensor", "path": "hamer/datasets/utils.py", "snippet": "def convert_cvimg_to_tensor(cvimg: np.array):\n \"\"\"\n Convert image from HWC to CHW format.\n Args:\n cvimg (np.array): Image of shape (H, W, 3) as loaded by OpenCV.\n Returns:\n np.arr...
from typing import Dict from skimage.filters import gaussian from yacs.config import CfgNode from .utils import (convert_cvimg_to_tensor, expand_to_aspect_ratio, generate_image_patch_cv2) import cv2 import numpy as np import torch
token_num: 1342
DEFAULT_MEAN = 255. * np.array([0.485, 0.456, 0.406]) DEFAULT_STD = 255. * np.array([0.229, 0.224, 0.225]) class ViTDetDataset(torch.utils.data.Dataset): def __init__(self, cfg: CfgNode, img_cv2: np.array, boxes: np.array, right: np.array, rescale_factor=2.5, train: bool = False, **kwargs): super().__init__() self.cfg = cfg self.img_cv2 = img_cv2 # self.boxes = boxes assert train == False, "ViTDetDataset is only for inference" self.train = train self.img_size = cfg.MODEL.IMAGE_SIZE self.mean = 255. * np.array(self.cfg.MODEL.IMAGE_MEAN) self.std = 255. * np.array(self.cfg.MODEL.IMAGE_STD) # Preprocess annotations boxes = boxes.astype(np.float32) self.center = (boxes[:, 2:4] + boxes[:, 0:2]) / 2.0 self.scale = rescale_factor * (boxes[:, 2:4] - boxes[:, 0:2]) / 200.0 self.personid = np.arange(len(boxes), dtype=np.int32) self.right = right.astype(np.float32) def __len__(self) -> int: return len(self.personid) def __getitem__(self, idx: int) -> Dict[str, np.array]: center = self.center[idx].copy() center_x = center[0] center_y = center[1] scale = self.scale[idx] BBOX_SHAPE = self.cfg.MODEL.get('BBOX_SHAPE', None)
DEFAULT_MEAN = 255. * np.array([0.485, 0.456, 0.406]) DEFAULT_STD = 255. * np.array([0.229, 0.224, 0.225]) class ViTDetDataset(torch.utils.data.Dataset): def __init__(self, cfg: CfgNode, img_cv2: np.array, boxes: np.array, right: np.array, rescale_factor=2.5, train: bool = False, **kwargs): super().__init__() self.cfg = cfg self.img_cv2 = img_cv2 # self.boxes = boxes assert train == False, "ViTDetDataset is only for inference" self.train = train self.img_size = cfg.MODEL.IMAGE_SIZE self.mean = 255. * np.array(self.cfg.MODEL.IMAGE_MEAN) self.std = 255. * np.array(self.cfg.MODEL.IMAGE_STD) # Preprocess annotations boxes = boxes.astype(np.float32) self.center = (boxes[:, 2:4] + boxes[:, 0:2]) / 2.0 self.scale = rescale_factor * (boxes[:, 2:4] - boxes[:, 0:2]) / 200.0 self.personid = np.arange(len(boxes), dtype=np.int32) self.right = right.astype(np.float32) def __len__(self) -> int: return len(self.personid) def __getitem__(self, idx: int) -> Dict[str, np.array]: center = self.center[idx].copy() center_x = center[0] center_y = center[1] scale = self.scale[idx] BBOX_SHAPE = self.cfg.MODEL.get('BBOX_SHAPE', None)
next_line: bbox_size = expand_to_aspect_ratio(scale*200, target_aspect_ratio=BBOX_SHAPE).max()
gold_snippet_index: 1
created_at: 2023-12-08 09:07:07+00:00
level: 2k
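
The box preprocessing above converts xyxy detections into a center plus a size expressed in the 200-pixel units that HMR-style cropping utilities expect. The same arithmetic on one concrete box:

import numpy as np

boxes = np.array([[10., 20., 110., 220.]], dtype=np.float32)   # x1, y1, x2, y2
center = (boxes[:, 2:4] + boxes[:, 0:2]) / 2.0                 # [[ 60. 120.]]
scale = 2.5 * (boxes[:, 2:4] - boxes[:, 0:2]) / 200.0          # [[1.25 2.5 ]]
print(center, scale)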

repo_name: rogeriochaves/driver
file_path: driver/annotator.py
[ { "identifier": "detect_components", "path": "driver/UIED/run_single.py", "snippet": "def detect_components(\n input_path_img, ocr_result: AnnotatedImage, showOCR=False, showUIED=False\n) -> DetectElementsResponse:\n output_root = \"output\"\n\n # Resizes the image to be smaller because this pr...
import math import os import cv2 from PIL import Image, ImageDraw, ImageFont from driver.UIED.run_single import detect_components from driver.UIED.utils import show_image from driver.ocr_call import ocr_text_detection from driver.types import DebugConfig, ImgMultiplierFactor, LabelMap from driver.utils import is_retina_display
token_num: 1310
def annotate_image(input_image_path, debug: DebugConfig): ocr_result = ocr_text_detection(input_image_path, debug) components = detect_components( input_image_path, ocr_result, showOCR=debug["ocr"], showUIED=debug["uied"], ) original_image = Image.open(input_image_path) size = {"width": original_image.width, "height": original_image.height} img_multiplier_factor: ImgMultiplierFactor = { "height": components["img_shape"][0] / size["height"], "width": components["img_shape"][1] / size["width"], } label_counter = 1 label_prefix = "A" drawn_positions = []
def annotate_image(input_image_path, debug: DebugConfig): ocr_result = ocr_text_detection(input_image_path, debug) components = detect_components( input_image_path, ocr_result, showOCR=debug["ocr"], showUIED=debug["uied"], ) original_image = Image.open(input_image_path) size = {"width": original_image.width, "height": original_image.height} img_multiplier_factor: ImgMultiplierFactor = { "height": components["img_shape"][0] / size["height"], "width": components["img_shape"][1] / size["width"], } label_counter = 1 label_prefix = "A" drawn_positions = []
next_line: label_map: LabelMap = {}
gold_snippet_index: 3
created_at: 2023-12-10 17:18:28+00:00
level: 2k

repo_name: baidubce/app-builder
file_path: appbuilder/core/components/embeddings/base.py
[ { "identifier": "Component", "path": "appbuilder/core/component.py", "snippet": "class Component:\n r\"\"\"Component基类, 其它实现的Component子类需要继承该基类,并至少实现run方法.\"\"\"\n\n def __init__(self,\n meta: Optional[ComponentArguments] = ComponentArguments(),\n secret_key: Option...
from abc import abstractmethod from typing import List, Union from appbuilder.core.component import Component from appbuilder.core.message import Message from appbuilder.core.component import ComponentArguments
token_num: 1120
""" base """ # Copyright (c) 2023 Baidu, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class EmbeddingBaseComponent(Component): """ EmbeddingBaseComponent """ name: str version: str
""" base """ # Copyright (c) 2023 Baidu, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class EmbeddingBaseComponent(Component): """ EmbeddingBaseComponent """ name: str version: str
next_line: meta: ComponentArguments
gold_snippet_index: 2
created_at: 2023-12-05 01:48:12+00:00
level: 2k

repo_name: corfyi/UCMCTrack
file_path: run_mot20_test.py
[ { "identifier": "run_ucmc", "path": "util/run_ucmc.py", "snippet": "def run_ucmc(args, det_path = \"det_results/mot17/yolox_x_ablation\",\n cam_path = \"cam_para/mot17\",\n gmc_path = \"gmc/mot17\",\n out_path = \"output/mot17\",\n ...
from util.run_ucmc import run_ucmc, make_args
token_num: 1416
if __name__ == '__main__': det_path = "det_results/mot20" cam_path = "cam_para/mot20" gmc_path = "gmc/mot20" out_path = "output/mot20" exp_name = "test" dataset = "MOT20"
if __name__ == '__main__': det_path = "det_results/mot20" cam_path = "cam_para/mot20" gmc_path = "gmc/mot20" out_path = "output/mot20" exp_name = "test" dataset = "MOT20"
next_line: args = make_args()
gold_snippet_index: 1
created_at: 2023-12-12 07:29:20+00:00
level: 2k

repo_name: ingra14m/Specular-Gaussians
file_path: metrics.py
[ { "identifier": "ssim", "path": "utils/loss_utils.py", "snippet": "def ssim(img1, img2, window_size=11, size_average=True):\n channel = img1.size(-3)\n window = create_window(window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as...
from pathlib import Path from PIL import Image from utils.loss_utils import ssim from tqdm import tqdm from utils.image_utils import psnr from argparse import ArgumentParser import os import torch import torchvision.transforms.functional as tf import lpips import json
token_num: 721
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # # from lpipsPyTorch import lpips def readImages(renders_dir, gt_dir): renders = [] gts = [] image_names = [] for fname in os.listdir(renders_dir): render = Image.open(renders_dir / fname) gt = Image.open(gt_dir / fname) renders.append(tf.to_tensor(render).unsqueeze(0)[:, :3, :, :].cuda()) gts.append(tf.to_tensor(gt).unsqueeze(0)[:, :3, :, :].cuda()) image_names.append(fname) return renders, gts, image_names def evaluate(model_paths): full_dict = {} per_view_dict = {} full_dict_polytopeonly = {} per_view_dict_polytopeonly = {} print("") for scene_dir in model_paths: try: print("Scene:", scene_dir) full_dict[scene_dir] = {} per_view_dict[scene_dir] = {} full_dict_polytopeonly[scene_dir] = {} per_view_dict_polytopeonly[scene_dir] = {} test_dir = Path(scene_dir) / "test" for method in os.listdir(test_dir): if not method.startswith("ours"): continue print("Method:", method) full_dict[scene_dir][method] = {} per_view_dict[scene_dir][method] = {} full_dict_polytopeonly[scene_dir][method] = {} per_view_dict_polytopeonly[scene_dir][method] = {} method_dir = test_dir / method gt_dir = method_dir / "gt" renders_dir = method_dir / "renders" renders, gts, image_names = readImages(renders_dir, gt_dir) ssims = [] psnrs = [] lpipss = [] for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"):
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact george.drettakis@inria.fr # # from lpipsPyTorch import lpips def readImages(renders_dir, gt_dir): renders = [] gts = [] image_names = [] for fname in os.listdir(renders_dir): render = Image.open(renders_dir / fname) gt = Image.open(gt_dir / fname) renders.append(tf.to_tensor(render).unsqueeze(0)[:, :3, :, :].cuda()) gts.append(tf.to_tensor(gt).unsqueeze(0)[:, :3, :, :].cuda()) image_names.append(fname) return renders, gts, image_names def evaluate(model_paths): full_dict = {} per_view_dict = {} full_dict_polytopeonly = {} per_view_dict_polytopeonly = {} print("") for scene_dir in model_paths: try: print("Scene:", scene_dir) full_dict[scene_dir] = {} per_view_dict[scene_dir] = {} full_dict_polytopeonly[scene_dir] = {} per_view_dict_polytopeonly[scene_dir] = {} test_dir = Path(scene_dir) / "test" for method in os.listdir(test_dir): if not method.startswith("ours"): continue print("Method:", method) full_dict[scene_dir][method] = {} per_view_dict[scene_dir][method] = {} full_dict_polytopeonly[scene_dir][method] = {} per_view_dict_polytopeonly[scene_dir][method] = {} method_dir = test_dir / method gt_dir = method_dir / "gt" renders_dir = method_dir / "renders" renders, gts, image_names = readImages(renders_dir, gt_dir) ssims = [] psnrs = [] lpipss = [] for idx in tqdm(range(len(renders)), desc="Metric evaluation progress"):
next_line: ssims.append(ssim(renders[idx], gts[idx]))
gold_snippet_index: 0
created_at: 2023-12-12 14:59:01+00:00
level: 2k
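
The loop accumulates per-image SSIM, PSNR, and LPIPS. For reference, the PSNR term reduces to a log of the mean squared error over [0, 1] images; a self-contained sketch of the standard definition (the repo's psnr utility may differ in reduction details):

import torch

def psnr(img1: torch.Tensor, img2: torch.Tensor) -> torch.Tensor:
    mse = torch.mean((img1 - img2) ** 2)
    return 20.0 * torch.log10(1.0 / torch.sqrt(mse))

print(psnr(torch.rand(1, 3, 8, 8), torch.rand(1, 3, 8, 8)))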

repo_name: u2seg/U2Seg
file_path: detectron2/evaluation/evaluator.py
[ { "identifier": "get_world_size", "path": "detectron2/utils/comm.py", "snippet": "def get_world_size() -> int:\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "is_main_process", "pat...
import datetime import logging import time import torch from collections import OrderedDict, abc from contextlib import ExitStack, contextmanager from typing import List, Union from torch import nn from detectron2.utils.comm import get_world_size, is_main_process from detectron2.utils.logger import log_every_n_seconds
token_num: 1151
# Copyright (c) Facebook, Inc. and its affiliates. class DatasetEvaluator: """ Base class for a dataset evaluator. The function :func:`inference_on_dataset` runs the model over all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs. This class will accumulate information of the inputs/outputs (by :meth:`process`), and produce evaluation results in the end (by :meth:`evaluate`). """ def reset(self): """ Preparation for a new round of evaluation. Should be called before starting a round of evaluation. """ pass def process(self, inputs, outputs): """ Process the pair of inputs and outputs. If they contain batches, the pairs can be consumed one-by-one using `zip`: .. code-block:: python for input_, output in zip(inputs, outputs): # do evaluation on single input/output pair ... Args: inputs (list): the inputs that's used to call the model. outputs (list): the return value of `model(inputs)` """ pass def evaluate(self): """ Evaluate/summarize the performance, after processing all input/output pairs. Returns: dict: A new evaluator class can return a dict of arbitrary format as long as the user can process the results. In our train_net.py, we expect the following format: * key: the name of the task (e.g., bbox) * value: a dict of {metric name: score}, e.g.: {"AP50": 80} """ pass class DatasetEvaluators(DatasetEvaluator): """ Wrapper class to combine multiple :class:`DatasetEvaluator` instances. This class dispatches every evaluation call to all of its :class:`DatasetEvaluator`. """ def __init__(self, evaluators): """ Args: evaluators (list): the evaluators to combine. """ super().__init__() self._evaluators = evaluators def reset(self): for evaluator in self._evaluators: evaluator.reset() def process(self, inputs, outputs): for evaluator in self._evaluators: evaluator.process(inputs, outputs) def evaluate(self): results = OrderedDict() for evaluator in self._evaluators: result = evaluator.evaluate() if is_main_process() and result is not None: for k, v in result.items(): assert ( k not in results ), "Different evaluators produce results with the same key {}".format(k) results[k] = v return results def inference_on_dataset( model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None], callbacks=None, ): """ Run model on the data_loader and evaluate the metrics with evaluator. Also benchmark the inference speed of `model.__call__` accurately. The model will be used in eval mode. Args: model (callable): a callable which takes an object from `data_loader` and returns some outputs. If it's an nn.Module, it will be temporarily set to `eval` mode. If you wish to evaluate a model in `training` mode instead, you can wrap the given model and override its behavior of `.eval()` and `.train()`. data_loader: an iterable object with a length. The elements it generates will be the inputs to the model. evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, but don't want to do any evaluation. callbacks (dict of callables): a dictionary of callback functions which can be called at each stage of inference. Returns: The return value of `evaluator.evaluate()` """
# Copyright (c) Facebook, Inc. and its affiliates. class DatasetEvaluator: """ Base class for a dataset evaluator. The function :func:`inference_on_dataset` runs the model over all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs. This class will accumulate information of the inputs/outputs (by :meth:`process`), and produce evaluation results in the end (by :meth:`evaluate`). """ def reset(self): """ Preparation for a new round of evaluation. Should be called before starting a round of evaluation. """ pass def process(self, inputs, outputs): """ Process the pair of inputs and outputs. If they contain batches, the pairs can be consumed one-by-one using `zip`: .. code-block:: python for input_, output in zip(inputs, outputs): # do evaluation on single input/output pair ... Args: inputs (list): the inputs that's used to call the model. outputs (list): the return value of `model(inputs)` """ pass def evaluate(self): """ Evaluate/summarize the performance, after processing all input/output pairs. Returns: dict: A new evaluator class can return a dict of arbitrary format as long as the user can process the results. In our train_net.py, we expect the following format: * key: the name of the task (e.g., bbox) * value: a dict of {metric name: score}, e.g.: {"AP50": 80} """ pass class DatasetEvaluators(DatasetEvaluator): """ Wrapper class to combine multiple :class:`DatasetEvaluator` instances. This class dispatches every evaluation call to all of its :class:`DatasetEvaluator`. """ def __init__(self, evaluators): """ Args: evaluators (list): the evaluators to combine. """ super().__init__() self._evaluators = evaluators def reset(self): for evaluator in self._evaluators: evaluator.reset() def process(self, inputs, outputs): for evaluator in self._evaluators: evaluator.process(inputs, outputs) def evaluate(self): results = OrderedDict() for evaluator in self._evaluators: result = evaluator.evaluate() if is_main_process() and result is not None: for k, v in result.items(): assert ( k not in results ), "Different evaluators produce results with the same key {}".format(k) results[k] = v return results def inference_on_dataset( model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None], callbacks=None, ): """ Run model on the data_loader and evaluate the metrics with evaluator. Also benchmark the inference speed of `model.__call__` accurately. The model will be used in eval mode. Args: model (callable): a callable which takes an object from `data_loader` and returns some outputs. If it's an nn.Module, it will be temporarily set to `eval` mode. If you wish to evaluate a model in `training` mode instead, you can wrap the given model and override its behavior of `.eval()` and `.train()`. data_loader: an iterable object with a length. The elements it generates will be the inputs to the model. evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, but don't want to do any evaluation. callbacks (dict of callables): a dictionary of callback functions which can be called at each stage of inference. Returns: The return value of `evaluator.evaluate()` """
next_line: num_devices = get_world_size()
gold_snippet_index: 0
created_at: 2023-12-05 01:13:31+00:00
level: 2k
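
A minimal, runnable sketch of the evaluator protocol documented above, assuming a standard detectron2 install; the model and loader are toy stand-ins rather than real detectron2 objects:

from detectron2.evaluation import DatasetEvaluator, inference_on_dataset

class CountingEvaluator(DatasetEvaluator):
    def reset(self):
        self.n = 0
    def process(self, inputs, outputs):
        self.n += len(outputs)
    def evaluate(self):
        return {"count": {"n": self.n}}

identity_model = lambda batch: batch                # any callable works
print(inference_on_dataset(identity_model, [[1, 2], [3]], CountingEvaluator()))
# -> {'count': {'n': 3}}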

repo_name: upfusion3d/upfusion
file_path: control_net/cldm/ddim_hacked.py
[ { "identifier": "make_ddim_sampling_parameters", "path": "control_net/ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n ...
import torch import numpy as np from tqdm import tqdm from control_net.ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
token_num: 844
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
next_line: self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
gold_snippet_index: 1
created_at: 2023-12-12 00:49:11+00:00
level: 2k
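
make_schedule above delegates to make_ddim_timesteps from the imported ldm utilities. Its "uniform" branch amounts to striding through the DDPM steps and shifting by one; a sketch of that arithmetic, mirroring the ldm helper as I understand it:

import numpy as np

num_ddpm_timesteps, num_ddim_steps = 1000, 50
c = num_ddpm_timesteps // num_ddim_steps
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) + 1
print(ddim_timesteps[:5], ddim_timesteps[-1])   # [ 1 21 41 61 81] 981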

repo_name: modelscope/normal-depth-diffusion
file_path: libs/ControlNet-v1-1-nightly/annotator/normalbae/models/baseline.py
[ { "identifier": "UpSampleBN", "path": "libs/ControlNet-v1-1-nightly/annotator/normalbae/models/submodules/submodules.py", "snippet": "class UpSampleBN(nn.Module):\n\n def __init__(self, skip_input, output_features):\n super(UpSampleBN, self).__init__()\n\n self._net = nn.Sequential(\n ...
import torch import torch.nn as nn import torch.nn.functional as F from .submodules.submodules import UpSampleBN, norm_normalize
token_num: 928
# This is the baseline encoder-decoder we used in the ablation study class NNET(nn.Module): def __init__(self, args=None): super(NNET, self).__init__() self.encoder = Encoder() self.decoder = Decoder(num_classes=4) def forward(self, x, **kwargs): out = self.decoder(self.encoder(x), **kwargs) # Bilinearly upsample the output to match the input resolution up_out = F.interpolate( out, size=[x.size(2), x.size(3)], mode='bilinear', align_corners=False) # L2-normalize the first three channels / ensure positive value for concentration parameters (kappa) up_out = norm_normalize(up_out) return up_out def get_1x_lr_params(self): # lr/10 learning rate return self.encoder.parameters() def get_10x_lr_params(self): # lr learning rate modules = [self.decoder] for m in modules: yield from m.parameters() # Encoder class Encoder(nn.Module): def __init__(self): super(Encoder, self).__init__() basemodel_name = 'tf_efficientnet_b5_ap' basemodel = torch.hub.load( 'rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True) # Remove last layer basemodel.global_pool = nn.Identity() basemodel.classifier = nn.Identity() self.original_model = basemodel def forward(self, x): features = [x] for k, v in self.original_model._modules.items(): if (k == 'blocks'): for ki, vi in v._modules.items(): features.append(vi(features[-1])) else: features.append(v(features[-1])) return features # Decoder (no pixel-wise MLP, no uncertainty-guided sampling) class Decoder(nn.Module): def __init__(self, num_classes=4): super(Decoder, self).__init__() self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
# This is the baseline encoder-decoder we used in the ablation study class NNET(nn.Module): def __init__(self, args=None): super(NNET, self).__init__() self.encoder = Encoder() self.decoder = Decoder(num_classes=4) def forward(self, x, **kwargs): out = self.decoder(self.encoder(x), **kwargs) # Bilinearly upsample the output to match the input resolution up_out = F.interpolate( out, size=[x.size(2), x.size(3)], mode='bilinear', align_corners=False) # L2-normalize the first three channels / ensure positive value for concentration parameters (kappa) up_out = norm_normalize(up_out) return up_out def get_1x_lr_params(self): # lr/10 learning rate return self.encoder.parameters() def get_10x_lr_params(self): # lr learning rate modules = [self.decoder] for m in modules: yield from m.parameters() # Encoder class Encoder(nn.Module): def __init__(self): super(Encoder, self).__init__() basemodel_name = 'tf_efficientnet_b5_ap' basemodel = torch.hub.load( 'rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True) # Remove last layer basemodel.global_pool = nn.Identity() basemodel.classifier = nn.Identity() self.original_model = basemodel def forward(self, x): features = [x] for k, v in self.original_model._modules.items(): if (k == 'blocks'): for ki, vi in v._modules.items(): features.append(vi(features[-1])) else: features.append(v(features[-1])) return features # Decoder (no pixel-wise MLP, no uncertainty-guided sampling) class Decoder(nn.Module): def __init__(self, num_classes=4): super(Decoder, self).__init__() self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
next_line: self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024)
gold_snippet_index: 0
created_at: 2023-12-06 07:29:34+00:00
level: 2k
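
norm_normalize, imported above, post-processes the 4-channel output so the first three channels form unit normals and the fourth stays a positive concentration (kappa). A plausible reading in plain torch, offered as an approximation rather than the repo's code:

import torch
import torch.nn.functional as F

out = torch.randn(1, 4, 8, 8)
normal = F.normalize(out[:, :3], dim=1)    # unit-length normal vectors
kappa = F.elu(out[:, 3:]) + 1.0            # positive concentration channel
out = torch.cat([normal, kappa], dim=1)
print(out.shape)                           # torch.Size([1, 4, 8, 8])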

repo_name: daswer123/xtts-webui
file_path: scripts/resemble_enhance/denoiser/inference.py
[ { "identifier": "inference", "path": "scripts/resemble_enhance/inference.py", "snippet": "def inference(model, dwav, sr, device, chunk_seconds: float = 30.0, overlap_seconds: float = 1.0):\n remove_weight_norm_recursively(model)\n\n hp: HParams = model.hp\n\n dwav = resample(\n dwav,\n ...
import logging import torch from functools import cache from ..inference import inference from .train import Denoiser, HParams
token_num: 664
logger = logging.getLogger(__name__) @cache def load_denoiser(run_dir, device): if run_dir is None:
logger = logging.getLogger(__name__) @cache def load_denoiser(run_dir, device): if run_dir is None:
next_line: return Denoiser(HParams())
gold_snippet_index: 1
created_at: 2023-12-14 06:34:12+00:00
level: 2k
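
load_denoiser is wrapped in functools.cache so repeated calls with the same (run_dir, device) reuse one model instead of rebuilding it. The mechanism in isolation, with a toy builder:

from functools import cache

@cache
def expensive_build(key: str) -> list:
    print("building", key)                 # printed once per distinct key
    return [key]

assert expensive_build("a") is expensive_build("a")   # built once, then reused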

repo_name: FrozenBurning/PrimDiffusion
file_path: dva/io.py
[ { "identifier": "AttrDict", "path": "dva/attr_dict.py", "snippet": "class AttrDict:\n def __init__(self, entries):\n self.add_entries_(entries)\n\n def keys(self):\n return self.__dict__.keys()\n\n def values(self):\n return self.__dict__.values()\n\n def __getitem__(sel...
import json import cv2 import numpy as np import copy import importlib import pickle import os from typing import Any, Dict from dva.attr_dict import AttrDict from dva.geom import compute_v2uv, compute_neighbours
token_num: 1514
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. def load_module(module_name, class_name=None, silent: bool = False): module = importlib.import_module(module_name) return getattr(module, class_name) if class_name else module def load_class(class_name): return load_module(*class_name.rsplit(".", 1)) def load_from_config(config, **kwargs): """Instantiate an object given a config and arguments.""" assert "class_name" in config and "module_name" not in config config = copy.deepcopy(config) class_name = config.pop("class_name") object_class = load_class(class_name) return object_class(**config, **kwargs) def load_opencv_calib(extrin_path, intrin_path): cameras = {} fse = cv2.FileStorage() fse.open(extrin_path, cv2.FileStorage_READ) fsi = cv2.FileStorage() fsi.open(intrin_path, cv2.FileStorage_READ) names = [ fse.getNode("names").at(c).string() for c in range(fse.getNode("names").size()) ] for camera in names: rot = fse.getNode(f"R_{camera}").mat() R = fse.getNode(f"Rot_{camera}").mat() T = fse.getNode(f"T_{camera}").mat() R_pred = cv2.Rodrigues(rot)[0] assert np.all(np.isclose(R_pred, R)) K = fsi.getNode(f"K_{camera}").mat() cameras[camera] = { "Rt": np.concatenate([R, T], axis=1).astype(np.float32), "K": K.astype(np.float32), } return cameras def load_smpl_params(params): return { k: np.array(v[0], dtype=np.float32) for k, v in params[0].items() if k != "id" } def load_smpl_topology(data_struct) -> Dict[str, Any]: # TODO: compute_ topology = { "vi": data_struct["f"].astype(np.int64), "vti": data_struct["ft"].astype(np.int64), "vt": data_struct["vt"].astype(np.float32), "n_verts": data_struct["v_template"].shape[0], }
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. def load_module(module_name, class_name=None, silent: bool = False): module = importlib.import_module(module_name) return getattr(module, class_name) if class_name else module def load_class(class_name): return load_module(*class_name.rsplit(".", 1)) def load_from_config(config, **kwargs): """Instantiate an object given a config and arguments.""" assert "class_name" in config and "module_name" not in config config = copy.deepcopy(config) class_name = config.pop("class_name") object_class = load_class(class_name) return object_class(**config, **kwargs) def load_opencv_calib(extrin_path, intrin_path): cameras = {} fse = cv2.FileStorage() fse.open(extrin_path, cv2.FileStorage_READ) fsi = cv2.FileStorage() fsi.open(intrin_path, cv2.FileStorage_READ) names = [ fse.getNode("names").at(c).string() for c in range(fse.getNode("names").size()) ] for camera in names: rot = fse.getNode(f"R_{camera}").mat() R = fse.getNode(f"Rot_{camera}").mat() T = fse.getNode(f"T_{camera}").mat() R_pred = cv2.Rodrigues(rot)[0] assert np.all(np.isclose(R_pred, R)) K = fsi.getNode(f"K_{camera}").mat() cameras[camera] = { "Rt": np.concatenate([R, T], axis=1).astype(np.float32), "K": K.astype(np.float32), } return cameras def load_smpl_params(params): return { k: np.array(v[0], dtype=np.float32) for k, v in params[0].items() if k != "id" } def load_smpl_topology(data_struct) -> Dict[str, Any]: # TODO: compute_ topology = { "vi": data_struct["f"].astype(np.int64), "vti": data_struct["ft"].astype(np.int64), "vt": data_struct["vt"].astype(np.float32), "n_verts": data_struct["v_template"].shape[0], }
next_line: topology["v2uv"] = compute_v2uv(
gold_snippet_index: 1
created_at: 2023-12-06 05:12:55+00:00
level: 2k
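
load_from_config above turns a config dict into an object: class_name is a dotted import path and the remaining keys become constructor kwargs. A usage sketch with a standard-library class, assuming dva is importable:

from dva.io import load_from_config

fr = load_from_config({"class_name": "fractions.Fraction",
                       "numerator": 3, "denominator": 4})
print(fr)   # 3/4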

repo_name: Nearcyan/papers.day
file_path: backend/admin.py
[ { "identifier": "ArxivPaper", "path": "backend/models.py", "snippet": "class ArxivPaper(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n arxiv_id = models.CharField(max_length=20, unique=True)\n\n # fields scraped fro...
from django.contrib import admin from .models import ArxivPaper, Author, Subject, PaperImage, PaperSource
token_num: 1096
class ArxivPaperAdmin(admin.ModelAdmin):
    list_display = ('title', 'citations', 'total_author_citations', 'summary', 'publication_date', 'arxiv_id', 'created_at')
    search_fields = ('title', 'abstract', 'arxiv_id')
    readonly_fields = ('created_at', 'modified_at')
    ordering = ('-publication_date',)
    list_filter = ('publication_date', 'created_at', 'citations', 'total_author_citations')


class SubjectAdmin(admin.ModelAdmin):
    list_display = ('short_name', 'full_name')
    search_fields = ('short_name', 'full_name')
    ordering = ('short_name',)


class AuthorAdmin(admin.ModelAdmin):
    list_display = ('name', 'affiliation', 'email', 'email_domain', 'citations', 'scholar_id')
    search_fields = ('name', 'affiliation', 'email', 'email_domain', 'citations', 'scholar_id')
    ordering = ('name',)


class PaperImageAdmin(admin.ModelAdmin):
    list_display = ('image', 'paper')
    search_fields = ('image', 'paper')
    ordering = ('image',)


class PaperSourceAdmin(admin.ModelAdmin):
    list_display = ('paper',)
    search_fields = ('paper',)

class ArxivPaperAdmin(admin.ModelAdmin):
    list_display = ('title', 'citations', 'total_author_citations', 'summary', 'publication_date', 'arxiv_id', 'created_at')
    search_fields = ('title', 'abstract', 'arxiv_id')
    readonly_fields = ('created_at', 'modified_at')
    ordering = ('-publication_date',)
    list_filter = ('publication_date', 'created_at', 'citations', 'total_author_citations')


class SubjectAdmin(admin.ModelAdmin):
    list_display = ('short_name', 'full_name')
    search_fields = ('short_name', 'full_name')
    ordering = ('short_name',)


class AuthorAdmin(admin.ModelAdmin):
    list_display = ('name', 'affiliation', 'email', 'email_domain', 'citations', 'scholar_id')
    search_fields = ('name', 'affiliation', 'email', 'email_domain', 'citations', 'scholar_id')
    ordering = ('name',)


class PaperImageAdmin(admin.ModelAdmin):
    list_display = ('image', 'paper')
    search_fields = ('image', 'paper')
    ordering = ('image',)


class PaperSourceAdmin(admin.ModelAdmin):
    list_display = ('paper',)
    search_fields = ('paper',)
admin.site.register(ArxivPaper, ArxivPaperAdmin)
0
2023-12-14 08:23:05+00:00
2k
LSimon95/megatts2
models/trainer.py
[ { "identifier": "MegaVQ", "path": "models/megatts2.py", "snippet": "class MegaVQ(nn.Module):\n def __init__(\n self,\n mrte: MRTE,\n vqpe: VQProsodyEncoder,\n decoder: ConvNet,\n ):\n super(MegaVQ, self).__init__()\n\n self.mrte = mrte\n ...
import lightning.pytorch as pl import torch import torchaudio import torch.nn.functional as F import transformers import numpy as np import math from .megatts2 import MegaVQ from modules.dscrm import Discriminator from utils.utils import plot_spectrogram_to_numpy
1,002
class MegaGANTrainer(pl.LightningModule):
    def __init__(
            self,

class MegaGANTrainer(pl.LightningModule):
    def __init__(
            self,
G: MegaVQ,
0
2023-12-10 15:02:54+00:00
2k
wanghao-cst/Omni-VideoAssistant
llava/serve/controller.py
[ { "identifier": "CONTROLLER_HEART_BEAT_EXPIRATION", "path": "llava/constants.py", "snippet": "CONTROLLER_HEART_BEAT_EXPIRATION = 30" }, { "identifier": "build_logger", "path": "llava/utils.py", "snippet": "def build_logger(logger_name, logger_filename):\n def __init__(self, logger, lo...
import argparse import asyncio import dataclasses import json import logging import time import threading import numpy as np import requests import uvicorn from enum import Enum, auto from typing import List, Union from fastapi import FastAPI, Request from fastapi.responses import StreamingResponse from llava.constants import CONTROLLER_HEART_BEAT_EXPIRATION from llava.utils import build_logger, server_error_msg
1,519
        if not worker_status:
            return False

        self.worker_info[worker_name] = WorkerInfo(
            worker_status["model_names"], worker_status["speed"],
            worker_status["queue_length"], check_heart_beat,
            time.time())

        logger.info(f"Register done: {worker_name}, {worker_status}")
        return True

    def get_worker_status(self, worker_name: str):
        try:
            r = requests.post(worker_name + "/worker_get_status", timeout=5)
        except requests.exceptions.RequestException as e:
            logger.error(f"Get status fails: {worker_name}, {e}")
            return None

        if r.status_code != 200:
            logger.error(f"Get status fails: {worker_name}, {r}")
            return None

        return r.json()

    def remove_worker(self, worker_name: str):
        del self.worker_info[worker_name]

    def refresh_all_workers(self):
        old_info = dict(self.worker_info)
        self.worker_info = {}

        for w_name, w_info in old_info.items():
            if not self.register_worker(w_name, w_info.check_heart_beat, None):
                logger.info(f"Remove stale worker: {w_name}")

    def list_models(self):
        model_names = set()

        for w_name, w_info in self.worker_info.items():
            model_names.update(w_info.model_names)

        return list(model_names)

    def get_worker_address(self, model_name: str):
        if self.dispatch_method == DispatchMethod.LOTTERY:
            worker_names = []
            worker_speeds = []
            for w_name, w_info in self.worker_info.items():
                if model_name in w_info.model_names:
                    worker_names.append(w_name)
                    worker_speeds.append(w_info.speed)
            worker_speeds = np.array(worker_speeds, dtype=np.float32)
            norm = np.sum(worker_speeds)
            if norm < 1e-4:
                return ""
            worker_speeds = worker_speeds / norm
            if True:  # Directly return address
                pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
                worker_name = worker_names[pt]
                return worker_name

            # Check status before returning
            while True:
                pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds)
                worker_name = worker_names[pt]

                if self.get_worker_status(worker_name):
                    break
                else:
                    self.remove_worker(worker_name)
                    worker_speeds[pt] = 0
                    norm = np.sum(worker_speeds)
                    if norm < 1e-4:
                        return ""
                    worker_speeds = worker_speeds / norm
                    continue
            return worker_name
        elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE:
            worker_names = []
            worker_qlen = []
            for w_name, w_info in self.worker_info.items():
                if model_name in w_info.model_names:
                    worker_names.append(w_name)
                    worker_qlen.append(w_info.queue_length / w_info.speed)
            if len(worker_names) == 0:
                return ""
            min_index = np.argmin(worker_qlen)
            w_name = worker_names[min_index]
            self.worker_info[w_name].queue_length += 1
            logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}")
            return w_name
        else:
            raise ValueError(f"Invalid dispatch method: {self.dispatch_method}")

    def receive_heart_beat(self, worker_name: str, queue_length: int):
        if worker_name not in self.worker_info:
            logger.info(f"Receive unknown heart beat. {worker_name}")
            return False

        self.worker_info[worker_name].queue_length = queue_length
        self.worker_info[worker_name].last_heart_beat = time.time()
        logger.info(f"Receive heart beat. {worker_name}")
        return True

    def remove_stable_workers_by_expiration(self):
        expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION
        to_delete = []
        for worker_name, w_info in self.worker_info.items():
            if w_info.check_heart_beat and w_info.last_heart_beat < expire:
                to_delete.append(worker_name)

        for worker_name in to_delete:
            self.remove_worker(worker_name)

    def worker_api_generate_stream(self, params):
        worker_addr = self.get_worker_address(params["model"])
        if not worker_addr:
            logger.info(f"no worker: {params['model']}")
            ret = {
""" A controller manages distributed workers. It sends worker addresses to clients. """ logger = build_logger("controller", "controller.log") class DispatchMethod(Enum): LOTTERY = auto() SHORTEST_QUEUE = auto() @classmethod def from_str(cls, name): if name == "lottery": return cls.LOTTERY elif name == "shortest_queue": return cls.SHORTEST_QUEUE else: raise ValueError(f"Invalid dispatch method") @dataclasses.dataclass class WorkerInfo: model_names: List[str] speed: int queue_length: int check_heart_beat: bool last_heart_beat: str def heart_beat_controller(controller): while True: time.sleep(CONTROLLER_HEART_BEAT_EXPIRATION) controller.remove_stable_workers_by_expiration() class Controller: def __init__(self, dispatch_method: str): # Dict[str -> WorkerInfo] self.worker_info = {} self.dispatch_method = DispatchMethod.from_str(dispatch_method) self.heart_beat_thread = threading.Thread( target=heart_beat_controller, args=(self,)) self.heart_beat_thread.start() logger.info("Init controller") def register_worker(self, worker_name: str, check_heart_beat: bool, worker_status: dict): if worker_name not in self.worker_info: logger.info(f"Register a new worker: {worker_name}") else: logger.info(f"Register an existing worker: {worker_name}") if not worker_status: worker_status = self.get_worker_status(worker_name) if not worker_status: return False self.worker_info[worker_name] = WorkerInfo( worker_status["model_names"], worker_status["speed"], worker_status["queue_length"], check_heart_beat, time.time()) logger.info(f"Register done: {worker_name}, {worker_status}") return True def get_worker_status(self, worker_name: str): try: r = requests.post(worker_name + "/worker_get_status", timeout=5) except requests.exceptions.RequestException as e: logger.error(f"Get status fails: {worker_name}, {e}") return None if r.status_code != 200: logger.error(f"Get status fails: {worker_name}, {r}") return None return r.json() def remove_worker(self, worker_name: str): del self.worker_info[worker_name] def refresh_all_workers(self): old_info = dict(self.worker_info) self.worker_info = {} for w_name, w_info in old_info.items(): if not self.register_worker(w_name, w_info.check_heart_beat, None): logger.info(f"Remove stale worker: {w_name}") def list_models(self): model_names = set() for w_name, w_info in self.worker_info.items(): model_names.update(w_info.model_names) return list(model_names) def get_worker_address(self, model_name: str): if self.dispatch_method == DispatchMethod.LOTTERY: worker_names = [] worker_speeds = [] for w_name, w_info in self.worker_info.items(): if model_name in w_info.model_names: worker_names.append(w_name) worker_speeds.append(w_info.speed) worker_speeds = np.array(worker_speeds, dtype=np.float32) norm = np.sum(worker_speeds) if norm < 1e-4: return "" worker_speeds = worker_speeds / norm if True: # Directly return address pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds) worker_name = worker_names[pt] return worker_name # Check status before returning while True: pt = np.random.choice(np.arange(len(worker_names)), p=worker_speeds) worker_name = worker_names[pt] if self.get_worker_status(worker_name): break else: self.remove_worker(worker_name) worker_speeds[pt] = 0 norm = np.sum(worker_speeds) if norm < 1e-4: return "" worker_speeds = worker_speeds / norm continue return worker_name elif self.dispatch_method == DispatchMethod.SHORTEST_QUEUE: worker_names = [] worker_qlen = [] for w_name, w_info in self.worker_info.items(): if model_name in w_info.model_names: 
worker_names.append(w_name) worker_qlen.append(w_info.queue_length / w_info.speed) if len(worker_names) == 0: return "" min_index = np.argmin(worker_qlen) w_name = worker_names[min_index] self.worker_info[w_name].queue_length += 1 logger.info(f"names: {worker_names}, queue_lens: {worker_qlen}, ret: {w_name}") return w_name else: raise ValueError(f"Invalid dispatch method: {self.dispatch_method}") def receive_heart_beat(self, worker_name: str, queue_length: int): if worker_name not in self.worker_info: logger.info(f"Receive unknown heart beat. {worker_name}") return False self.worker_info[worker_name].queue_length = queue_length self.worker_info[worker_name].last_heart_beat = time.time() logger.info(f"Receive heart beat. {worker_name}") return True def remove_stable_workers_by_expiration(self): expire = time.time() - CONTROLLER_HEART_BEAT_EXPIRATION to_delete = [] for worker_name, w_info in self.worker_info.items(): if w_info.check_heart_beat and w_info.last_heart_beat < expire: to_delete.append(worker_name) for worker_name in to_delete: self.remove_worker(worker_name) def worker_api_generate_stream(self, params): worker_addr = self.get_worker_address(params["model"]) if not worker_addr: logger.info(f"no worker: {params['model']}") ret = {
"text": server_error_msg,
1
2023-12-05 08:02:17+00:00
2k
RobertCsordas/moe_attention
layers/transformer/transformer.py
[ { "identifier": "MultiHeadAttention", "path": "layers/transformer/multi_head_attention.py", "snippet": "class MultiHeadAttention(AttentionMergeMixin, AbsPosAttentionBase):\n def __init__(self, state_size: int, n_heads: int, dropout: float = 0.1, input_size: Optional[int] = None,\n out...
import torch import torch.nn import torch.nn.functional as F from .multi_head_attention import MultiHeadAttention, AttentionMask from typing import Optional, Callable, Dict, Type, Sequence, Union from dataclasses import dataclass
686
# This file is based on PyTorch's internal implementation

ActivationFunction = Callable[[torch.Tensor], torch.Tensor]


class TransformerEncoderLayer(torch.nn.Module):
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation: ActivationFunction = F.relu, attention_dropout=0):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
        self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
        self.dropout = torch.nn.Dropout(dropout)
        self.linear2 = torch.nn.Linear(dim_feedforward, d_model)

        self.norm1 = torch.nn.LayerNorm(d_model)
        self.norm2 = torch.nn.LayerNorm(d_model)
        self.dropout1 = torch.nn.Dropout(dropout)
        self.dropout2 = torch.nn.Dropout(dropout)

        self.activation = activation
        self.reset_parameters()

# This file is based on PyTorch's internal implementation

ActivationFunction = Callable[[torch.Tensor], torch.Tensor]


class TransformerEncoderLayer(torch.nn.Module):
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 activation: ActivationFunction = F.relu, attention_dropout=0):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(d_model, nhead, dropout=attention_dropout)
        self.linear1 = torch.nn.Linear(d_model, dim_feedforward)
        self.dropout = torch.nn.Dropout(dropout)
        self.linear2 = torch.nn.Linear(dim_feedforward, d_model)

        self.norm1 = torch.nn.LayerNorm(d_model)
        self.norm2 = torch.nn.LayerNorm(d_model)
        self.dropout1 = torch.nn.Dropout(dropout)
        self.dropout2 = torch.nn.Dropout(dropout)

        self.activation = activation
        self.reset_parameters()
def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None) -> torch.Tensor:
1
2023-12-13 08:45:02+00:00
2k
riccardomusmeci/mlx-llm
src/mlx_llm/model/_registry.py
[ { "identifier": "phi2", "path": "src/mlx_llm/model/phi2.py", "snippet": "def phi2() -> Phi2:\n return Phi2(\n dim=2560,\n vocab_size=51200,\n n_heads=32,\n n_layers=32,\n rotary_dim=32\n )" }, { "identifier": "llama_2_7B_chat", "path": "src/mlx_llm/mo...
from .phi2 import phi2 from .transformer import ( llama_2_7B_chat, tiny_llama_chat_v06, openhermes_25_mistral_7B, # mistral_7B_instruct_v01, mistral_7B_instruct_v02, e5_mistral_7b_instruct )
756
MODEL_ENTRYPOINTS = {
    "Phi2": phi2,
    "LLaMA-2-7B-chat": llama_2_7B_chat,
    "TinyLlama-1.1B-Chat-v0.6": tiny_llama_chat_v06,
    # "Mistral-7B-Instruct-v0.1": mistral_7B_instruct_v01,

MODEL_ENTRYPOINTS = {
    "Phi2": phi2,
    "LLaMA-2-7B-chat": llama_2_7B_chat,
    "TinyLlama-1.1B-Chat-v0.6": tiny_llama_chat_v06,
    # "Mistral-7B-Instruct-v0.1": mistral_7B_instruct_v01,
"Mistral-7B-Instruct-v0.2": mistral_7B_instruct_v02,
4
2023-12-07 16:19:47+00:00
2k
xetdata/xetcache
xetcache/xetmemo_kernel_extension.py
[ { "identifier": "hash_anything", "path": "xetcache/util.py", "snippet": "def hash_anything(x):\n return hashlib.sha256(pickle.dumps(x)).hexdigest()" }, { "identifier": "probe_memo", "path": "xetcache/util.py", "snippet": "def probe_memo(memopath, inputhashstr, key=None):\n \"\"\"\n...
import os import time from .util import hash_anything, probe_memo, store_memo from .config import get_memo_path, get_runtime_threshold from IPython.core.magic import Magics, magics_class, cell_magic
1,389
@magics_class
class XMemoMagics(Magics):
    """Memoization for data science tasks

    %load_ext xetcache

    to load the extension
    """

    def __init__(self, *args, **kwargs):
        print(self.xetmemo.__doc__)
        memopath = get_memo_path()
        print(f"Memoizing to {memopath}")
        super().__init__(*args, **kwargs)

    @cell_magic
    def xetmemo(self, line, cell):
        '''
        Usage:

        %%xetmemo input=v1,v2 output=v3,v4

        Caches the specified output variables each time it is called.
        If called later with the same inputs, the cached value is returned
        and not reevaluated. This is persistent across Python runs.

        Any content changes to the input variables or cell code will force
        reevaluation of the cell. Otherwise the outputs will simply be
        retrieved from the memo.

        This memo is persistent across Python processes and, if XetHub is
        used (see `xetcache.set_xet_project`), can be shared with others.

        For performance reasons, only functions which take more than 3 seconds
        (configurable from config.set_runtime_threshold) will be cached.
        "always=True" can be added to the xetmemo arguments to ignore the
        runtime and to always cache:

        %%xetmemo input=v1,v2 output=v3,v4 always=True

        Note that inputs can be anything picklable including functions.

        A key parameter can be added to group the stored objects together.
        Objects stored with one key will not be retrievable with a different key:

        %%xetmemo input=v1,v2 output=v3,v4 always=True key=experiment1

        Also see the `xetcache.xetmemo` decorator for a version that can be
        used as a function decorator.
        '''
        # parse the argument list
        args = line.strip().split(' ')
        inputvars = []
        outputvars = []
        ip = self.shell
        always = False
        key = None
        for arg in args:
            k, v = arg.split('=')
            if k == 'input':
                inputvars = [x.strip() for x in v.split(',')]
            elif k == 'output':
                outputvars = [x.strip() for x in v.split(',')]
            elif k == 'always':
                always = (v.strip() == 'True')
            elif k == 'key':
                key = v.strip()
            else:
                raise RuntimeError(f'Unexpected xmemo key type {k}')

        # we hash the xetmemo line, and the contents of the cell
        # and all the variables in the input line

@magics_class
class XMemoMagics(Magics):
    """Memoization for data science tasks

    %load_ext xetcache

    to load the extension
    """

    def __init__(self, *args, **kwargs):
        print(self.xetmemo.__doc__)
        memopath = get_memo_path()
        print(f"Memoizing to {memopath}")
        super().__init__(*args, **kwargs)

    @cell_magic
    def xetmemo(self, line, cell):
        '''
        Usage:

        %%xetmemo input=v1,v2 output=v3,v4

        Caches the specified output variables each time it is called.
        If called later with the same inputs, the cached value is returned
        and not reevaluated. This is persistent across Python runs.

        Any content changes to the input variables or cell code will force
        reevaluation of the cell. Otherwise the outputs will simply be
        retrieved from the memo.

        This memo is persistent across Python processes and, if XetHub is
        used (see `xetcache.set_xet_project`), can be shared with others.

        For performance reasons, only functions which take more than 3 seconds
        (configurable from config.set_runtime_threshold) will be cached.
        "always=True" can be added to the xetmemo arguments to ignore the
        runtime and to always cache:

        %%xetmemo input=v1,v2 output=v3,v4 always=True

        Note that inputs can be anything picklable including functions.

        A key parameter can be added to group the stored objects together.
        Objects stored with one key will not be retrievable with a different key:

        %%xetmemo input=v1,v2 output=v3,v4 always=True key=experiment1

        Also see the `xetcache.xetmemo` decorator for a version that can be
        used as a function decorator.
        '''
        # parse the argument list
        args = line.strip().split(' ')
        inputvars = []
        outputvars = []
        ip = self.shell
        always = False
        key = None
        for arg in args:
            k, v = arg.split('=')
            if k == 'input':
                inputvars = [x.strip() for x in v.split(',')]
            elif k == 'output':
                outputvars = [x.strip() for x in v.split(',')]
            elif k == 'always':
                always = (v.strip() == 'True')
            elif k == 'key':
                key = v.strip()
            else:
                raise RuntimeError(f'Unexpected xmemo key type {k}')

        # we hash the xetmemo line, and the contents of the cell
        # and all the variables in the input line
inputhashes = [hash_anything(line), hash_anything(cell)]
0
2023-12-05 21:59:08+00:00
2k
open-compass/T-Eval
teval/evaluators/planning_evaluator.py
[ { "identifier": "format_load", "path": "teval/utils/format_load.py", "snippet": "def format_load(raw_data: str, start_character: str = '', end_character: str = ''):\n \"\"\"Format the raw data into the format that can be evaluated.\n\n Args:\n raw_data (str): The raw data.\n start_ch...
from collections import defaultdict from numpy import mean from mmengine import load from teval.utils.format_load import format_load from tqdm import tqdm from teval.schema import ResponseDataSample from sentence_transformers import SentenceTransformer, util import json import itertools import networkx as nx import numpy as np import copy import json import re
1,293
# import evaluate


class PlanningEvaluator:
    """Planning Evaluation

    Args:
        dataset_path(str): File path of evaluation dataset
        name_weight(float): the weight of action_name in bert_score match, default = 0.75
        args_weight(float): the weight of action_args in bert_score match, default = 0.25
        match_threshold(float): the threshold of matching
        match_strategy(str): matching method, can choose 'bertscore' or 'permutation'
        bert_score_model(str): the bert_score model for sentence similarity, default = "all-mpnet-base-v2".
            Refer to https://www.sbert.net/docs/pretrained_models.html for more models.
    """

    def __init__(
        self,
        dataset_path: str,
        name_weight = 0.75,
        args_weight = 0.25,
        match_threshold = 0.7,
        match_strategy: str = 'bertscore',  # ["bertscore", "permutation"]
        bert_score_model: str = "all-mpnet-base-v2",  # ['thenlper/gte-large-zh', 'all-mpnet-base-v2']
        default_prompt_type: str = 'json',  # ["json", "ReWOO"]
        **kwargs,
    ) -> None:
        self.bert_score_model = bert_score_model
        print(bert_score_model)
        self.dataset_path = dataset_path
        self.name_weight = name_weight
        self.args_weight = args_weight
        self.match_threshold = match_threshold
        self.default_prompt_type = default_prompt_type  # ["json", "ReWOO"]

        assert match_strategy in ["bertscore", "permutation"], \
            f"match strategy must be in [\"bertscore\", \"permutation\"], but got {match_strategy}"
        self.match_strategy = match_strategy
        self.valid_data_count = None
        self.sentence_model = SentenceTransformer(self.bert_score_model)

    def _load_dataset(self):
        self.dataset = []
        dataset = load(self.dataset_path)
        total_error = 0
        total_count = 0
        for key in dataset.keys():
            datum = dataset[key]
            data_sample, error = self._process_response(datum)
            total_error += error
            total_count += 1
            self.dataset.append(
                dict(response_data_sample=data_sample))
        self.num_samples = len(self.dataset)
        print("total_data_count:", total_count, "valid_data_count:", total_count - total_error)
        self.valid_data_count = total_count - total_error

    def format_load(self, data):
        r'''
            ensure evaluator can work correctly under any data input
        '''
        try:
            json_format = format_load(data, start_character='[', end_character=']')
        except Exception as e:
            return []
        if type(json_format) != list:
            return []
        for i in range(len(json_format)):
            try:
                json_format[i] = {
                    'name': str(json_format[i]['name']),
                    'id': int(json_format[i]['id']),
                    'args': str(json_format[i]['args'])
                }
            except Exception as e:
                return []
        return json_format

    def _process_response(
        self,
        datum,

# import evaluate


class PlanningEvaluator:
    """Planning Evaluation

    Args:
        dataset_path(str): File path of evaluation dataset
        name_weight(float): the weight of action_name in bert_score match, default = 0.75
        args_weight(float): the weight of action_args in bert_score match, default = 0.25
        match_threshold(float): the threshold of matching
        match_strategy(str): matching method, can choose 'bertscore' or 'permutation'
        bert_score_model(str): the bert_score model for sentence similarity, default = "all-mpnet-base-v2".
            Refer to https://www.sbert.net/docs/pretrained_models.html for more models.
    """

    def __init__(
        self,
        dataset_path: str,
        name_weight = 0.75,
        args_weight = 0.25,
        match_threshold = 0.7,
        match_strategy: str = 'bertscore',  # ["bertscore", "permutation"]
        bert_score_model: str = "all-mpnet-base-v2",  # ['thenlper/gte-large-zh', 'all-mpnet-base-v2']
        default_prompt_type: str = 'json',  # ["json", "ReWOO"]
        **kwargs,
    ) -> None:
        self.bert_score_model = bert_score_model
        print(bert_score_model)
        self.dataset_path = dataset_path
        self.name_weight = name_weight
        self.args_weight = args_weight
        self.match_threshold = match_threshold
        self.default_prompt_type = default_prompt_type  # ["json", "ReWOO"]

        assert match_strategy in ["bertscore", "permutation"], \
            f"match strategy must be in [\"bertscore\", \"permutation\"], but got {match_strategy}"
        self.match_strategy = match_strategy
        self.valid_data_count = None
        self.sentence_model = SentenceTransformer(self.bert_score_model)

    def _load_dataset(self):
        self.dataset = []
        dataset = load(self.dataset_path)
        total_error = 0
        total_count = 0
        for key in dataset.keys():
            datum = dataset[key]
            data_sample, error = self._process_response(datum)
            total_error += error
            total_count += 1
            self.dataset.append(
                dict(response_data_sample=data_sample))
        self.num_samples = len(self.dataset)
        print("total_data_count:", total_count, "valid_data_count:", total_count - total_error)
        self.valid_data_count = total_count - total_error

    def format_load(self, data):
        r'''
            ensure evaluator can work correctly under any data input
        '''
        try:
            json_format = format_load(data, start_character='[', end_character=']')
        except Exception as e:
            return []
        if type(json_format) != list:
            return []
        for i in range(len(json_format)):
            try:
                json_format[i] = {
                    'name': str(json_format[i]['name']),
                    'id': int(json_format[i]['id']),
                    'args': str(json_format[i]['args'])
                }
            except Exception as e:
                return []
        return json_format

    def _process_response(
        self,
        datum,
) -> ResponseDataSample:
1
2023-12-10 05:18:46+00:00
2k
rabilrbl/gemini-pro-bot
gemini_pro_bot/handlers.py
[ { "identifier": "model", "path": "gemini_pro_bot/llm.py", "snippet": "SAFETY_SETTINGS = {\n HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: HarmBlockThreshold.BLOCK_NONE,\n HarmCategory.HARM_CATEGORY_HARASSMENT: HarmBlockThreshold.BLOCK_NONE,\n HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: HarmBl...
import asyncio import PIL.Image as load_image from gemini_pro_bot.llm import model, img_model from google.generativeai.types.generation_types import ( StopCandidateException, BlockedPromptException, ) from telegram import Update from telegram.ext import ( ContextTypes, ) from telegram.error import NetworkError, BadRequest from telegram.constants import ChatAction, ParseMode from gemini_pro_bot.html_format import format_message from io import BytesIO
1,017
def new_chat(context: ContextTypes.DEFAULT_TYPE) -> None:
    context.chat_data["chat"] = model.start_chat()


async def start(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
    """Send a message when the command /start is issued."""
    user = update.effective_user
    await update.message.reply_html(
        f"Hi {user.mention_html()}!\n\nStart sending messages with me to generate a response.\n\nSend /new to start a new chat session.",
        # reply_markup=ForceReply(selective=True),
    )


async def help_command(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
    """Send a message when the command /help is issued."""
    help_text = """
Basic commands:
/start - Start the bot
/help - Get help. Shows this message

Chat commands:
/new - Start a new chat session (model will forget previously generated messages)

Send a message to the bot to generate a response.
"""
    await update.message.reply_text(help_text)


async def newchat_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Start a new chat session."""
    init_msg = await update.message.reply_text(
        text="Starting new chat session...",
        reply_to_message_id=update.message.message_id,
    )
    new_chat(context)
    await init_msg.edit_text("New chat session started.")


# Define the function that will handle incoming messages
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Handles incoming text messages from users.

    Checks if a chat session exists for the user, initializes a new session if not.
    Sends the user's message to the chat session to generate a response.
    Streams the response back to the user, handling any errors.
    """
    if context.chat_data.get("chat") is None:
        new_chat(context)
    text = update.message.text
    init_msg = await update.message.reply_text(
        text="Generating...", reply_to_message_id=update.message.message_id
    )
    await update.message.chat.send_action(ChatAction.TYPING)
    # Generate a response using the text-generation pipeline
    chat = context.chat_data.get("chat")  # Get the chat session for this chat
    response = None
    try:
        response = await chat.send_message_async(
            text, stream=True
        )  # Generate a response
    except StopCandidateException as sce:
        print("Prompt: ", text, " was stopped. User: ", update.message.from_user)
        print(sce)
        await init_msg.edit_text("The model unexpectedly stopped generating.")
        chat.rewind()  # Rewind the chat session to prevent the bot from getting stuck
        return
    except BlockedPromptException as bpe:
        print("Prompt: ", text, " was blocked. User: ", update.message.from_user)
        print(bpe)
        await init_msg.edit_text("Blocked due to safety concerns.")
        if response:
            # Resolve the response to prevent the chat session from getting stuck
            await response.resolve()
        return
    full_plain_message = ""
    # Stream the responses
    async for chunk in response:
        try:
            if chunk.text:
                full_plain_message += chunk.text

def new_chat(context: ContextTypes.DEFAULT_TYPE) -> None:
    context.chat_data["chat"] = model.start_chat()


async def start(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
    """Send a message when the command /start is issued."""
    user = update.effective_user
    await update.message.reply_html(
        f"Hi {user.mention_html()}!\n\nStart sending messages with me to generate a response.\n\nSend /new to start a new chat session.",
        # reply_markup=ForceReply(selective=True),
    )


async def help_command(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:
    """Send a message when the command /help is issued."""
    help_text = """
Basic commands:
/start - Start the bot
/help - Get help. Shows this message

Chat commands:
/new - Start a new chat session (model will forget previously generated messages)

Send a message to the bot to generate a response.
"""
    await update.message.reply_text(help_text)


async def newchat_command(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Start a new chat session."""
    init_msg = await update.message.reply_text(
        text="Starting new chat session...",
        reply_to_message_id=update.message.message_id,
    )
    new_chat(context)
    await init_msg.edit_text("New chat session started.")


# Define the function that will handle incoming messages
async def handle_message(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:
    """Handles incoming text messages from users.

    Checks if a chat session exists for the user, initializes a new session if not.
    Sends the user's message to the chat session to generate a response.
    Streams the response back to the user, handling any errors.
    """
    if context.chat_data.get("chat") is None:
        new_chat(context)
    text = update.message.text
    init_msg = await update.message.reply_text(
        text="Generating...", reply_to_message_id=update.message.message_id
    )
    await update.message.chat.send_action(ChatAction.TYPING)
    # Generate a response using the text-generation pipeline
    chat = context.chat_data.get("chat")  # Get the chat session for this chat
    response = None
    try:
        response = await chat.send_message_async(
            text, stream=True
        )  # Generate a response
    except StopCandidateException as sce:
        print("Prompt: ", text, " was stopped. User: ", update.message.from_user)
        print(sce)
        await init_msg.edit_text("The model unexpectedly stopped generating.")
        chat.rewind()  # Rewind the chat session to prevent the bot from getting stuck
        return
    except BlockedPromptException as bpe:
        print("Prompt: ", text, " was blocked. User: ", update.message.from_user)
        print(bpe)
        await init_msg.edit_text("Blocked due to safety concerns.")
        if response:
            # Resolve the response to prevent the chat session from getting stuck
            await response.resolve()
        return
    full_plain_message = ""
    # Stream the responses
    async for chunk in response:
        try:
            if chunk.text:
                full_plain_message += chunk.text
message = format_message(full_plain_message)
1
2023-12-14 16:57:14+00:00
2k
nox-410/tvm.tl
python/tvm/target/x86.py
[ { "identifier": "register_func", "path": "python/tvm/_ffi/registry.py", "snippet": "def register_func(func_name, f=None, override=False):\n \"\"\"Register global function\n\n Parameters\n ----------\n func_name : str or function\n The function name\n\n f : function, optional\n ...
from .._ffi import register_func from .codegen import target_has_features
940
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common x86 related utilities"""


@register_func("tvm.topi.x86.utils.get_simd_32bit_lanes")
def get_simd_32bit_lanes():
    """X86 SIMD optimal vector length lookup.

    Parameters
    ----------

    Returns
    -------
    vec_len : int
        The optimal vector length of CPU from the global context target.
    """
    vec_len = 4

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common x86 related utilities"""


@register_func("tvm.topi.x86.utils.get_simd_32bit_lanes")
def get_simd_32bit_lanes():
    """X86 SIMD optimal vector length lookup.

    Parameters
    ----------

    Returns
    -------
    vec_len : int
        The optimal vector length of CPU from the global context target.
    """
    vec_len = 4
if target_has_features(["avx512bw", "avx512f"]):
1
2023-12-14 02:37:47+00:00
2k
kakaobrain/honeybee
tasks/mme/mme_dataset.py
[ { "identifier": "TaskDataset", "path": "tasks/base_dataset.py", "snippet": "class TaskDataset(Dataset):\n def build_prompt(self, question, image_prompt=\"Human: <image>\"):\n prompt = f\"\"\"{SYSTEM}\n{image_prompt}\nHuman: {question}\nAI: \"\"\"\n return prompt\n\n def collate_fn(se...
from pathlib import Path from PIL import Image from tasks.base_dataset import TaskDataset, Example import utils
763
EVAL_TYPE_DICT = {
    "Perception": ["existence", "count", "position", "color", "posters", "celebrity", "scene", "landmark", "artwork", "OCR"],
    "Cognition": ["commonsense_reasoning", "numerical_calculation", "text_translation", "code_reasoning"]
}


def load_subset(dir_path):
    root = Path(dir_path)
    dset_name = root.name
    imgpaths = list(root.glob("**/*.jpg")) + list(root.glob("**/*.png"))
    imgpaths = sorted(imgpaths)

    def get_txtpath(imgpath):
        txtpath = imgpath.with_suffix(".txt")
        txtname = txtpath.name
        if txtpath.exists():
            return txtpath

        if imgpath.parent.name == "images":
            return imgpath.parent.parent / "questions_answers_YN" / txtname

        raise ValueError(f"Cannot find txt path from image path `{imgpath}`")

    data = []
    for imgpath in imgpaths:
        txtpath = get_txtpath(imgpath)
        with txtpath.open(encoding="utf-8") as f:
            for line in f:
                q, a = line.strip().split("\t")
                data.append((dset_name, imgpath, q, a))

    return data


class MMEDataset(TaskDataset):
    def __init__(self, root, processor):
        root = Path(root)
        data = []
        for subset in EVAL_TYPE_DICT["Perception"] + EVAL_TYPE_DICT["Cognition"]:
            data += load_subset(root / subset)

        utils.print_rank_0(f"MME total dataset size = {len(data)}")
        assert len(data) == 2374

        self.data = data
        self.processor = processor

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        dset_name, imgpath, question, answer = self.data[index]
        prompt = f"Answer the question using a single word or phrase. {question}"
        prompt = self.build_prompt(prompt)

        imgid = imgpath.name
        image = Image.open(imgpath)
        data = {
            "question": question,
            "answer": answer,
            "image_path": str(imgpath),
            "image_id": imgid,
            "dataset_name": dset_name,
        }

EVAL_TYPE_DICT = {
    "Perception": ["existence", "count", "position", "color", "posters", "celebrity", "scene", "landmark", "artwork", "OCR"],
    "Cognition": ["commonsense_reasoning", "numerical_calculation", "text_translation", "code_reasoning"]
}


def load_subset(dir_path):
    root = Path(dir_path)
    dset_name = root.name
    imgpaths = list(root.glob("**/*.jpg")) + list(root.glob("**/*.png"))
    imgpaths = sorted(imgpaths)

    def get_txtpath(imgpath):
        txtpath = imgpath.with_suffix(".txt")
        txtname = txtpath.name
        if txtpath.exists():
            return txtpath

        if imgpath.parent.name == "images":
            return imgpath.parent.parent / "questions_answers_YN" / txtname

        raise ValueError(f"Cannot find txt path from image path `{imgpath}`")

    data = []
    for imgpath in imgpaths:
        txtpath = get_txtpath(imgpath)
        with txtpath.open(encoding="utf-8") as f:
            for line in f:
                q, a = line.strip().split("\t")
                data.append((dset_name, imgpath, q, a))

    return data


class MMEDataset(TaskDataset):
    def __init__(self, root, processor):
        root = Path(root)
        data = []
        for subset in EVAL_TYPE_DICT["Perception"] + EVAL_TYPE_DICT["Cognition"]:
            data += load_subset(root / subset)

        utils.print_rank_0(f"MME total dataset size = {len(data)}")
        assert len(data) == 2374

        self.data = data
        self.processor = processor

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        dset_name, imgpath, question, answer = self.data[index]
        prompt = f"Answer the question using a single word or phrase. {question}"
        prompt = self.build_prompt(prompt)

        imgid = imgpath.name
        image = Image.open(imgpath)
        data = {
            "question": question,
            "answer": answer,
            "image_path": str(imgpath),
            "image_id": imgid,
            "dataset_name": dset_name,
        }
ex = Example(index, image, prompt, data)
1
2023-12-06 14:48:41+00:00
2k
NVlabs/RADIO
radio/hf_model.py
[ { "identifier": "eradio", "path": "radio/eradio_model.py", "snippet": "@register_model\ndef eradio(pretrained=False, **kwargs):\n return fastervit2_large_fullres_ws16(pretrained=pretrained, **kwargs)" }, { "identifier": "create_model_from_args", "path": "radio/radio_model.py", "snippe...
from collections import namedtuple from typing import Optional from timm.models import VisionTransformer from transformers import PretrainedConfig, PreTrainedModel from .eradio_model import eradio from .radio_model import create_model_from_args from .radio_model import RADIOModel as RADIOModelBase from .input_conditioner import get_default_conditioner, InputConditioner import torch
1,421
# Copyright (c) 2023, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class RADIOConfig(PretrainedConfig):
    """Pretrained Hugging Face configuration for RADIO models."""

    def __init__(
        self,
        args: Optional[dict] = None,
        version: Optional[str] = "v1",
        return_summary: Optional[bool] = True,
        return_spatial_features: Optional[bool] = True,
        **kwargs,
    ):
        self.args = args
        self.version = version
        self.return_summary = return_summary
        self.return_spatial_features = return_spatial_features
        super().__init__(**kwargs)


class RADIOModel(PreTrainedModel):
    """Pretrained Hugging Face model for RADIO.

    This class inherits from PreTrainedModel, which provides
    HuggingFace's functionality for loading and saving models.
    """

    config_class = RADIOConfig

    def __init__(self, config):
        super().__init__(config)

        RADIOArgs = namedtuple("RADIOArgs", config.args.keys())
        args = RADIOArgs(**config.args)
        self.config = config

# Copyright (c) 2023, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class RADIOConfig(PretrainedConfig):
    """Pretrained Hugging Face configuration for RADIO models."""

    def __init__(
        self,
        args: Optional[dict] = None,
        version: Optional[str] = "v1",
        return_summary: Optional[bool] = True,
        return_spatial_features: Optional[bool] = True,
        **kwargs,
    ):
        self.args = args
        self.version = version
        self.return_summary = return_summary
        self.return_spatial_features = return_spatial_features
        super().__init__(**kwargs)


class RADIOModel(PreTrainedModel):
    """Pretrained Hugging Face model for RADIO.

    This class inherits from PreTrainedModel, which provides
    HuggingFace's functionality for loading and saving models.
    """

    config_class = RADIOConfig

    def __init__(self, config):
        super().__init__(config)

        RADIOArgs = namedtuple("RADIOArgs", config.args.keys())
        args = RADIOArgs(**config.args)
        self.config = config
model = create_model_from_args(args)
1
2023-12-08 19:53:01+00:00
2k
taikinman/langrila
src/langrila/database/chroma.py
[ { "identifier": "BaseModule", "path": "src/langrila/base.py", "snippet": "class BaseModule(ABC):\n @abstractmethod\n def run(self, *args, **kwargs):\n raise NotImplementedError\n\n async def arun(self, *args, **kwargs):\n raise NotImplementedError\n\n def stream(self, *args, **...
import sys import chromadb from pathlib import Path from typing import Optional from ..base import BaseModule from ..result import RetrievalResult from ..usage import Usage
1,570
python_version = sys.version_info

# NOTE: Python < 3.10 is bundled with an older sqlite client, so in that case
# the sqlite3 module is overridden here.
# https://docs.trychroma.com/troubleshooting#sqlite
__import__("pysqlite3")
sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")


class ChromaCollectionModule(BaseModule):
    def __init__(
        self,
        persistence_directory: str,
        collection_name: str,
        embedder: Optional[BaseModule] = None,
    ):
        self.embedder = embedder
        self.persistence_directory = Path(persistence_directory)
        self.collection_name = collection_name

    def run(
        self,
        documents: list[str],
        metadatas: Optional[list[dict[str, str]]] = None,
        embeddings: Optional[list[list[float]]] = None,
    ) -> None:
        if embeddings is None:
            if self.embedder is not None:
                embeddings = self.embedder(documents).embeddings
            else:
                raise AttributeError(
                    "attribute embedder must be the instance of the class inheriting BaseModule."
                )

        ids = [str(i) for i in range(len(documents))]
        client = chromadb.PersistentClient(path=self.persistence_directory.as_posix())

        # recreate the collection
        try:
            client.delete_collection(name=self.collection_name)
        except ValueError:
            pass

        collection = client.get_or_create_collection(
            name=self.collection_name, metadata={"hnsw:space": "cosine"}
        )
        collection.upsert(ids=ids, embeddings=embeddings, documents=documents, metadatas=metadatas)

    def as_retriever(
        self,
        n_results: int = 4,
        threshold_similarity: float = 0.8,
        return_only_relevant_docs: bool = False
    ) -> "ChromaRetrievalModule":
        return ChromaRetrievalModule(
            embedder=self.embedder,
            persistence_directory=self.persistence_directory,
            collection_name=self.collection_name,
            n_results=n_results,
            threshold_similarity=threshold_similarity,
            return_only_relevant_docs=return_only_relevant_docs,
        )


class ChromaRetrievalModule(BaseModule):
    def __init__(
        self,
        embedder: BaseModule,
        persistence_directory: str,
        collection_name: str,
        n_results: int = 4,
        threshold_similarity: float = 0.8,
        return_only_relevant_docs: bool = False,
    ):
        assert isinstance(
            embedder, BaseModule
        ), "embedder must be the instance of the class inheriting BaseModule."

        self.embedder = embedder
        self.n_results = n_results
        self.threshold_similarity = threshold_similarity
        self.persistence_directory = persistence_directory
        self.collection_name = collection_name
        self.return_only_relevant_docs = return_only_relevant_docs
        self.n_results = n_results

    def run(
        self,
        query: str,
        where: Optional[dict] = None,
    ) -> dict:
        query_embed = self.embedder(query)
        client = chromadb.PersistentClient(path=self.persistence_directory.as_posix())
        collection = client.get_collection(name=self.collection_name)
        retrieved = collection.query(
            query_embeddings=query_embed.embeddings[0], n_results=self.n_results, where=where
        )

        _results = self.filter_with_distance(retrieved)

        results = RetrievalResult(
            ids=_results["ids"],
            documents=_results["documents"],
            metadatas=_results["metadatas"],
            similarities=_results["similarities"],

python_version = sys.version_info

# NOTE: Python < 3.10 is bundled with an older sqlite client, so in that case
# the sqlite3 module is overridden here.
# https://docs.trychroma.com/troubleshooting#sqlite
__import__("pysqlite3")
sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")


class ChromaCollectionModule(BaseModule):
    def __init__(
        self,
        persistence_directory: str,
        collection_name: str,
        embedder: Optional[BaseModule] = None,
    ):
        self.embedder = embedder
        self.persistence_directory = Path(persistence_directory)
        self.collection_name = collection_name

    def run(
        self,
        documents: list[str],
        metadatas: Optional[list[dict[str, str]]] = None,
        embeddings: Optional[list[list[float]]] = None,
    ) -> None:
        if embeddings is None:
            if self.embedder is not None:
                embeddings = self.embedder(documents).embeddings
            else:
                raise AttributeError(
                    "attribute embedder must be the instance of the class inheriting BaseModule."
                )

        ids = [str(i) for i in range(len(documents))]
        client = chromadb.PersistentClient(path=self.persistence_directory.as_posix())

        # recreate the collection
        try:
            client.delete_collection(name=self.collection_name)
        except ValueError:
            pass

        collection = client.get_or_create_collection(
            name=self.collection_name, metadata={"hnsw:space": "cosine"}
        )
        collection.upsert(ids=ids, embeddings=embeddings, documents=documents, metadatas=metadatas)

    def as_retriever(
        self,
        n_results: int = 4,
        threshold_similarity: float = 0.8,
        return_only_relevant_docs: bool = False
    ) -> "ChromaRetrievalModule":
        return ChromaRetrievalModule(
            embedder=self.embedder,
            persistence_directory=self.persistence_directory,
            collection_name=self.collection_name,
            n_results=n_results,
            threshold_similarity=threshold_similarity,
            return_only_relevant_docs=return_only_relevant_docs,
        )


class ChromaRetrievalModule(BaseModule):
    def __init__(
        self,
        embedder: BaseModule,
        persistence_directory: str,
        collection_name: str,
        n_results: int = 4,
        threshold_similarity: float = 0.8,
        return_only_relevant_docs: bool = False,
    ):
        assert isinstance(
            embedder, BaseModule
        ), "embedder must be the instance of the class inheriting BaseModule."

        self.embedder = embedder
        self.n_results = n_results
        self.threshold_similarity = threshold_similarity
        self.persistence_directory = persistence_directory
        self.collection_name = collection_name
        self.return_only_relevant_docs = return_only_relevant_docs
        self.n_results = n_results

    def run(
        self,
        query: str,
        where: Optional[dict] = None,
    ) -> dict:
        query_embed = self.embedder(query)
        client = chromadb.PersistentClient(path=self.persistence_directory.as_posix())
        collection = client.get_collection(name=self.collection_name)
        retrieved = collection.query(
            query_embeddings=query_embed.embeddings[0], n_results=self.n_results, where=where
        )

        _results = self.filter_with_distance(retrieved)

        results = RetrievalResult(
            ids=_results["ids"],
            documents=_results["documents"],
            metadatas=_results["metadatas"],
            similarities=_results["similarities"],
usage=Usage(
2
2023-12-10 09:42:35+00:00
2k
Open-All-Scale-Causal-Engine/OpenASCE
openasce/inference/learner/dml_test.py
[ { "identifier": "DML", "path": "openasce/inference/learner/dml.py", "snippet": "class DML(_DML, InferenceModel):\n def fit(\n self,\n *,\n X: Iterable[np.ndarray],\n Y: Iterable[np.ndarray],\n T: Iterable[np.ndarray],\n **kwargs\n ):\n \"\"\"Feed th...
from unittest import TestCase from econml.sklearn_extensions.linear_model import WeightedLassoCVWrapper from sklearn.linear_model import LassoCV from openasce.inference.learner.dml import DML from tests.datasets.ihdp_data import get_ihdp_data from openasce.utils.logger import logger import numpy as np
1,324
# Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.


class TestDML(TestCase):
    def setUp(self) -> None:
        self.train_data, self.test_data = get_ihdp_data()
        np.random.seed(12)
        return super().setUp()

    def test_dml(self):
        np.random.seed(12)
        learner = DML(
            model_y=WeightedLassoCVWrapper(),
            model_t=WeightedLassoCVWrapper(),
            model_final=LassoCV(cv=3),
            categories=[0, 1],
        )
        learner.fit(
            X=self.train_data[self.train_data.columns[5:]]
            .to_numpy()
            .astype(np.float32),
            Y=self.train_data["y_factual"],
            T=self.train_data["treatment"],
        )
        learner.estimate(
            X=self.test_data[self.train_data.columns[5:]].to_numpy().astype(np.float32)
        )
        avg = np.average(learner.get_result())

# Copyright 2023 AntGroup CO., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.


class TestDML(TestCase):
    def setUp(self) -> None:
        self.train_data, self.test_data = get_ihdp_data()
        np.random.seed(12)
        return super().setUp()

    def test_dml(self):
        np.random.seed(12)
        learner = DML(
            model_y=WeightedLassoCVWrapper(),
            model_t=WeightedLassoCVWrapper(),
            model_final=LassoCV(cv=3),
            categories=[0, 1],
        )
        learner.fit(
            X=self.train_data[self.train_data.columns[5:]]
            .to_numpy()
            .astype(np.float32),
            Y=self.train_data["y_factual"],
            T=self.train_data["treatment"],
        )
        learner.estimate(
            X=self.test_data[self.train_data.columns[5:]].to_numpy().astype(np.float32)
        )
        avg = np.average(learner.get_result())
logger.info(f"dml result: {avg}")
2
2023-12-06 05:54:36+00:00
2k
latorc/Wechat-AI-Assistant
chatbot.py
[ { "identifier": "WcfWrapper", "path": "wcf_wrapper.py", "snippet": "class WcfWrapper:\r\n def __init__(self) -> None:\r\n def __del__(self):\r\n def msg_preview_str(self, msg:WxMsg) -> str:\r\n def wxid_to_nickname(self, wxid) -> str:\r\n def wxid_to_wxcode(self, wxid) -> str:\r\n def ...
import queue import re import config import common import openai_wrapper import preset from typing import Tuple from wcf_wrapper import WcfWrapper, ContentType from wcferry import WxMsg from config import AdminCmd from common import ContentType, ChatMsg
1,525
class Chatbot():
    """Manages the WeChat bot logic: the interaction between the WeChat client
    (e.g. Wechat Ferry) and the AI client (e.g. OpenAI)."""

    def __init__(self, config: config.Config, wcfw: WcfWrapper, oaiw: openai_wrapper.OpenAIWrapper) -> None:
        """Initialize.

        args:
            config (Config): Config object
            wcfw (WcfWrapper): Wechat Ferry wrapper object
            oaiw (OpenAIWrapper): AI wrapper object
        """
        self.config = config
        self.wcfw = wcfw
        self.openai_wrapper = oaiw
        self.chat_presets: dict[str, preset.Preset] = {}  # preset for each conversation {roomid or wxid: preset}

    def start_main_loop(self) -> None:
        """Main loop: receive and process WeChat messages. This function blocks the process."""
        while self.wcfw.wcf.is_receiving_msg():
            try:
                msg: WxMsg = self.wcfw.get_msg()
                note = f"收到消息 {self.wcfw.msg_preview_str(msg)}"
                common.logger().info(note)
            except queue.Empty:
                continue  # no message, keep polling
            except Exception as e:
                common.logger().error("接收微信消息错误: %s", common.error_trace(e))

            try:
                self.run_wxmsg(msg)
            except Exception as e:
                common.logger().error("处理消息错误:%s", common.error_trace(e))

    def run_wxmsg(self, msg: WxMsg):
        """Read and process one message.

        args:
            msg (WxMsg): message object. Group id: msg.roomid, sender WeChat id: msg.sender, message content: msg.content
        """
        content = self._filter_wxmsg(msg)
        if content is None:
            return

        # determine the reply target
        if msg.from_group():
            receiver = msg.roomid
            if msg.from_self():
                at_list = ""
            else:
                at_list = msg.sender
        else:  # direct chat
            receiver = msg.sender
            at_list = ""

        # if the sender is an admin and the message is a command, handle it and return
        if self.wcfw.wxid_to_wxcode(msg.sender) in self.config.admins:
            cmd = self._match_admin_cmd(content)
            if cmd:
                try:
                    self.process_admin_cmd(content, receiver, at_list)
                except Exception as e:
                    common.logger().error("执行管理员命令错误: %s", common.error_trace(e))
                    self.wcfw.send_text(f"执行管理员命令'{content}'发生错误", receiver, at_list)
                return

        ### call the AI to process the message
        # callback to handle the AI's returned messages

class Chatbot():
    """Manages the WeChat bot logic: the interaction between the WeChat client
    (e.g. Wechat Ferry) and the AI client (e.g. OpenAI)."""

    def __init__(self, config: config.Config, wcfw: WcfWrapper, oaiw: openai_wrapper.OpenAIWrapper) -> None:
        """Initialize.

        args:
            config (Config): Config object
            wcfw (WcfWrapper): Wechat Ferry wrapper object
            oaiw (OpenAIWrapper): AI wrapper object
        """
        self.config = config
        self.wcfw = wcfw
        self.openai_wrapper = oaiw
        self.chat_presets: dict[str, preset.Preset] = {}  # preset for each conversation {roomid or wxid: preset}

    def start_main_loop(self) -> None:
        """Main loop: receive and process WeChat messages. This function blocks the process."""
        while self.wcfw.wcf.is_receiving_msg():
            try:
                msg: WxMsg = self.wcfw.get_msg()
                note = f"收到消息 {self.wcfw.msg_preview_str(msg)}"
                common.logger().info(note)
            except queue.Empty:
                continue  # no message, keep polling
            except Exception as e:
                common.logger().error("接收微信消息错误: %s", common.error_trace(e))

            try:
                self.run_wxmsg(msg)
            except Exception as e:
                common.logger().error("处理消息错误:%s", common.error_trace(e))

    def run_wxmsg(self, msg: WxMsg):
        """Read and process one message.

        args:
            msg (WxMsg): message object. Group id: msg.roomid, sender WeChat id: msg.sender, message content: msg.content
        """
        content = self._filter_wxmsg(msg)
        if content is None:
            return

        # determine the reply target
        if msg.from_group():
            receiver = msg.roomid
            if msg.from_self():
                at_list = ""
            else:
                at_list = msg.sender
        else:  # direct chat
            receiver = msg.sender
            at_list = ""

        # if the sender is an admin and the message is a command, handle it and return
        if self.wcfw.wxid_to_wxcode(msg.sender) in self.config.admins:
            cmd = self._match_admin_cmd(content)
            if cmd:
                try:
                    self.process_admin_cmd(content, receiver, at_list)
                except Exception as e:
                    common.logger().error("执行管理员命令错误: %s", common.error_trace(e))
                    self.wcfw.send_text(f"执行管理员命令'{content}'发生错误", receiver, at_list)
                return

        ### call the AI to process the message
        # callback to handle the AI's returned messages
def callback_msg(msg:ChatMsg) -> int:
3
2023-12-07 12:17:15+00:00
2k
tensorsense/faceflow
params/datamodule.py
[ { "identifier": "LocalNaturalDatasetCfg", "path": "lib/data/cfg/local.py", "snippet": "class LocalNaturalDatasetCfg:\n name: str\n root: str\n labels_filename: str = \"au.csv\"\n crops_dir: str = \"crops\"\n aus: List[str] = field(\n default_factory=lambda: [\n \"AU1\",\...
import albumentations as A import wandb from albumentations.pytorch import ToTensorV2 from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from lib.data.cfg.local import LocalNaturalDatasetCfg from lib.data.datamodules.vanilla import AUDataModule
1,019
project = "disfa" aus = [ "AU1", "AU2", "AU4", "AU5", "AU6", "AU9", "AU12", "AU15", "AU17", "AU20", "AU26", ] TRAIN_LABELED = [
project = "disfa" aus = [ "AU1", "AU2", "AU4", "AU5", "AU6", "AU9", "AU12", "AU15", "AU17", "AU20", "AU26", ] TRAIN_LABELED = [
LocalNaturalDatasetCfg(
0
2023-12-05 13:15:58+00:00
2k
Psivant/femto
femto/fe/atm/_setup.py
[ { "identifier": "OpenMMForceGroup", "path": "femto/md/constants.py", "snippet": "class OpenMMForceGroup(enum.IntEnum):\n \"\"\"Standard force groups to assign to common OpenMM forces to make them easier to\n identify.\"\"\"\n\n BOND = 0\n ANGLE = 1\n DIHEDRAL = 2\n\n NONBONDED = 3\n\n ...
import logging import tempfile import typing import numpy import openmm import openmm.app import openmm.unit import parmed import scipy.spatial.distance import femto.fe.reference import femto.md.rest import femto.md.restraints import femto.md.solvate import femto.md.system import femto.md.utils.openmm import femto.fe.atm import femto.fe.atm._utils from femto.md.constants import OpenMMForceGroup, OpenMMForceName
1,395
_LOGGER = logging.getLogger(__name__)


def select_displacement(
    receptor: parmed.amber.AmberParm,
    ligand_1: parmed.amber.AmberParm,
    ligand_2: parmed.amber.AmberParm | None,
    distance: openmm.unit.Quantity,
) -> openmm.unit.Quantity:
    """Attempts to automatically select a displacement vector for the ligands.

    Args:
        receptor: The receptor.
        ligand_1: The first ligand positioned in the binding site.
        ligand_2: The second ligand positioned in the binding site.
        distance: The distance to translate ligands along the displacement vector by.

    Returns:
        The displacement vector.
    """
    ligand_coords = numpy.vstack(
        [ligand_1.coordinates] + ([] if ligand_2 is None else [ligand_2.coordinates])
    )
    receptor_coords = receptor.coordinates

    directions = numpy.array(
        [
            [-1.0, -1.0, -1.0],
            [+1.0, -1.0, -1.0],
            [+1.0, +1.0, -1.0],
            [-1.0, +1.0, -1.0],
            [-1.0, -1.0, +1.0],
            [+1.0, -1.0, +1.0],
            [+1.0, +1.0, +1.0],
            [-1.0, +1.0, +1.0],
        ]
    )
    directions /= numpy.linalg.norm(directions, axis=1, keepdims=True)

    closest_distances = []

    for direction in directions:
        displacement = direction * distance.value_in_unit(openmm.unit.angstrom)
        offset_coords = ligand_coords + displacement
        distances = scipy.spatial.distance.cdist(offset_coords, receptor_coords)
        closest_distances.append(distances.min())

    direction = directions[numpy.argmax(closest_distances)]
    return direction.flatten() * distance


def _offset_ligand(
    ligand: parmed.Structure, offset: openmm.unit.Quantity
) -> parmed.Structure:
    """Offsets the coordinates of the specified ligand by a specified amount.

    Args:
        ligand: The ligand to offset.
        offset: The amount to offset the ligand by.

    Returns:
        The offset ligand.
    """
    # we copy in this strange way because parmed doesn't
    # copy all attrs correctly when using copy.deepcopy
    with tempfile.TemporaryDirectory() as tmpdir:
        ligand.save(f"{tmpdir}/ligand.parm7")
        ligand.save(f"{tmpdir}/ligand.mol2")

        ligand = parmed.amber.AmberParm(
            f"{tmpdir}/ligand.parm7", f"{tmpdir}/ligand.mol2"
        )

    for atom in ligand.atoms:
        atom.xx += offset[0].value_in_unit(openmm.unit.angstrom)
        atom.xy += offset[1].value_in_unit(openmm.unit.angstrom)
        atom.xz += offset[2].value_in_unit(openmm.unit.angstrom)

    return ligand


def _apply_atm_restraints(
    system: openmm.System,
    config: "femto.fe.atm.ATMRestraints",
    ligand_1_com_idxs: list[int],
    ligand_1_ref_idxs: tuple[int, int, int] | None,
    ligand_2_com_idxs: list[int] | None,
    ligand_2_ref_idxs: tuple[int, int, int] | None,
    receptor_ref_idxs: list[int],
    offset: openmm.unit.Quantity,
):
    """Adds center of mass (COM) and optionally alignment restraints (if running RBFE)
    to a system.

    Args:
        system: The system to add the constraints to in-place.
        config: The restraint configuration.
        ligand_1_com_idxs: The indices to use when computing the COM of the first ligand.
        ligand_1_ref_idxs: The indices of the first ligand to align on.
        ligand_2_com_idxs: The indices to use when computing the COM of the second ligand.
        ligand_2_ref_idxs: The indices of the second ligand to align on.
        receptor_ref_idxs: The indices of the receptor atoms that form the binding site.
        offset: The vector that the ligand will be offset by during the ATM calculation.
    """
    com_restraint = femto.fe.atm._utils.create_com_restraint(
        ligand_1_com_idxs,
        receptor_ref_idxs,
        config.com.k,
        config.com.radius,
        [0.0, 0.0, 0.0] * openmm.unit.angstrom,
    )
    com_restraint.setForceGroup(OpenMMForceGroup.COM_RESTRAINT)

"""Set up the system for ATM calculations."""

if typing.TYPE_CHECKING:

_LOGGER = logging.getLogger(__name__)


def select_displacement(
    receptor: parmed.amber.AmberParm,
    ligand_1: parmed.amber.AmberParm,
    ligand_2: parmed.amber.AmberParm | None,
    distance: openmm.unit.Quantity,
) -> openmm.unit.Quantity:
    """Attempts to automatically select a displacement vector for the ligands.

    Args:
        receptor: The receptor.
        ligand_1: The first ligand positioned in the binding site.
        ligand_2: The second ligand positioned in the binding site.
        distance: The distance to translate ligands along the displacement vector by.

    Returns:
        The displacement vector.
    """
    ligand_coords = numpy.vstack(
        [ligand_1.coordinates] + ([] if ligand_2 is None else [ligand_2.coordinates])
    )
    receptor_coords = receptor.coordinates

    directions = numpy.array(
        [
            [-1.0, -1.0, -1.0],
            [+1.0, -1.0, -1.0],
            [+1.0, +1.0, -1.0],
            [-1.0, +1.0, -1.0],
            [-1.0, -1.0, +1.0],
            [+1.0, -1.0, +1.0],
            [+1.0, +1.0, +1.0],
            [-1.0, +1.0, +1.0],
        ]
    )
    directions /= numpy.linalg.norm(directions, axis=1, keepdims=True)

    closest_distances = []

    for direction in directions:
        displacement = direction * distance.value_in_unit(openmm.unit.angstrom)
        offset_coords = ligand_coords + displacement
        distances = scipy.spatial.distance.cdist(offset_coords, receptor_coords)
        closest_distances.append(distances.min())

    direction = directions[numpy.argmax(closest_distances)]
    return direction.flatten() * distance


def _offset_ligand(
    ligand: parmed.Structure, offset: openmm.unit.Quantity
) -> parmed.Structure:
    """Offsets the coordinates of the specified ligand by a specified amount.

    Args:
        ligand: The ligand to offset.
        offset: The amount to offset the ligand by.

    Returns:
        The offset ligand.
    """
    # we copy in this strange way because parmed doesn't
    # copy all attrs correctly when using copy.deepcopy
    with tempfile.TemporaryDirectory() as tmpdir:
        ligand.save(f"{tmpdir}/ligand.parm7")
        ligand.save(f"{tmpdir}/ligand.mol2")

        ligand = parmed.amber.AmberParm(
            f"{tmpdir}/ligand.parm7", f"{tmpdir}/ligand.mol2"
        )

    for atom in ligand.atoms:
        atom.xx += offset[0].value_in_unit(openmm.unit.angstrom)
        atom.xy += offset[1].value_in_unit(openmm.unit.angstrom)
        atom.xz += offset[2].value_in_unit(openmm.unit.angstrom)

    return ligand


def _apply_atm_restraints(
    system: openmm.System,
    config: "femto.fe.atm.ATMRestraints",
    ligand_1_com_idxs: list[int],
    ligand_1_ref_idxs: tuple[int, int, int] | None,
    ligand_2_com_idxs: list[int] | None,
    ligand_2_ref_idxs: tuple[int, int, int] | None,
    receptor_ref_idxs: list[int],
    offset: openmm.unit.Quantity,
):
    """Adds center of mass (COM) and optionally alignment restraints (if running RBFE)
    to a system.

    Args:
        system: The system to add the constraints to in-place.
        config: The restraint configuration.
        ligand_1_com_idxs: The indices to use when computing the COM of the first ligand.
        ligand_1_ref_idxs: The indices of the first ligand to align on.
        ligand_2_com_idxs: The indices to use when computing the COM of the second ligand.
        ligand_2_ref_idxs: The indices of the second ligand to align on.
        receptor_ref_idxs: The indices of the receptor atoms that form the binding site.
        offset: The vector that the ligand will be offset by during the ATM calculation.
    """
    com_restraint = femto.fe.atm._utils.create_com_restraint(
        ligand_1_com_idxs,
        receptor_ref_idxs,
        config.com.k,
        config.com.radius,
        [0.0, 0.0, 0.0] * openmm.unit.angstrom,
    )
    com_restraint.setForceGroup(OpenMMForceGroup.COM_RESTRAINT)
com_restraint.setName(OpenMMForceName.COM_RESTRAINT)
1
2023-12-07 15:28:18+00:00
2k
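The femto record above picks a ligand displacement direction by scoring the eight cube diagonals and keeping the one that maximizes the closest ligand-receptor contact after the shift. A minimal, dependency-light sketch of that heuristic (hypothetical pick_displacement, plain angstrom floats in place of openmm unit-tagged quantities):

import numpy as np
from scipy.spatial.distance import cdist

def pick_displacement(ligand_xyz, receptor_xyz, distance):
    # the eight cube diagonals, normalized to unit length
    diagonals = np.array([[sx, sy, sz]
                          for sx in (-1.0, 1.0)
                          for sy in (-1.0, 1.0)
                          for sz in (-1.0, 1.0)])
    diagonals /= np.linalg.norm(diagonals, axis=1, keepdims=True)
    # score each candidate by the closest ligand-receptor contact after the shift
    scores = [cdist(ligand_xyz + d * distance, receptor_xyz).min() for d in diagonals]
    return diagonals[int(np.argmax(scores))] * distance

# toy example: ligand at the origin, receptor sitting in the (+x, +y, +z) octant
ligand = np.zeros((4, 3))
receptor = np.full((10, 3), 5.0)
print(pick_displacement(ligand, receptor, 20.0))  # roughly (-11.5, -11.5, -11.5)

Maximizing the minimum pairwise distance, rather than the mean, guards against a direction that is far on average but still clips through part of the receptor.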
AIFSH/NativeDancer
nativedancer/third_part/detectron2/evaluation/cityscapes_evaluation.py
[ { "identifier": "MetadataCatalog", "path": "nativedancer/third_part/detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> L...
import glob import logging import numpy as np import os import tempfile import torch import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval from collections import OrderedDict from PIL import Image from ..data import MetadataCatalog from ..utils import comm from ..utils.file_io import PathManager from .evaluator import DatasetEvaluator from cityscapesscripts.helpers.labels import name2label from cityscapesscripts.helpers.labels import trainId2label
1,295
# Copyright (c) Facebook, Inc. and its affiliates. class CityscapesEvaluator(DatasetEvaluator): """ Base class for evaluation using cityscapes API. """ def __init__(self, dataset_name): """ Args: dataset_name (str): the name of the dataset. It must have the following metadata associated with it: "thing_classes", "gt_dir". """ self._metadata = MetadataCatalog.get(dataset_name) self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) def reset(self): self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_") self._temp_dir = self._working_dir.name # All workers will write to the same results directory # TODO this does not work in distributed training assert (
# Copyright (c) Facebook, Inc. and its affiliates. class CityscapesEvaluator(DatasetEvaluator): """ Base class for evaluation using cityscapes API. """ def __init__(self, dataset_name): """ Args: dataset_name (str): the name of the dataset. It must have the following metadata associated with it: "thing_classes", "gt_dir". """ self._metadata = MetadataCatalog.get(dataset_name) self._cpu_device = torch.device("cpu") self._logger = logging.getLogger(__name__) def reset(self): self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_") self._temp_dir = self._working_dir.name # All workers will write to the same results directory # TODO this does not work in distributed training assert (
comm.get_local_size() == comm.get_world_size()
1
2023-12-10 20:14:00+00:00
2k
ethanweber/nerfiller
nerfiller/inpaint/saicinpainting/training/modules/base.py
[ { "identifier": "DepthWiseSeperableConv", "path": "nerfiller/inpaint/saicinpainting/training/modules/depthwise_sep_conv.py", "snippet": "class DepthWiseSeperableConv(nn.Module):\n def __init__(self, in_dim, out_dim, *args, **kwargs):\n super().__init__()\n if \"groups\" in kwargs:\n ...
import abc import torch import torch.nn as nn from typing import Tuple, List from nerfiller.inpaint.saicinpainting.training.modules.depthwise_sep_conv import ( DepthWiseSeperableConv, ) from nerfiller.inpaint.saicinpainting.training.modules.multidilated_conv import ( MultidilatedConv, )
1,459
class BaseDiscriminator(nn.Module):
    @abc.abstractmethod
    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """
        Predict scores and get intermediate activations. Useful for feature matching loss
        :return tuple (scores, list of intermediate activations)
        """
        raise NotImplementedError()


def get_conv_block_ctor(kind="default"):
    if not isinstance(kind, str):
        return kind
    if kind == "default":
        return nn.Conv2d
    if kind == "depthwise":
        return DepthWiseSeperableConv
    if kind == "multidilated":
class BaseDiscriminator(nn.Module):
    @abc.abstractmethod
    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        """
        Predict scores and get intermediate activations. Useful for feature matching loss
        :return tuple (scores, list of intermediate activations)
        """
        raise NotImplementedError()


def get_conv_block_ctor(kind="default"):
    if not isinstance(kind, str):
        return kind
    if kind == "default":
        return nn.Conv2d
    if kind == "depthwise":
        return DepthWiseSeperableConv
    if kind == "multidilated":
return MultidilatedConv
1
2023-12-07 19:12:08+00:00
2k
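get_conv_block_ctor in the nerfiller record dispatches on either a config string or an already-supplied constructor. A small usage sketch of the same pattern, restricted to the stock nn.Conv2d branch:

import torch
import torch.nn as nn

def get_conv_block_ctor(kind="default"):
    # accept either a config string or a ready-made constructor
    if not isinstance(kind, str):
        return kind
    if kind == "default":
        return nn.Conv2d
    raise ValueError(f"unknown conv kind: {kind}")

ctor = get_conv_block_ctor("default")
conv = ctor(3, 16, kernel_size=3, padding=1)
print(conv(torch.randn(1, 3, 8, 8)).shape)  # torch.Size([1, 16, 8, 8])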
nnanhuang/Customize-it-3D
ldm/models/diffusion/plms.py
[ { "identifier": "make_ddim_sampling_parameters", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev ...
import torch import numpy as np from tqdm import tqdm from functools import partial from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like from ldm.models.diffusion.sampling_util import norm_thresholding
868
"""SAMPLING ONLY.""" class PLMSSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): if ddim_eta != 0: raise ValueError('ddim_eta must be 0 for PLMS')
"""SAMPLING ONLY.""" class PLMSSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): if ddim_eta != 0: raise ValueError('ddim_eta must be 0 for PLMS')
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
1
2023-12-14 11:03:35+00:00
2k
TaoHuang13/diffusion_reward
diffusion_reward/models/codec_models/vqgan/vqgan.py
[ { "identifier": "Codebook", "path": "diffusion_reward/models/codec_models/vqgan/codebook.py", "snippet": "class Codebook(nn.Module):\n def __init__(self, args):\n super(Codebook, self).__init__()\n self.num_codebook_vectors = args.num_codebook_vectors\n self.latent_dim = args.lat...
import torch import torch.nn as nn from .codebook import Codebook from .decoder import Decoder from .encoder import Encoder
1,162
class VQGAN(nn.Module): def __init__(self, args): super(VQGAN, self).__init__() self.encoder = Encoder(args).to(device=args.device)
class VQGAN(nn.Module): def __init__(self, args): super(VQGAN, self).__init__() self.encoder = Encoder(args).to(device=args.device)
self.decoder = Decoder(args).to(device=args.device)
1
2023-12-05 02:42:28+00:00
2k
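The VQGAN record wires an encoder, a codebook, and a decoder. In most VQ-GAN implementations the codebook step it imports is a nearest-neighbour lookup in latent space; a hedged sketch of that lookup (an assumption about this repo's Codebook, using the standard expansion of the squared distance):

import torch

def nearest_code(z, codebook):
    # ||z - e||^2 = ||z||^2 + ||e||^2 - 2 z.e, minimized over codebook rows
    d = (z.pow(2).sum(1, keepdim=True)
         + codebook.pow(2).sum(1)
         - 2.0 * z @ codebook.t())
    return codebook[d.argmin(dim=1)]

z = torch.randn(5, 64)            # flattened encoder latents
codebook = torch.randn(512, 64)   # num_codebook_vectors x latent_dim
print(nearest_code(z, codebook).shape)  # torch.Size([5, 64])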
its0x4d/fastapi-jet
fastapi_jet/commands/startproject.py
[ { "identifier": "app", "path": "fastapi_jet/cli.py", "snippet": "def _version_callback(value: bool) -> None:\ndef _register_commands() -> None:\ndef main(\n version: Optional[bool] = typer.Option(\n None,\n \"--version\",\n \"-v\",\n help=\"Show the app...
import os import typer from questionary.form import form from fastapi_jet.cli import app from fastapi_jet.context import ProjectContext from fastapi_jet.generator import generate_template from fastapi_jet.utils import binary_question, name_fixer
1,211
@app.command(name="startproject") def startproject( name: str = typer.Argument( ..., help="Name of the project", callback=lambda name: name_fixer(name), metavar="PROJECT_NAME" ), interactive: bool = typer.Option(False, "--interactive", "-i", help="Interactive mode"), use_templates: bool = typer.Option(False, "--use-templates", "-t", help="Use templates"), ): """ Start a new project """ if interactive: project = form(
@app.command(name="startproject") def startproject( name: str = typer.Argument( ..., help="Name of the project", callback=lambda name: name_fixer(name), metavar="PROJECT_NAME" ), interactive: bool = typer.Option(False, "--interactive", "-i", help="Interactive mode"), use_templates: bool = typer.Option(False, "--use-templates", "-t", help="Use templates"), ): """ Start a new project """ if interactive: project = form(
use_templates=binary_question("Do you want to use templates?", default=True),
3
2023-12-12 00:15:53+00:00
2k
WithSecureLabs/damn-vulnerable-llm-agent
main.py
[ { "identifier": "get_current_user_tool", "path": "tools.py", "snippet": "def get_current_user(input : str):\ndef get_transactions(userId : str):" }, { "identifier": "display_instructions", "path": "utils.py", "snippet": "def display_instructions():\n # Markdown with some basic CSS sty...
import langchain import streamlit as st from dotenv import load_dotenv from langchain.agents import ConversationalChatAgent, AgentExecutor from langchain.callbacks import StreamlitCallbackHandler from langchain.chat_models import ChatOpenAI from langchain.memory import ConversationBufferMemory from langchain.memory.chat_message_histories import StreamlitChatMessageHistory from langchain.agents import initialize_agent from langchain.callbacks import get_openai_callback from tools import get_current_user_tool, get_recent_transactions_tool from utils import display_instructions, display_logo
1,244
load_dotenv()

# Initialise tools
tools = [get_current_user_tool, get_recent_transactions_tool]

system_msg = """Assistant helps the current user retrieve the list of their recent bank transactions and shows them as a table.
Assistant will ONLY operate on the userId returned by the GetCurrentUser() tool, and REFUSE to operate on any other userId provided by the user."""

welcome_message = """Hi! I'm a helpful assistant and I can help fetch information about your recent transactions.\n\nTry asking me: "What are my recent transactions?" """

st.set_page_config(page_title="Damn Vulnerable LLM Agent")
st.title("Damn Vulnerable LLM Agent")

hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)

msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(
    chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output"
)

if len(msgs.messages) == 0:
    msgs.clear()
    msgs.add_ai_message(welcome_message)
    st.session_state.steps = {}

avatars = {"human": "user", "ai": "assistant"}
for idx, msg in enumerate(msgs.messages):
    with st.chat_message(avatars[msg.type]):
        # Render intermediate steps if any were saved
        for step in st.session_state.steps.get(str(idx), []):
            if step[0].tool == "_Exception":
                continue
            with st.status(f"**{step[0].tool}**: {step[0].tool_input}", state="complete"):
                st.write(step[0].log)
                st.write(step[1])
        st.write(msg.content)

if prompt := st.chat_input(placeholder="Show my recent transactions"):
    st.chat_message("user").write(prompt)

    llm = ChatOpenAI(
        model_name="gpt-4-1106-preview", temperature=0, streaming=True
    )
    tools = tools
    chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools, verbose=True, system_message=system_msg)
    executor = AgentExecutor.from_agent_and_tools(
        agent=chat_agent,
        tools=tools,
        memory=memory,
        return_intermediate_steps=True,
        handle_parsing_errors=True,
        verbose=True,
        max_iterations=6
    )

    with st.chat_message("assistant"):
        st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
        response = executor(prompt, callbacks=[st_cb])
        st.write(response["output"])
        st.session_state.steps[str(len(msgs.messages) - 1)] = response["intermediate_steps"]

display_instructions()
load_dotenv()

# Initialise tools
tools = [get_current_user_tool, get_recent_transactions_tool]

system_msg = """Assistant helps the current user retrieve the list of their recent bank transactions and shows them as a table.
Assistant will ONLY operate on the userId returned by the GetCurrentUser() tool, and REFUSE to operate on any other userId provided by the user."""

welcome_message = """Hi! I'm a helpful assistant and I can help fetch information about your recent transactions.\n\nTry asking me: "What are my recent transactions?" """

st.set_page_config(page_title="Damn Vulnerable LLM Agent")
st.title("Damn Vulnerable LLM Agent")

hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)

msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(
    chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output"
)

if len(msgs.messages) == 0:
    msgs.clear()
    msgs.add_ai_message(welcome_message)
    st.session_state.steps = {}

avatars = {"human": "user", "ai": "assistant"}
for idx, msg in enumerate(msgs.messages):
    with st.chat_message(avatars[msg.type]):
        # Render intermediate steps if any were saved
        for step in st.session_state.steps.get(str(idx), []):
            if step[0].tool == "_Exception":
                continue
            with st.status(f"**{step[0].tool}**: {step[0].tool_input}", state="complete"):
                st.write(step[0].log)
                st.write(step[1])
        st.write(msg.content)

if prompt := st.chat_input(placeholder="Show my recent transactions"):
    st.chat_message("user").write(prompt)

    llm = ChatOpenAI(
        model_name="gpt-4-1106-preview", temperature=0, streaming=True
    )
    tools = tools
    chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=tools, verbose=True, system_message=system_msg)
    executor = AgentExecutor.from_agent_and_tools(
        agent=chat_agent,
        tools=tools,
        memory=memory,
        return_intermediate_steps=True,
        handle_parsing_errors=True,
        verbose=True,
        max_iterations=6
    )

    with st.chat_message("assistant"):
        st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
        response = executor(prompt, callbacks=[st_cb])
        st.write(response["output"])
        st.session_state.steps[str(len(msgs.messages) - 1)] = response["intermediate_steps"]

display_instructions()
display_logo()
2
2023-12-07 09:37:47+00:00
2k
MarcoGorelli/polars-upgrade
polars_upgrade/_plugins/map_dict.py
[ { "identifier": "ast_to_offset", "path": "polars_upgrade/_ast_helpers.py", "snippet": "def ast_to_offset(node: ast.expr | ast.stmt) -> Offset:\n return Offset(node.lineno, node.col_offset)" }, { "identifier": "register", "path": "polars_upgrade/_data.py", "snippet": "def register(tp: ...
import ast import functools from typing import Iterable from tokenize_rt import NON_CODING_TOKENS from tokenize_rt import Offset from tokenize_rt import Token from polars_upgrade._ast_helpers import ast_to_offset from polars_upgrade._data import register from polars_upgrade._data import State from polars_upgrade._data import TokenFunc from polars_upgrade._token_helpers import find_op from polars_upgrade._token_helpers import is_simple_expression
1,010
from __future__ import annotations def rename( i: int, tokens: list[Token], *, name: str, new: str, ) -> None: while not (tokens[i].name == 'NAME' and tokens[i].src == name): i += 1 tokens[i] = tokens[i]._replace(src=new) def rename_and_add_default( i: int, tokens: list[Token], *, name: str, new: str, ) -> None: while not (tokens[i].name == 'NAME' and tokens[i].src == name): i += 1 tokens[i] = tokens[i]._replace(src=new) start_paren = find_op(tokens, i, '(') close_paren = find_op(tokens, start_paren, ')') # is there a comma before the close paren? i = close_paren - 1 while tokens[i].name in NON_CODING_TOKENS: i -= 1 if ',' not in tokens[i].src: tokens.insert(i + 1, Token('OP', ', ')) tokens.insert(i + 2, Token('NAME', 'default')) tokens.insert(i + 3, Token('OP', '=')) tokens.insert(i + 4, Token('NUMBER', 'None')) else: tokens.insert(i + 1, Token('NAME', 'default')) tokens.insert(i + 2, Token('OP', '=')) tokens.insert(i + 3, Token('NUMBER', 'None')) @register(ast.Call) def visit_Call( state: State, node: ast.Call, parent: ast.AST, ) -> Iterable[tuple[Offset, TokenFunc]]: if ( isinstance(node.func, ast.Attribute) and
from __future__ import annotations def rename( i: int, tokens: list[Token], *, name: str, new: str, ) -> None: while not (tokens[i].name == 'NAME' and tokens[i].src == name): i += 1 tokens[i] = tokens[i]._replace(src=new) def rename_and_add_default( i: int, tokens: list[Token], *, name: str, new: str, ) -> None: while not (tokens[i].name == 'NAME' and tokens[i].src == name): i += 1 tokens[i] = tokens[i]._replace(src=new) start_paren = find_op(tokens, i, '(') close_paren = find_op(tokens, start_paren, ')') # is there a comma before the close paren? i = close_paren - 1 while tokens[i].name in NON_CODING_TOKENS: i -= 1 if ',' not in tokens[i].src: tokens.insert(i + 1, Token('OP', ', ')) tokens.insert(i + 2, Token('NAME', 'default')) tokens.insert(i + 3, Token('OP', '=')) tokens.insert(i + 4, Token('NUMBER', 'None')) else: tokens.insert(i + 1, Token('NAME', 'default')) tokens.insert(i + 2, Token('OP', '=')) tokens.insert(i + 3, Token('NUMBER', 'None')) @register(ast.Call) def visit_Call( state: State, node: ast.Call, parent: ast.AST, ) -> Iterable[tuple[Offset, TokenFunc]]: if ( isinstance(node.func, ast.Attribute) and
is_simple_expression(node.func.value, state.aliases) and
5
2023-12-09 19:31:35+00:00
2k
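The polars-upgrade record rewrites source by mutating a tokenize-rt token stream in place: find the NAME token, _replace its src, and splice in extra tokens for the new default argument. The core round-trip is easy to demonstrate; a minimal sketch (illustrative input only, and it skips the default= insertion the real plugin performs):

from tokenize_rt import src_to_tokens, tokens_to_src

src = "df.map_dict(mapping)\n"
tokens = src_to_tokens(src)
for i, token in enumerate(tokens):
    if token.name == "NAME" and token.src == "map_dict":
        # tokens are NamedTuples, so a rename is a _replace on the src field
        tokens[i] = token._replace(src="replace")
print(tokens_to_src(tokens))  # df.replace(mapping)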
I-am-PUID-0/pd_zurg
main.py
[ { "identifier": "rclone", "path": "rclone_rd/rclone.py", "snippet": "def get_port_from_config(config_file_path, key_type):\ndef setup():\n RCLONEMN_RD = f\"{RCLONEMN}_RD\"\n RCLONEMN_AD = f\"{RCLONEMN}_AD\"\n RCLONEMN_RD = RCLONEMN_AD = RCLONEMN" }, { "identifier...
from base import * from rclone_rd import rclone from cleanup import duplicate_cleanup from update import auto_update import plex_debrid_ as p import zurg as z
720
def main(): logger = get_logger() version = '2.0.1' ascii_art = f''' _______ ______ _______ _______ _______ ( ____ )( __ \ / ___ )|\ /|( ____ )( ____ \\ | ( )|| ( \ ) \/ ) || ) ( || ( )|| ( \/ | (____)|| | ) | / )| | | || (____)|| | | _____)| | | | / / | | | || __)| | ____ | ( | | ) | / / | | | || (\ ( | | \_ ) | ) | (__/ ) / (_/\| (___) || ) \ \__| (___) | |/ (______/_____(_______/(_______)|/ \__/(_______) (_____) Version: {version} ''' logger.info(ascii_art.format(version=version) + "\n" + "\n") def healthcheck(): while True: time.sleep(10) try: result = subprocess.run(['python', 'healthcheck.py'], capture_output=True, text=True) if result.stderr: logger.error(result.stderr.strip()) except Exception as e: logger.error('Error running healthcheck.py: %s', e) time.sleep(50) thread = threading.Thread(target=healthcheck) thread.daemon = True thread.start() try: if ZURG is None or str(ZURG).lower() == 'false': pass elif str(ZURG).lower() == 'true': try: if RDAPIKEY or ADAPIKEY: try: z.setup.zurg_setup() z_updater = z.update.ZurgUpdate() if ZURGUPDATE:
def main(): logger = get_logger() version = '2.0.1' ascii_art = f''' _______ ______ _______ _______ _______ ( ____ )( __ \ / ___ )|\ /|( ____ )( ____ \\ | ( )|| ( \ ) \/ ) || ) ( || ( )|| ( \/ | (____)|| | ) | / )| | | || (____)|| | | _____)| | | | / / | | | || __)| | ____ | ( | | ) | / / | | | || (\ ( | | \_ ) | ) | (__/ ) / (_/\| (___) || ) \ \__| (___) | |/ (______/_____(_______/(_______)|/ \__/(_______) (_____) Version: {version} ''' logger.info(ascii_art.format(version=version) + "\n" + "\n") def healthcheck(): while True: time.sleep(10) try: result = subprocess.run(['python', 'healthcheck.py'], capture_output=True, text=True) if result.stderr: logger.error(result.stderr.strip()) except Exception as e: logger.error('Error running healthcheck.py: %s', e) time.sleep(50) thread = threading.Thread(target=healthcheck) thread.daemon = True thread.start() try: if ZURG is None or str(ZURG).lower() == 'false': pass elif str(ZURG).lower() == 'true': try: if RDAPIKEY or ADAPIKEY: try: z.setup.zurg_setup() z_updater = z.update.ZurgUpdate() if ZURGUPDATE:
z_updater.auto_update('Zurg',True)
2
2023-12-05 14:49:38+00:00
2k
JeffersonQin/DungeonAssistant
registration.py
[ { "identifier": "o3dobj", "path": "utils/o3dobj.py", "snippet": "def get_o3d_unit_block_at_origin():\ndef get_o3d_trajectory_object(points, color=(1, 0, 0)):\n def transform_o3d_format(points):" }, { "identifier": "io", "path": "utils/io.py", "snippet": "def load_point_clouds(\n po...
import json import argparse import os import os.path as osp import time import open3d as o3d import numpy as np import copy import matplotlib.pyplot as plt from utils import o3dobj from utils import io from utils import tfm
1,505
default=0.05, help="voxel size for global fast registration downsampling. default is 0.05", ) parser.add_argument( "--voxel_size_icp", type=float, default=0.05, help="voxel size for icp downsampling. default is 0.05", ) parser.add_argument("--skip_icp", action="store_true", help="skip icp and only run fgr") parser.add_argument( "--transformed_trajectory_out", type=str, default="trajectory_1.jsonl", help="output trajectory of the transformed trajectory 1 (to trajectory 2)", ) args = parser.parse_args() pointcloud_file_path_1 = args.pointcloud1 pointcloud_file_path_2 = args.pointcloud2 trajectory_file_path_1 = args.trajectory1 trajectory_file_path_2 = args.trajectory2 def preprocess_point_cloud(pcd, voxel_size): """Downsamples the point cloud and computes the normals and FPFH features""" print(f":: Downsample with a voxel size {voxel_size:.3f}.") pcd_down = pcd.voxel_down_sample(voxel_size) radius_normal = voxel_size * 2 print(f":: Estimate normal with search radius {radius_normal:.3f}.") pcd_down.estimate_normals( o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30) ) radius_feature = voxel_size * 5 print(f":: Compute FPFH feature with search radius {radius_feature:.3f}.") pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature( pcd_down, o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100), ) return pcd_down, pcd_fpfh def prepare_dataset(voxel_size): """Loads two point clouds and downsamples them.""" print(":: Load two point clouds") source = o3d.io.read_point_cloud(pointcloud_file_path_1) target = o3d.io.read_point_cloud(pointcloud_file_path_2) source_down, source_fpfh = preprocess_point_cloud(source, voxel_size) target_down, target_fpfh = preprocess_point_cloud(target, voxel_size) return source, target, source_down, target_down, source_fpfh, target_fpfh def execute_fast_global_registration( source_down, target_down, source_fpfh, target_fpfh, voxel_size ): """Performs fast global registration on the downsampled point clouds""" distance_threshold = voxel_size * 0.5 print( f":: Apply fast global registration with distance threshold {distance_threshold:.3f}" ) result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching( source_down, target_down, source_fpfh, target_fpfh, o3d.pipelines.registration.FastGlobalRegistrationOption( maximum_correspondence_distance=distance_threshold ), ) return result def execute_vanilla_icp(source, target): """Performs vanilla ICP on the point clouds""" estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane() max_correspondence_distance = 0.5 # Convergence-Criteria for Vanilla ICP criteria = o3d.pipelines.registration.ICPConvergenceCriteria( relative_fitness=0.000001, relative_rmse=0.000001, max_iteration=50 ) result = o3d.pipelines.registration.registration_icp( source, target, max_correspondence_distance, estimation_method=estimation, criteria=criteria, ) return result if __name__ == "__main__": voxel_size_fgr = args.voxel_size_fgr voxel_size_icp = args.voxel_size_icp ( cloud_1, cloud_2, cloud_1_down, cloud_2_down, cloud_1_fpfh, cloud_2_fpfh, ) = prepare_dataset(voxel_size=voxel_size_fgr) color_1 = [0.9450980392, 0.5764705882, 0.7098039216] color_2 = [0.11, 0.72, 0.89] cloud_1.paint_uniform_color(color_1) cloud_2.paint_uniform_color(color_2) cloud_1_down.paint_uniform_color(color_1) cloud_2_down.paint_uniform_color(color_2) # axis axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0]) # unit block
parser = argparse.ArgumentParser() parser.add_argument( "--pointcloud1", type=str, default="pointcloud1.ply", help="first point cloud file path (1 --[transform]-> 2)", ) parser.add_argument( "--pointcloud2", type=str, default="pointcloud2.ply", help="second point cloud file path (1 --[transform]-> 2)", ) parser.add_argument( "--trajectory1", type=str, default="trajectory1.json", help="first trajectory file path", ) parser.add_argument( "--trajectory2", type=str, default="trajectory2.json", help="second trajectory file path", ) parser.add_argument( "--fast_cache", type=str, default="", help="transformation cache of fast global registration if available. default is none", ) parser.add_argument( "--icp_cache", type=str, default="", help="transformation cache of icp if available. default is none", ) parser.add_argument( "--voxel_size_fgr", type=float, default=0.05, help="voxel size for global fast registration downsampling. default is 0.05", ) parser.add_argument( "--voxel_size_icp", type=float, default=0.05, help="voxel size for icp downsampling. default is 0.05", ) parser.add_argument("--skip_icp", action="store_true", help="skip icp and only run fgr") parser.add_argument( "--transformed_trajectory_out", type=str, default="trajectory_1.jsonl", help="output trajectory of the transformed trajectory 1 (to trajectory 2)", ) args = parser.parse_args() pointcloud_file_path_1 = args.pointcloud1 pointcloud_file_path_2 = args.pointcloud2 trajectory_file_path_1 = args.trajectory1 trajectory_file_path_2 = args.trajectory2 def preprocess_point_cloud(pcd, voxel_size): """Downsamples the point cloud and computes the normals and FPFH features""" print(f":: Downsample with a voxel size {voxel_size:.3f}.") pcd_down = pcd.voxel_down_sample(voxel_size) radius_normal = voxel_size * 2 print(f":: Estimate normal with search radius {radius_normal:.3f}.") pcd_down.estimate_normals( o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30) ) radius_feature = voxel_size * 5 print(f":: Compute FPFH feature with search radius {radius_feature:.3f}.") pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature( pcd_down, o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100), ) return pcd_down, pcd_fpfh def prepare_dataset(voxel_size): """Loads two point clouds and downsamples them.""" print(":: Load two point clouds") source = o3d.io.read_point_cloud(pointcloud_file_path_1) target = o3d.io.read_point_cloud(pointcloud_file_path_2) source_down, source_fpfh = preprocess_point_cloud(source, voxel_size) target_down, target_fpfh = preprocess_point_cloud(target, voxel_size) return source, target, source_down, target_down, source_fpfh, target_fpfh def execute_fast_global_registration( source_down, target_down, source_fpfh, target_fpfh, voxel_size ): """Performs fast global registration on the downsampled point clouds""" distance_threshold = voxel_size * 0.5 print( f":: Apply fast global registration with distance threshold {distance_threshold:.3f}" ) result = o3d.pipelines.registration.registration_fgr_based_on_feature_matching( source_down, target_down, source_fpfh, target_fpfh, o3d.pipelines.registration.FastGlobalRegistrationOption( maximum_correspondence_distance=distance_threshold ), ) return result def execute_vanilla_icp(source, target): """Performs vanilla ICP on the point clouds""" estimation = o3d.pipelines.registration.TransformationEstimationPointToPlane() max_correspondence_distance = 0.5 # Convergence-Criteria for Vanilla ICP criteria = o3d.pipelines.registration.ICPConvergenceCriteria( 
relative_fitness=0.000001, relative_rmse=0.000001, max_iteration=50 ) result = o3d.pipelines.registration.registration_icp( source, target, max_correspondence_distance, estimation_method=estimation, criteria=criteria, ) return result if __name__ == "__main__": voxel_size_fgr = args.voxel_size_fgr voxel_size_icp = args.voxel_size_icp ( cloud_1, cloud_2, cloud_1_down, cloud_2_down, cloud_1_fpfh, cloud_2_fpfh, ) = prepare_dataset(voxel_size=voxel_size_fgr) color_1 = [0.9450980392, 0.5764705882, 0.7098039216] color_2 = [0.11, 0.72, 0.89] cloud_1.paint_uniform_color(color_1) cloud_2.paint_uniform_color(color_2) cloud_1_down.paint_uniform_color(color_1) cloud_2_down.paint_uniform_color(color_2) # axis axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0]) # unit block
unit_block = o3dobj.get_o3d_unit_block_at_origin()
0
2023-12-08 19:52:08+00:00
2k
KAIST-VICLab/From_Ground_To_Objects
networks/depth_decoder.py
[ { "identifier": "ConvBlock", "path": "networks/layers.py", "snippet": "class ConvBlock(nn.Module):\r\n \"\"\"Layer to perform a convolution followed by ELU\r\n \"\"\"\r\n\r\n def __init__(self, in_channels, out_channels):\r\n super(ConvBlock, self).__init__()\r\n\r\n self.conv = C...
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from collections import OrderedDict from .layers import ConvBlock, Conv3x3, upsample, disp_to_depth, coords_to_normals from timm.models.layers import trunc_normal_ from .cadepth import SPM, DEM
1,539
# Copyright Niantic 2021. Patent Pending. All rights reserved. # # This software is licensed under the terms of the ManyDepth licence # which allows for non-commercial use only, the full terms of which are made # available in the LICENSE file. class DepthDecoder(nn.Module): def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True, opt=None, backproject_depth=None, min_depth=0.1, max_depth=100): super(DepthDecoder, self).__init__() self.num_output_channels = num_output_channels self.use_skips = use_skips self.upsample_mode = 'nearest' self.scales = scales self.opt = opt self.num_ch_enc = num_ch_enc self.num_ch_dec = np.array([16, 32, 64, 128, 256]) self.backproject_depth = backproject_depth self.min_depth = min_depth self.max_depth = max_depth # decoder self.convs = OrderedDict() for i in range(4, -1, -1): # upconv_0 num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1] if self.opt["use_surface_normal"] and i != 4: num_ch_in += 3 num_ch_out = self.num_ch_dec[i] self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out) # upconv_1 num_ch_in = self.num_ch_dec[i] if self.use_skips and i > 0: num_ch_in += self.num_ch_enc[i - 1] num_ch_out = self.num_ch_dec[i] self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out) if self.opt['cadepth']: self.convs[("dem", i)] = DEM(num_ch_in) for s in self.scales:
# Copyright Niantic 2021. Patent Pending. All rights reserved. # # This software is licensed under the terms of the ManyDepth licence # which allows for non-commercial use only, the full terms of which are made # available in the LICENSE file. class DepthDecoder(nn.Module): def __init__(self, num_ch_enc, scales=range(4), num_output_channels=1, use_skips=True, opt=None, backproject_depth=None, min_depth=0.1, max_depth=100): super(DepthDecoder, self).__init__() self.num_output_channels = num_output_channels self.use_skips = use_skips self.upsample_mode = 'nearest' self.scales = scales self.opt = opt self.num_ch_enc = num_ch_enc self.num_ch_dec = np.array([16, 32, 64, 128, 256]) self.backproject_depth = backproject_depth self.min_depth = min_depth self.max_depth = max_depth # decoder self.convs = OrderedDict() for i in range(4, -1, -1): # upconv_0 num_ch_in = self.num_ch_enc[-1] if i == 4 else self.num_ch_dec[i + 1] if self.opt["use_surface_normal"] and i != 4: num_ch_in += 3 num_ch_out = self.num_ch_dec[i] self.convs[("upconv", i, 0)] = ConvBlock(num_ch_in, num_ch_out) # upconv_1 num_ch_in = self.num_ch_dec[i] if self.use_skips and i > 0: num_ch_in += self.num_ch_enc[i - 1] num_ch_out = self.num_ch_dec[i] self.convs[("upconv", i, 1)] = ConvBlock(num_ch_in, num_ch_out) if self.opt['cadepth']: self.convs[("dem", i)] = DEM(num_ch_in) for s in self.scales:
self.convs[("dispconv", s)] = Conv3x3(self.num_ch_dec[s], self.num_output_channels)
1
2023-12-12 08:29:30+00:00
2k
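The depth decoder above carries min_depth and max_depth and imports a disp_to_depth helper. The widespread monodepth convention for that helper is linear in disparity, then inverted; a sketch under the assumption this repo follows the Monodepth2 formula:

import torch

def disp_to_depth(disp, min_depth, max_depth):
    # sigmoid disparity in [0, 1] -> disparity in [1/max_depth, 1/min_depth] -> depth
    min_disp = 1.0 / max_depth
    max_disp = 1.0 / min_depth
    scaled_disp = min_disp + (max_disp - min_disp) * disp
    return scaled_disp, 1.0 / scaled_disp

disp = torch.tensor([0.0, 0.5, 1.0])
_, depth = disp_to_depth(disp, min_depth=0.1, max_depth=100.0)
print(depth)  # tensor([100.0000, 0.1998, 0.1000])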
marc-rigter/polygrad-world-models
polygrad/agent/a2c.py
[ { "identifier": "EMA", "path": "polygrad/utils/training.py", "snippet": "class EMA():\n '''\n empirical moving average\n '''\n def __init__(self, beta):\n super().__init__()\n self.beta = beta\n\n def update_model_average(self, ma_model, current_model):\n for curr...
import torch import copy import torch.nn as nn import copy import torch.nn.functional as F import torch.distributions as D import importlib import wandb from torch import Tensor from polygrad.utils.training import EMA from .functions import * from .common import * from polygrad.utils.evaluation import get_standardized_stats
1,009
class ActorCritic(nn.Module): def __init__(self, in_dim, out_actions, normalizer, device="cuda:0", hidden_dim=256, min_std=0.01, fixed_std=False, decay_std_steps=500000, init_std=0.5, hidden_layers=2, layer_norm=True, gamma=0.999, ema=0.995, lambda_gae=0.8, entropy_weight=1e-3, entropy_target=-1, tune_entropy=True, target_interval=100, lr_actor=1e-4, lr_critic=3e-4, lr_alpha=1e-2, actor_grad='reinforce', actor_dist='normal_tanh', normalize_adv=False, grad_clip=None, clip_logprob=True, min_logprob=-10.0, learned_std=False, ac_use_normed_inputs=True, target_update=0.02, tune_actor_lr=3e-4, lr_schedule='constant', lr_decay_steps=1000000, log_interval=20000, linesearch=False, linesearch_tolerance=0.25, linesearch_ratio=0.8, **kwargs ): super().__init__() self.in_dim = in_dim self.action_dim = out_actions self.gamma = gamma self.lambda_ = lambda_gae self.target_interval = target_interval self.actor_grad = actor_grad self.actor_dist = actor_dist self.min_std = min_std self.clip_logprob = clip_logprob self.normalizer = normalizer self.min_logprob = min_logprob * self.action_dim self.learned_std = learned_std self.fixed_std = fixed_std self.decay_std_steps = decay_std_steps self.init_std = init_std self.current_std = init_std self.use_normed_inputs = ac_use_normed_inputs self.lr_decay_steps = lr_decay_steps self.log_interval = log_interval self.last_log = -float('inf') self.linesearch = linesearch self.linesearch_tolerance = linesearch_tolerance self.linesearch_ratio = linesearch_ratio if not self.fixed_std and not self.learned_std: actor_out_dim = 2 * out_actions else: actor_out_dim = out_actions self.actor = MLP(in_dim, actor_out_dim, hidden_dim, hidden_layers, layer_norm).to(device) self.critic = MLP(in_dim, 1, hidden_dim, hidden_layers, layer_norm).to(device) self.critic_target = copy.deepcopy(self.critic) self.critic_target.requires_grad_(False)
class ActorCritic(nn.Module): def __init__(self, in_dim, out_actions, normalizer, device="cuda:0", hidden_dim=256, min_std=0.01, fixed_std=False, decay_std_steps=500000, init_std=0.5, hidden_layers=2, layer_norm=True, gamma=0.999, ema=0.995, lambda_gae=0.8, entropy_weight=1e-3, entropy_target=-1, tune_entropy=True, target_interval=100, lr_actor=1e-4, lr_critic=3e-4, lr_alpha=1e-2, actor_grad='reinforce', actor_dist='normal_tanh', normalize_adv=False, grad_clip=None, clip_logprob=True, min_logprob=-10.0, learned_std=False, ac_use_normed_inputs=True, target_update=0.02, tune_actor_lr=3e-4, lr_schedule='constant', lr_decay_steps=1000000, log_interval=20000, linesearch=False, linesearch_tolerance=0.25, linesearch_ratio=0.8, **kwargs ): super().__init__() self.in_dim = in_dim self.action_dim = out_actions self.gamma = gamma self.lambda_ = lambda_gae self.target_interval = target_interval self.actor_grad = actor_grad self.actor_dist = actor_dist self.min_std = min_std self.clip_logprob = clip_logprob self.normalizer = normalizer self.min_logprob = min_logprob * self.action_dim self.learned_std = learned_std self.fixed_std = fixed_std self.decay_std_steps = decay_std_steps self.init_std = init_std self.current_std = init_std self.use_normed_inputs = ac_use_normed_inputs self.lr_decay_steps = lr_decay_steps self.log_interval = log_interval self.last_log = -float('inf') self.linesearch = linesearch self.linesearch_tolerance = linesearch_tolerance self.linesearch_ratio = linesearch_ratio if not self.fixed_std and not self.learned_std: actor_out_dim = 2 * out_actions else: actor_out_dim = out_actions self.actor = MLP(in_dim, actor_out_dim, hidden_dim, hidden_layers, layer_norm).to(device) self.critic = MLP(in_dim, 1, hidden_dim, hidden_layers, layer_norm).to(device) self.critic_target = copy.deepcopy(self.critic) self.critic_target.requires_grad_(False)
self.ema = EMA(ema)
0
2023-12-12 21:05:26+00:00
2k
Chat-3D/Chat-3D-v2
utils/logger.py
[ { "identifier": "get_rank", "path": "utils/distributed.py", "snippet": "def get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()" }, { "identifier": "is_main_process", "path": "utils/distributed.py", "snippet": "def is_main_process():\n ...
import functools import logging import os import sys import time import wandb import torch from typing import Any, Dict, Union from .distributed import get_rank, is_main_process from termcolor import colored from torch.utils.tensorboard import SummaryWriter
831
# from MMF: https://github.com/facebookresearch/mmf/blob/master/mmf/utils/logger.py
# Copyright (c) Facebook, Inc. and its affiliates.

def log_dict_to_wandb(log_dict, step, prefix=""):
    """include a separator `/` at the end of `prefix`"""
    if not is_main_process():
        return

    log_dict = {f"{prefix}{k}": v for k, v in log_dict.items()}
    wandb.log(log_dict, step)


def setup_wandb(config):
    if not (config.wandb.enable and is_main_process()):
        return

    run = wandb.init(
        config=config,
        project=config.wandb.project,
        entity=config.wandb.entity,
        name=os.path.basename(config.output_dir),
        reinit=True
    )
    return run


def setup_output_folder(save_dir: str, folder_only: bool = False):
    """Sets up and returns the output file where the logs will be placed
    based on the configuration passed. Usually "save_dir/logs/log_<timestamp>.txt".
    If env.log_dir is passed, logs will be directly saved in this folder.

    Args:
        folder_only (bool, optional): If folder should be returned and not the file.
            Defaults to False.

    Returns:
        str: folder or file path depending on folder_only flag
    """
    log_filename = "train_"
    log_filename += time.strftime("%Y_%m_%dT%H_%M_%S")
    log_filename += ".log"

    log_folder = os.path.join(save_dir, "logs")

    if not os.path.exists(log_folder):
        os.makedirs(log_folder)

    if folder_only:
        return log_folder

    log_filename = os.path.join(log_folder, log_filename)

    return log_filename


def setup_logger(
    output: str = None,
    color: bool = True,
    name: str = "mmf",
    disable: bool = False,
    clear_handlers=True,
    *args,
    **kwargs,
):
    """
    Initialize the MMF logger and set its verbosity level to "INFO".
    Outside libraries shouldn't call this in case they have set their own logging
    handlers and setup. If they do, and don't want to clear handlers, pass the
    clear_handlers option.
    The initial version of this function was taken from D2 and adapted for MMF.

    Args:
        output (str): a file name or a directory to save log.
            If ends with ".txt" or ".log", assumed to be a file name.
            Default: Saved to file <save_dir/logs/log_[timestamp].txt>
        color (bool): If false, won't log colored logs. Default: true
        name (str): the root module name of this logger. Defaults to "mmf".
        disable: do not use
        clear_handlers (bool): If false, won't clear existing handlers.

    Returns:
        logging.Logger: a logger
    """
    if disable:
        return None
    logger = logging.getLogger(name)
    logger.propagate = False

    logging.captureWarnings(True)
    warnings_logger = logging.getLogger("py.warnings")

    plain_formatter = logging.Formatter(
        "%(asctime)s | %(levelname)s | %(name)s : %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S",
    )
# from MMF: https://github.com/facebookresearch/mmf/blob/master/mmf/utils/logger.py
# Copyright (c) Facebook, Inc. and its affiliates.

def log_dict_to_wandb(log_dict, step, prefix=""):
    """include a separator `/` at the end of `prefix`"""
    if not is_main_process():
        return

    log_dict = {f"{prefix}{k}": v for k, v in log_dict.items()}
    wandb.log(log_dict, step)


def setup_wandb(config):
    if not (config.wandb.enable and is_main_process()):
        return

    run = wandb.init(
        config=config,
        project=config.wandb.project,
        entity=config.wandb.entity,
        name=os.path.basename(config.output_dir),
        reinit=True
    )
    return run


def setup_output_folder(save_dir: str, folder_only: bool = False):
    """Sets up and returns the output file where the logs will be placed
    based on the configuration passed. Usually "save_dir/logs/log_<timestamp>.txt".
    If env.log_dir is passed, logs will be directly saved in this folder.

    Args:
        folder_only (bool, optional): If folder should be returned and not the file.
            Defaults to False.

    Returns:
        str: folder or file path depending on folder_only flag
    """
    log_filename = "train_"
    log_filename += time.strftime("%Y_%m_%dT%H_%M_%S")
    log_filename += ".log"

    log_folder = os.path.join(save_dir, "logs")

    if not os.path.exists(log_folder):
        os.makedirs(log_folder)

    if folder_only:
        return log_folder

    log_filename = os.path.join(log_folder, log_filename)

    return log_filename


def setup_logger(
    output: str = None,
    color: bool = True,
    name: str = "mmf",
    disable: bool = False,
    clear_handlers=True,
    *args,
    **kwargs,
):
    """
    Initialize the MMF logger and set its verbosity level to "INFO".
    Outside libraries shouldn't call this in case they have set their own logging
    handlers and setup. If they do, and don't want to clear handlers, pass the
    clear_handlers option.
    The initial version of this function was taken from D2 and adapted for MMF.

    Args:
        output (str): a file name or a directory to save log.
            If ends with ".txt" or ".log", assumed to be a file name.
            Default: Saved to file <save_dir/logs/log_[timestamp].txt>
        color (bool): If false, won't log colored logs. Default: true
        name (str): the root module name of this logger. Defaults to "mmf".
        disable: do not use
        clear_handlers (bool): If false, won't clear existing handlers.

    Returns:
        logging.Logger: a logger
    """
    if disable:
        return None
    logger = logging.getLogger(name)
    logger.propagate = False

    logging.captureWarnings(True)
    warnings_logger = logging.getLogger("py.warnings")

    plain_formatter = logging.Formatter(
        "%(asctime)s | %(levelname)s | %(name)s : %(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S",
    )
distributed_rank = get_rank()
0
2023-12-11 14:39:58+00:00
2k
SqueezeBits/owlite
owlite/calib/mse_calibrator.py
[ { "identifier": "log", "path": "owlite/logger.py", "snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n ...
import torch from ..logger import log from ._histogram_calibrator import _HistogramCalibrator
1,461
"""MSE(Mean Squared Error) calibrator""" class MSECalibrator(_HistogramCalibrator): """MSE Calibrator Class""" def update(self): # update step_size using "mse" if self.quantizer.histogram is None or self.quantizer.bin_edges is None:
"""MSE(Mean Squared Error) calibrator""" class MSECalibrator(_HistogramCalibrator): """MSE Calibrator Class""" def update(self): # update step_size using "mse" if self.quantizer.histogram is None or self.quantizer.bin_edges is None:
log.error(f"quantizer.histogram : {self.quantizer.histogram}")
0
2023-12-08 06:41:50+00:00
2k
ximinng/PyTorch-SVGRender
pytorch_svgrender/svgtools/process.py
[ { "identifier": "circle_tag", "path": "pytorch_svgrender/svgtools/shape.py", "snippet": "def circle_tag(cx: float, cy: float, r: float, transform: str = None):\n attrib = {\n 'cx': f'{cx}', 'cy': f'{cy}', 'r': f'{r}'\n }\n if transform is not None:\n attrib['transform'] = transfor...
import xml.etree.ElementTree as ET import omegaconf from typing import Tuple from .shape import circle_tag, rect_tag from .type import is_valid_svg
768
# -*- coding: utf-8 -*- # Author: ximing # Description: process # Copyright (c) 2023, XiMing Xing. # License: MIT License def delete_empty_path(input_svg: str, output_svg: str): is_valid_svg(input_svg) # read svg tree = ET.parse(input_svg) root = tree.getroot() group = ET.Element('g') for i, element in enumerate(root.iter()): element.tag = element.tag.split('}')[-1] if element.tag == 'path': if element.get('d') == 'C NaN NaN' or element.get('d') == '': continue group.append(element) # new svg svg = ET.Element('svg', xmlns="http://www.w3.org/2000/svg", version='1.1', width=root.get('width'), height=root.get('height'), viewBox=root.get('viewBox')) svg.append(group) tree = ET.ElementTree(svg) tree.write(output_svg, encoding='utf-8', xml_declaration=True) def add_clipPath2def(mounted_node: ET.Element, tag_name: str, attrs: omegaconf.DictConfig): # add defs node defs = ET.SubElement(mounted_node, 'defs') # parent=mounted_node, tag='defs' if tag_name == 'none': return None # add clipPath node id = 'def_clip' _circleClip = ET.SubElement(defs, 'clipPath', id='def_clip') # parent=defs, tag='clipPath' # add ops if tag_name == 'circle_clip': _circleClip.append(
# -*- coding: utf-8 -*- # Author: ximing # Description: process # Copyright (c) 2023, XiMing Xing. # License: MIT License def delete_empty_path(input_svg: str, output_svg: str): is_valid_svg(input_svg) # read svg tree = ET.parse(input_svg) root = tree.getroot() group = ET.Element('g') for i, element in enumerate(root.iter()): element.tag = element.tag.split('}')[-1] if element.tag == 'path': if element.get('d') == 'C NaN NaN' or element.get('d') == '': continue group.append(element) # new svg svg = ET.Element('svg', xmlns="http://www.w3.org/2000/svg", version='1.1', width=root.get('width'), height=root.get('height'), viewBox=root.get('viewBox')) svg.append(group) tree = ET.ElementTree(svg) tree.write(output_svg, encoding='utf-8', xml_declaration=True) def add_clipPath2def(mounted_node: ET.Element, tag_name: str, attrs: omegaconf.DictConfig): # add defs node defs = ET.SubElement(mounted_node, 'defs') # parent=mounted_node, tag='defs' if tag_name == 'none': return None # add clipPath node id = 'def_clip' _circleClip = ET.SubElement(defs, 'clipPath', id='def_clip') # parent=defs, tag='clipPath' # add ops if tag_name == 'circle_clip': _circleClip.append(
circle_tag(cx=attrs.cx, cy=attrs.cy, r=attrs.r)
0
2023-12-13 08:18:01+00:00
2k
lyhisme/DeST
libs/models/SP.py
[ { "identifier": "Graph", "path": "libs/models/graph/graph.py", "snippet": "class Graph:\n def __init__(self, labeling_mode='spatial', layout='MCFS-22'):\n\n self.get_edge(layout)\n self.A = self.get_adjacency_matrix(labeling_mode)\n\n def get_edge(self, layout):\n if layout ==...
import torch import torch.nn as nn import numpy as np from .graph.graph import Graph from .graph.tools import k_adjacency, normalize_adjacency_matrix, get_adjacency_matrix
1,214
class MultiScale_GraphConv(nn.Module): def __init__(self, num_scales, # 13 in_channels, out_channels, dataset, disentangled_agg=True, use_mask=True, dropout=0, activation='relu'): super().__init__()
class MultiScale_GraphConv(nn.Module): def __init__(self, num_scales, # 13 in_channels, out_channels, dataset, disentangled_agg=True, use_mask=True, dropout=0, activation='relu'): super().__init__()
self.graph = Graph(labeling_mode='spatial', layout=dataset)
0
2023-12-12 02:27:15+00:00
2k
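MultiScale_GraphConv in the DeST record builds its multi-scale adjacency from the imported k_adjacency and normalize_adjacency_matrix helpers. A toy sketch of what such helpers commonly compute (assumed MS-G3D-style definitions, not necessarily this repo's exact ones):

import numpy as np

def normalize_adjacency_matrix(a):
    # symmetric normalization D^-1/2 (A + I) D^-1/2, GCN-style
    a_hat = a + np.eye(a.shape[0])
    d_inv_sqrt = np.diag(1.0 / np.sqrt(a_hat.sum(axis=1)))
    return d_inv_sqrt @ a_hat @ d_inv_sqrt

def k_adjacency(a, k):
    # nodes reachable in exactly k hops: the "disentangled" scales
    eye = np.eye(a.shape[0])
    if k == 0:
        return eye
    a_k = np.minimum(np.linalg.matrix_power(a + eye, k), 1)
    a_km1 = np.minimum(np.linalg.matrix_power(a + eye, k - 1), 1)
    return a_k - a_km1

A = np.array([[0, 1, 0, 0], [1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0]], dtype=float)
print(k_adjacency(A, 2))  # only the exactly-2-hop pairs (0,2) and (1,3)

Subtracting the (k-1)-hop reachability keeps each scale disjoint, so distant joints are not drowned out by the much denser short-range connections.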
soCzech/GenHowTo
genhowto.py
[ { "identifier": "load_genhowto_model", "path": "genhowto_utils.py", "snippet": "def load_genhowto_model(weights_path, device=\"cpu\"):\n with open(os.path.join(weights_path, \"GenHowTo_controlnet_config.json\")) as file:\n gef_controlnet_config = json.load(file)\n\n controlnet = ControlNetM...
import os import math import torch import argparse import numpy as np from PIL import Image from genhowto_utils import load_genhowto_model, DDIMSkipScheduler
1,103
def main(args): if os.path.exists(args.output_path): print(f"{args.output_path} already exists.") return pipe = load_genhowto_model(args.weights_path, device=args.device) pipe.scheduler.set_timesteps(args.num_inference_steps) if args.num_steps_to_skip is not None: # possibly do not start from complete noise
def main(args): if os.path.exists(args.output_path): print(f"{args.output_path} already exists.") return pipe = load_genhowto_model(args.weights_path, device=args.device) pipe.scheduler.set_timesteps(args.num_inference_steps) if args.num_steps_to_skip is not None: # possibly do not start from complete noise
pipe.scheduler = DDIMSkipScheduler.from_config(pipe.scheduler.config)
1
2023-12-11 08:47:51+00:00
2k
bolna-ai/bolna
bolna/helpers/utils.py
[ { "identifier": "configure_logger", "path": "bolna/helpers/logger_config.py", "snippet": "def configure_logger(file_name, enabled=True, logging_level='INFO'):\n if logging_level not in VALID_LOGGING_LEVELS:\n logging_level = \"INFO\"\n\n logging.basicConfig(\n level=logging_level,\n ...
import json import asyncio import re import numpy as np import copy import hashlib import os import traceback import ast from botocore.exceptions import BotoCoreError, ClientError from aiobotocore.session import AioSession from contextlib import AsyncExitStack from dotenv import load_dotenv from pydantic import BaseModel, create_model from .logger_config import configure_logger from bolna.constants import PREPROCESS_DIR
1,049
logger = configure_logger(__name__) load_dotenv() BUCKET_NAME = os.getenv('BUCKET_NAME') def load_file(file_path, is_json=False): data = None with open(file_path, "r") as f: if is_json: data = json.load(f) else: data = f.read() return data def write_json_file(file_path, data): with open(file_path, 'w') as file: json.dump(data, file, indent=4, ensure_ascii=False) def create_ws_data_packet(data, meta_info=None, is_md5_hash=False, llm_generated=False): metadata = copy.deepcopy(meta_info) if meta_info is not None: #It'll be none in case we connect through dashboard playground metadata["is_md5_hash"] = is_md5_hash metadata["llm_generated"] = llm_generated return { 'data': data, 'meta_info': metadata } def int2float(sound): abs_max = np.abs(sound).max() sound = sound.astype('float32') if abs_max > 0: sound *= 1 / 32768 sound = sound.squeeze() # depends on the use case return sound def float2int(sound): sound = np.int16(sound * 32767) return sound def mu_law_encode(audio, quantization_channels=256): mu = quantization_channels - 1 safe_audio_abs = np.minimum(np.abs(audio), 1.0) magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu) signal = np.sign(audio) * magnitude return ((signal + 1) / 2 * mu + 0.5).astype(np.int32) def raw_to_mulaw(raw_bytes): # Convert bytes to numpy array of int16 values samples = np.frombuffer(raw_bytes, dtype=np.int16) samples = samples.astype(np.float32) / (2 ** 15) mulaw_encoded = mu_law_encode(samples) return mulaw_encoded async def get_s3_file(bucket_name, file_key): session = AioSession() async with AsyncExitStack() as exit_stack: s3_client = await exit_stack.enter_async_context(session.create_client('s3')) try: response = await s3_client.get_object(Bucket=bucket_name, Key=file_key) except (BotoCoreError, ClientError) as error: logger.error(error) else: file_content = await response['Body'].read() return file_content async def put_s3_file(bucket_name, file_key, file_data, content_type): session = AioSession() async with AsyncExitStack() as exit_stack: s3_client = await exit_stack.enter_async_context(session.create_client('s3')) data = None if content_type == "json": data = json.dumps(file_data) elif content_type in ["mp3", "wav", "pcm"]: data = file_data try: await s3_client.put_object(Bucket=bucket_name, Key=file_key, Body=data) except (BotoCoreError, ClientError) as error: logger.error(error) except Exception as e: logger.error('Exception occurred while s3 put object: {}'.format(e)) async def get_raw_audio_bytes_from_base64(agent_name, b64_string, audio_format='mp3', user_id = None, assistant_id=None, local = False): # we are already storing pcm formatted audio in the filler config. No need to encode/decode them further audio_data = None if local:
logger = configure_logger(__name__) load_dotenv() BUCKET_NAME = os.getenv('BUCKET_NAME') def load_file(file_path, is_json=False): data = None with open(file_path, "r") as f: if is_json: data = json.load(f) else: data = f.read() return data def write_json_file(file_path, data): with open(file_path, 'w') as file: json.dump(data, file, indent=4, ensure_ascii=False) def create_ws_data_packet(data, meta_info=None, is_md5_hash=False, llm_generated=False): metadata = copy.deepcopy(meta_info) if meta_info is not None: #It'll be none in case we connect through dashboard playground metadata["is_md5_hash"] = is_md5_hash metadata["llm_generated"] = llm_generated return { 'data': data, 'meta_info': metadata } def int2float(sound): abs_max = np.abs(sound).max() sound = sound.astype('float32') if abs_max > 0: sound *= 1 / 32768 sound = sound.squeeze() # depends on the use case return sound def float2int(sound): sound = np.int16(sound * 32767) return sound def mu_law_encode(audio, quantization_channels=256): mu = quantization_channels - 1 safe_audio_abs = np.minimum(np.abs(audio), 1.0) magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu) signal = np.sign(audio) * magnitude return ((signal + 1) / 2 * mu + 0.5).astype(np.int32) def raw_to_mulaw(raw_bytes): # Convert bytes to numpy array of int16 values samples = np.frombuffer(raw_bytes, dtype=np.int16) samples = samples.astype(np.float32) / (2 ** 15) mulaw_encoded = mu_law_encode(samples) return mulaw_encoded async def get_s3_file(bucket_name, file_key): session = AioSession() async with AsyncExitStack() as exit_stack: s3_client = await exit_stack.enter_async_context(session.create_client('s3')) try: response = await s3_client.get_object(Bucket=bucket_name, Key=file_key) except (BotoCoreError, ClientError) as error: logger.error(error) else: file_content = await response['Body'].read() return file_content async def put_s3_file(bucket_name, file_key, file_data, content_type): session = AioSession() async with AsyncExitStack() as exit_stack: s3_client = await exit_stack.enter_async_context(session.create_client('s3')) data = None if content_type == "json": data = json.dumps(file_data) elif content_type in ["mp3", "wav", "pcm"]: data = file_data try: await s3_client.put_object(Bucket=bucket_name, Key=file_key, Body=data) except (BotoCoreError, ClientError) as error: logger.error(error) except Exception as e: logger.error('Exception occurred while s3 put object: {}'.format(e)) async def get_raw_audio_bytes_from_base64(agent_name, b64_string, audio_format='mp3', user_id = None, assistant_id=None, local = False): # we are already storing pcm formatted audio in the filler config. No need to encode/decode them further audio_data = None if local:
file_name = f"{PREPROCESS_DIR}/{agent_name}/{audio_format}/{b64_string}.{audio_format}"
1
2023-12-13 09:07:35+00:00
2k
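The mu-law helpers in the snippet above map normalized float PCM into 8-bit codes. A standalone numeric check (the sine-wave input is a made-up placeholder; the encoder body is copied from the record):

import numpy as np

def mu_law_encode(audio, quantization_channels=256):
    mu = quantization_channels - 1
    safe_audio_abs = np.minimum(np.abs(audio), 1.0)
    magnitude = np.log1p(mu * safe_audio_abs) / np.log1p(mu)
    signal = np.sign(audio) * magnitude
    return ((signal + 1) / 2 * mu + 0.5).astype(np.int32)

pcm = (np.sin(np.linspace(0, 2 * np.pi, 160)) * 32767).astype(np.int16)  # 10 ms of a sine at 16 kHz
floats = pcm.astype(np.float32) / (2 ** 15)   # same normalization as raw_to_mulaw
encoded = mu_law_encode(floats)
assert encoded.min() >= 0 and encoded.max() <= 255  # every sample fits in one byte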
relari-ai/continuous-eval
continuous_eval/metrics/generation_LLM_based_metrics.py
[ { "identifier": "DefaultLLM", "path": "continuous_eval/llm_factory.py", "snippet": " GOOGLE_GENAI_AVAILABLE = True\n GOOGLE_GENAI_AVAILABLE = False\n ANTHROPIC_AVAILABLE = True\n ANTHROPIC_AVAILABLE = False\nclass LLMInterface(ABC):\nclass LLMFactory(LLMInterface):\n def run(self, prompt,...
from continuous_eval.llm_factory import DefaultLLM, LLMInterface
from continuous_eval.metrics.base import LLMBasedMetric
from continuous_eval.metrics.retrieval_LLM_based_metrics import LLMBasedContextCoverage
1,293
class LLMBasedFaithfulness(LLMBasedMetric):
    """
    The LLM based faithfulness metric.
    Measures whether the generated answer is faithful to the retrieved context.
    """

    def __init__(
        self,
        model: LLMInterface = DefaultLLM,
        use_few_shot: bool = True,
        classify_by_statement: bool = False,
    ):
        super().__init__(model)
        self.use_few_shot = use_few_shot
        self.classify_by_statement = classify_by_statement

    def __str__(self):
        return f"LLMBasedFaithfulness(model={self.model}, use_few_shot={self.use_few_shot}, classify_by_statement={self.classify_by_statement})"

    def calculate(self, question, retrieved_contexts, answer, **kwargs):
        """
        Calculate the faithfulness score for the given datapoint.
        """
        if self.classify_by_statement:
            # Context coverage uses the same prompt as faithfulness because it calculates what proportion of the statements in the answer can be attributed to the context.
            # The difference is that faithfulness uses the generated answer, while context coverage uses the ground truth answer (to evaluate the context).
context_coverage = LLMBasedContextCoverage(use_few_shot=self.use_few_shot)
2
2023-12-08 21:30:39+00:00
2k
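A hypothetical caller for the metric above. The calculate() signature and constructor flags come from the record, but the import path and the example inputs are assumptions, and running it needs the continuous-eval package plus a configured LLM:

from continuous_eval.metrics import LLMBasedFaithfulness  # assumed re-export path

metric = LLMBasedFaithfulness(use_few_shot=True, classify_by_statement=True)
score = metric.calculate(
    question="When was the treaty signed?",
    retrieved_contexts=["The treaty was signed in 1951 in San Francisco."],
    answer="It was signed in 1951.",
)
print(score)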
ryanhe312/STSSNet-AAAI2024
eval.py
[ { "identifier": "matlab_metric", "path": "utils/matlab_metric.py", "snippet": "def rgb2ycbcr(img, only_y=True):\ndef calc_metrics(img1, img2, crop_border, test_Y=True, norm=False, mask=None):\ndef calc_metrics_y(img1, img2, crop_border, test_Y=True):\ndef calc_psnr(img1, img2, mask=None):\ndef ssim(img1...
import os
import cv2
import lpips
import torch
import numpy as np
import torch.nn.functional as F
import torch.utils.data as data
import matplotlib.pyplot as plt

from tqdm import tqdm
from utils import matlab_metric, metrics
from dataloaders import *
from model import STSSNet
1,184
def ImgWrite(mPath, prefix, idx, img):
    cv2.imwrite(os.path.join(mPath, prefix + "." + str(idx).zfill(4) + ".png"), img)


@torch.no_grad()
def save_res(dataLoaderIns, model, modelPath, save_dir, save_img=True, mode='all'):
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    if modelPath.endswith(".tar"):
        model_CKPT = torch.load(modelPath, map_location="cuda:0")["state_dict"]
    elif modelPath.endswith(".ckpt"):
        model_CKPT = {k[6:]: v for k, v in torch.load(modelPath, map_location="cuda:0")["state_dict"].items() if 'vgg' not in k}
    else:
        model_CKPT = torch.load(modelPath, map_location="cuda:0")
    model.load_state_dict(model_CKPT)
    model = model.to("cuda:0")
    model.eval()

    all_PSNR_SF = []
    all_ssim_SF = []
    all_lpips_SF = []
    all_PSNR_IF = []
    all_ssim_IF = []
    all_lpips_IF = []

    loss_fn_alex = lpips.LPIPS(net='alex').cuda()

    print('saving to ', save_dir)
    f = open(os.path.join(save_dir, 'metrics.csv'), 'w')
    print('frame,psnr,ssim,lpips', file=f)

    for index, (input, features, mask, hisBuffer, label) in tqdm(dataLoaderIns):
        index = index[0].item()

        input = input.cuda()
        hisBuffer = hisBuffer.cuda()
        mask = mask.cuda()
        features = features.cuda()
        label = label.cuda()

        B, C, H, W = input.size()
        input = F.pad(input, (0, 0, 0, 4), 'replicate')
        mask = F.pad(mask, (0, 0, 0, 4), 'replicate')
        features = F.pad(features, (0, 0, 0, 4), 'replicate')
        hisBuffer = F.pad(hisBuffer.reshape(B, -1, H, W), (0, 0, 0, 4), 'replicate').reshape(B, 3, 4, H + 4, W)

        res = model(input, features, mask, hisBuffer)
        res = res[:, :, :-8]

        ## mask
        if mode == 'edge':
            gray = cv2.cvtColor((label[0].permute(1, 2, 0).detach().cpu().numpy() * 255).astype(np.uint8), cv2.COLOR_RGB2GRAY)
            mask = cv2.Canny(gray, 100, 200)
        elif mode == 'hole':
            mask = 1 - mask[:, :, :-4]
            mask = F.interpolate(mask, scale_factor=2, mode='bilinear').squeeze().cpu().numpy()
        else:
            mask = None

        ## calculate metrics
psnr, ssim = matlab_metric.calc_metrics(res[0].permute(1,2,0).detach().cpu().numpy(), label[0].permute(1,2,0).detach().cpu().numpy(), 0, norm=True, mask=mask)
0
2023-12-10 02:02:37+00:00
2k
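The eval loop above pads the input height by 4 replicated rows before inference and crops 8 rows from the 2x-resolution output. A minimal shape-only sketch of that pad-then-crop pattern (the interpolate call stands in for the STSSNet model):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 540, 960)                 # height 540 is not convolution-friendly
x_pad = F.pad(x, (0, 0, 0, 4), 'replicate')     # pad 4 rows at the bottom -> 544
out = F.interpolate(x_pad, scale_factor=2)      # stand-in for the super-resolution network
out = out[:, :, :-8]                            # drop 2 * 4 padded rows at 2x scale
assert out.shape[-2:] == (1080, 1920)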
Seunggu0305/VLCounter
tools/models/ViT_Encoder_add.py
[ { "identifier": "LayerNorm", "path": "tools/models/Encoder_utils.py", "snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm to handle fp16.\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n ret = super().forward(x.type(torch.float32))\n ...
import torch
import torch.nn.functional as F
import math

from torch.nn import Dropout
from torch import nn
from functools import reduce
from operator import mul

from .Encoder_utils import LayerNorm, Transformer, Attention
1,124
class SPTCLIPVisionTransformer(nn.Module):
    def __init__(self, input_resolution=384, patch_size=16, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.1, out_indices=[5, 6, 7, 8, 11], pretrained=None, get_embeddings=True, num_tokens=10, prompt_dim=768, total_d_layer=11, **kwargs):
        super().__init__()
        self.pretrained = pretrained
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.spatial_size = input_resolution // patch_size
self.ln_pre = LayerNorm(width)
0
2023-12-13 08:00:28+00:00
2k
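A quick shape check, independent of the repo, of what the constructor above implies: a 384x384 input with 16x16 patches gives a 24x24 token grid, so the positional embedding stores 24*24 + 1 = 577 vectors (the extra one is the class token):

import torch

input_resolution, patch_size, width = 384, 16, 768
spatial_size = input_resolution // patch_size                   # 24
positional_embedding = torch.randn(spatial_size ** 2 + 1, width)
print(spatial_size, positional_embedding.shape)                 # 24 torch.Size([577, 768])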
qitan/devops-backend-lite
apps/workflow/serializers.py
[ { "identifier": "Product", "path": "dbapp/models.py", "snippet": "" }, { "identifier": "RecursiveField", "path": "common/recursive.py", "snippet": "class RecursiveField(Field):\n \"\"\"\n A field that gets its representation from its parent.\n\n This method could be used to seri...
from rest_framework import serializers

from dbapp.models import Product, Project
from common.recursive import RecursiveField
from dbapp.models import UserProfile
from dbapp.models import WorkflowCategory, Workflow, WorkflowNodeHistory, WorkflowTemplate, \
    WorkflowTemplateRevisionHistory, WorkflowNodeHistoryCallback
from common.extends.serializers import ModelSerializer
from django.conf import settings

import logging
1,600
""" @Author : Ken Chen @Contact : 316084217@qq.com @Time : 2021/11/2 上午9:50 """ logger = logging.getLogger(__name__) class WorkflowTemplateSerializer(ModelSerializer): projects_info = serializers.SerializerMethodField() env_info = serializers.SerializerMethodField() def get_env_info(self, instance): if instance.environment: return {'name': instance.environment.name, 'alias': instance.environment.alias} return {} def get_projects_info(self, instance): data = [] product_ids = {} for i in instance.projects: if i[0] not in product_ids: product_ids[i[0]] = [] product_ids[i[0]].append(i[1]) for k, v in product_ids.items(): product = Product.objects.get(id=k) _projects = Project.objects.filter(id__in=v) data.append({'value': product.id, 'name': product.name, 'label': product.alias, 'children': [{'value': i.id, 'name': i.name, 'label': i.alias} for i in _projects]}) return data class Meta: model = WorkflowTemplate fields = '__all__' class WorkflowTemplateForRetrieveSerializer(ModelSerializer): class Meta: model = WorkflowTemplate fields = '__all__' class WorkflowRevisionTemplateSerializer(ModelSerializer): class Meta:
""" @Author : Ken Chen @Contact : 316084217@qq.com @Time : 2021/11/2 上午9:50 """ logger = logging.getLogger(__name__) class WorkflowTemplateSerializer(ModelSerializer): projects_info = serializers.SerializerMethodField() env_info = serializers.SerializerMethodField() def get_env_info(self, instance): if instance.environment: return {'name': instance.environment.name, 'alias': instance.environment.alias} return {} def get_projects_info(self, instance): data = [] product_ids = {} for i in instance.projects: if i[0] not in product_ids: product_ids[i[0]] = [] product_ids[i[0]].append(i[1]) for k, v in product_ids.items(): product = Product.objects.get(id=k) _projects = Project.objects.filter(id__in=v) data.append({'value': product.id, 'name': product.name, 'label': product.alias, 'children': [{'value': i.id, 'name': i.name, 'label': i.alias} for i in _projects]}) return data class Meta: model = WorkflowTemplate fields = '__all__' class WorkflowTemplateForRetrieveSerializer(ModelSerializer): class Meta: model = WorkflowTemplate fields = '__all__' class WorkflowRevisionTemplateSerializer(ModelSerializer): class Meta:
model = WorkflowTemplateRevisionHistory
3
2023-12-13 03:09:32+00:00
2k
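get_projects_info above folds stored [product_id, project_id] pairs into a per-product mapping before hitting the ORM. The grouping step in isolation, with toy ids in place of database rows:

# Standalone sketch of the grouping logic; the input pairs are placeholders.
projects = [[1, 10], [1, 11], [2, 20]]
product_ids = {}
for product_id, project_id in projects:
    product_ids.setdefault(product_id, []).append(project_id)
print(product_ids)  # {1: [10, 11], 2: [20]}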
timo-reymann/python-oauth2-cli-auth
oauth2_cli_auth/simplified_flow.py
[ { "identifier": "OAuthCallbackHttpServer", "path": "oauth2_cli_auth/http_server.py", "snippet": "class OAuthCallbackHttpServer(HTTPServer):\n \"\"\"\n Simplistic HTTP Server to provide local callback URL for oauth2 provider\n \"\"\"\n\n def __init__(self, port):\n super().__init__((\"...
from oauth2_cli_auth import OAuthCallbackHttpServer, get_auth_url, exchange_code_for_access_token, OAuth2ClientInfo, \
    open_browser
1,227
def get_access_token_with_browser_open(client_info: OAuth2ClientInfo, server_port: int = 8080) -> str:
    """
    Provides a simplified API to:

    - Spin up the callback server
    - Open the browser with the authorization URL
    - Wait for the code to arrive
    - Get access token from code

    :param client_info: Client Info for Oauth2 Interaction
    :param server_port: Port of the local web server to spin up
    :return: Access Token
    """
    callback_server = OAuthCallbackHttpServer(server_port)
    auth_url = get_auth_url(client_info, callback_server.callback_url)
open_browser(auth_url)
4
2023-12-09 12:14:33+00:00
2k
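A hypothetical end-to-end caller for the helper above. Only get_access_token_with_browser_open itself comes from the record; OAuth2ClientInfo's constructor arguments and the endpoint URLs are illustrative assumptions:

from oauth2_cli_auth import OAuth2ClientInfo, get_access_token_with_browser_open

client_info = OAuth2ClientInfo(
    authorization_url="https://example.com/oauth/authorize",  # placeholder endpoints
    token_url="https://example.com/oauth/token",
    client_id="my-client-id",
    scopes=["openid"],
)
token = get_access_token_with_browser_open(client_info, server_port=8080)
print(token)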
solanav/phishflood
phishflood/__main__.py
[ { "identifier": "extract_inputs", "path": "credfind/utils.py", "snippet": "def extract_inputs(html: str) -> InputList:\n \"\"\"Given an HTML page, returns a list of inputs or None if nothing was found\"\"\"\n soup = BeautifulSoup(html, \"html.parser\")\n\n print(\"Finding all forms in the page\...
import json
import os
import sys
import time
import requests

from hashlib import sha256
from typing import Any, Dict, List, Optional, Tuple

from credfind.utils import extract_inputs
from credfind.objects import Input, InputList, InputType
from playwright.sync_api import sync_playwright, TimeoutError, Page
from credgen.utils import creds_from_input
from phishflood.rabbit import RabbitConsumer
from config import general_conf
from pprint import pprint; pprint(forms)
1,591
SCREENSHOT_I = 0

Actions = List[Dict[str, Any]]


def screenshot(page: Page):
    global SCREENSHOT_I
    SCREENSHOT_I += 1
    page.screenshot(path=f"samples/{SCREENSHOT_I}.png")


def hash_inputs(inputs: List[Input]) -> str:
    """Returns a unique string identifying the inputs in the website"""
    return sha256("".join([str(i) for i in inputs]).encode()).hexdigest()


def flood_page(
    page: Page, last_hash: str = "", page_num: int = 0
) -> Optional[Tuple[str, InputList, Actions]]:
    """Floods the current page's form with fake credentials, returning the input hash, the inputs found and the actions performed."""

    # Get a first screenshot
    page.wait_for_timeout(3000)
    screenshot(page)

    # Get html and extract the inputs
    try:
        html = page.content()
    except:
        return None

    res = extract_inputs(html)
    if len(res) > 0:
        fi, form, inputs = res[0]
    else:
        print("No inputs found")
        return None

    # Calculate the hash of the inputs
    input_hash = hash_inputs(inputs)
    print(f"Input hash: {input_hash}")

    if input_hash == last_hash:
        print("Already flooded this page")
        return None

    form_locator = page.locator(f"form >> nth = {form.meta_id}")

    actions = []

    # Generate the fake credentials for each form and each input
    for inp in inputs:
        FILLABLE_INPUTS = [
            InputType.TEXT,
            InputType.EMAIL,
            InputType.PASSWORD,
            InputType.NUMBER,
            InputType.TEL,
            InputType.SEARCH,
            InputType.URL,
        ]
        if inp.type_ in FILLABLE_INPUTS:
text = creds_from_input(inp)
2
2023-12-11 16:38:36+00:00
2k
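The dedup trick in hash_inputs above fingerprints a page by hashing the string form of every input field, so revisiting an identical form is detected and skipped. The idea in isolation (the input strings are placeholders):

from hashlib import sha256

def hash_inputs(inputs):
    return sha256("".join(str(i) for i in inputs).encode()).hexdigest()

page_one = ["<input name=user>", "<input name=pass type=password>"]
page_two = list(page_one)                       # a second visit to the same form
assert hash_inputs(page_one) == hash_inputs(page_two)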
abing7k/redroid-script
stuffs/ndk.py
[ { "identifier": "General", "path": "stuffs/general.py", "snippet": "class General:\n def download(self):\n loc_md5 = \"\"\n if os.path.isfile(self.dl_file_name):\n with open(self.dl_file_name,\"rb\") as f:\n bytes = f.read()\n loc_md5 = hashlib.m...
import os
import shutil

from stuffs.general import General
from tools.helper import bcolors, get_download_dir, print_color, run
845
class Ndk(General):
    download_loc = get_download_dir()
    copy_dir = "./ndk"
    dl_link = "https://github.com/supremegamers/vendor_google_proprietary_ndk_translation-prebuilt/archive/181d9290a69309511185c4417ba3d890b3caaaa8.zip"
    dl_file_name = os.path.join(download_loc, "libndktranslation.zip")
    extract_to = "/tmp/libndkunpack"
    act_md5 = "0beff55f312492f24d539569d84f5bfb"
    # init_rc_component = """
    # # Enable native bridge for target executables
    # on early-init
    #     mount binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc
    # on property:ro.enable.native.bridge.exec=1
    #     copy /system/etc/binfmt_misc/arm_exe /proc/sys/fs/binfmt_misc/register
    #     copy /system/etc/binfmt_misc/arm_dyn /proc/sys/fs/binfmt_misc/register
    #     copy /system/etc/binfmt_misc/arm64_exe /proc/sys/fs/binfmt_misc/register
    #     copy /system/etc/binfmt_misc/arm64_dyn /proc/sys/fs/binfmt_misc/register
    # """

    def download(self):
print_color("Downloading libndk now .....", bcolors.GREEN)
3
2023-12-06 09:03:05+00:00
2k
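The act_md5 class attribute above feeds the checksum gate sketched in the General.download snippet from the context: the archive is re-downloaded only when the local file's MD5 differs. A minimal standalone version of that check (the file name is a placeholder):

import hashlib
import os

def md5_matches(path, expected_md5):
    # True only when the file exists and its digest equals the pinned checksum.
    if not os.path.isfile(path):
        return False
    with open(path, "rb") as f:
        return hashlib.md5(f.read()).hexdigest() == expected_md5

print(md5_matches("libndktranslation.zip", "0beff55f312492f24d539569d84f5bfb"))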
zvict/papr
dataset/dataset.py
[ { "identifier": "load_meta_data", "path": "dataset/utils.py", "snippet": "def load_meta_data(args, mode=\"train\"):\n \"\"\"\n 0 -----------> W\n |\n |\n |\n ⬇\n H\n [H, W, 4]\n \"\"\"\n image_paths = None\n\n if args.type == \"synthetic\":\n images, poses, hwf, i...
import torch
import numpy as np
import imageio

from torch.utils.data import Dataset
from PIL import Image

from .utils import load_meta_data, get_rays, extract_patches
1,572
class RINDataset(Dataset):
    """ Ray Image Normal Dataset """

    def __init__(self, args, mode='train'):
        self.args = args
images, c2w, H, W, focal_x, focal_y, image_paths = load_meta_data(
0
2023-12-08 19:51:42+00:00
2k
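A generic consumption sketch, not from the repo: a torch Dataset such as RINDataset above is usually wrapped in a DataLoader for batched iteration, here with toy tensors standing in for rays and pixel values:

import torch
from torch.utils.data import Dataset, DataLoader

class ToyRays(Dataset):
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        return torch.randn(3), torch.randn(3)   # stand-ins for (ray, rgb)

loader = DataLoader(ToyRays(), batch_size=4, shuffle=True)
for rays, rgb in loader:
    print(rays.shape, rgb.shape)                # torch.Size([4, 3]) twice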
rinnakk/nue-asr
nue_asr/cli.py
[ { "identifier": "transcribe", "path": "nue_asr/transcribe.py", "snippet": "@torch.inference_mode()\ndef transcribe(\n model: NueASRModel,\n tokenizer: PreTrainedTokenizer,\n audio: Union[str, np.ndarray, torch.Tensor],\n **decode_options,\n) -> ASRResult:\n device = model.device\n sr =...
import argparse
import os

import torch

from .transcribe import transcribe
from .utils import load_model, load_tokenizer, set_seed, str2bool
1,542
#!/usr/bin/env python3
# Copyright 2023 rinna Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def cli_main():
    default_device = "cuda" if torch.cuda.is_available() else "cpu"

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "audio_files",
        nargs="+",
        type=str,
        help="Audio file paths",
    )
    parser.add_argument(
        "--model",
        type=str,
        default=None,
        help="Model name or path",
    )
    parser.add_argument(
        "--device",
        type=str,
        default=default_device,
        help="Device to use for inference.",
    )
    parser.add_argument(
"--fp16", type=str2bool, default=True, help="Whether to fp16 inference."
4
2023-12-07 01:37:23+00:00
2k
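The --fp16 flag above relies on a str2bool helper imported from .utils. Its body is not shown in the record; a common implementation of that argparse pattern (an assumption, not necessarily the repo's code) is:

import argparse

def str2bool(v):
    # Accept booleans directly and the usual textual spellings.
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "1"):
        return True
    if v.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")

parser = argparse.ArgumentParser()
parser.add_argument("--fp16", type=str2bool, default=True)
print(parser.parse_args(["--fp16", "false"]).fp16)  # False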
AdaCheng/EgoThink
models/instruct_blip/processors/blip_processors.py
[ { "identifier": "registry", "path": "models/instruct_blip/common/registry.py", "snippet": "class Registry:\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def w...
import re

from ..common.registry import registry
from .base_processor import BaseProcessor
from .randaugment import RandomAugment
from omegaconf import OmegaConf
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
805
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ class BlipImageBaseProcessor(BaseProcessor): def __init__(self, mean=None, std=None): if mean is None: mean = (0.48145466, 0.4578275, 0.40821073) if std is None: std = (0.26862954, 0.26130258, 0.27577711) self.normalize = transforms.Normalize(mean, std)
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ class BlipImageBaseProcessor(BaseProcessor): def __init__(self, mean=None, std=None): if mean is None: mean = (0.48145466, 0.4578275, 0.40821073) if std is None: std = (0.26862954, 0.26130258, 0.27577711) self.normalize = transforms.Normalize(mean, std)
@registry.register_processor("blip_caption")
0
2023-12-05 14:17:17+00:00
2k
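The effect of the Normalize transform built above, demonstrated with the same CLIP mean/std constants on a dummy image tensor:

import torch
from torchvision import transforms

normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
                                 (0.26862954, 0.26130258, 0.27577711))
img = torch.rand(3, 224, 224)          # fake RGB image in [0, 1]
out = normalize(img)
print(out.mean(dim=(1, 2)))            # roughly centred around zero per channel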
TristanBilot/mlx-GCN
main_torch.py
[ { "identifier": "download_cora", "path": "datasets.py", "snippet": "def download_cora():\n \"\"\"Downloads the cora dataset into a local cora folder.\"\"\"\n\n url = \"https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz\"\n extract_to = \".\"\n\n if os.path.exists(os.path.join(extract_to, \"...
from argparse import ArgumentParser
from time import time

from datasets import download_cora, load_data, train_val_test_mask

import torch
import torch.nn as nn
1,238
class GCNLayer(nn.Module):
    def __init__(self, x_dim, h_dim, bias=True):
        super(GCNLayer, self).__init__()
        self.weight = nn.Parameter(torch.FloatTensor(torch.zeros(size=(x_dim, h_dim))))
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(torch.zeros(size=(h_dim,))))
        else:
            self.register_parameter('bias', None)
        self.initialize_weights()

    def initialize_weights(self):
        nn.init.xavier_uniform_(self.weight)
        if self.bias is not None:
            nn.init.zeros_(self.bias)

    def forward(self, x, adj):
        x = x @ self.weight
        if self.bias is not None:
            x += self.bias
        return torch.mm(adj, x)


class GCN(nn.Module):
    def __init__(self, x_dim, h_dim, out_dim, nb_layers=2, dropout=0.5, bias=True):
        super(GCN, self).__init__()

        layer_sizes = [x_dim] + [h_dim] * nb_layers + [out_dim]
        self.gcn_layers = nn.Sequential(*[
            GCNLayer(in_dim, out_dim, bias)
            for in_dim, out_dim in zip(layer_sizes[:-1], layer_sizes[1:])
        ])
        self.dropout = nn.Dropout(p=dropout)

    def initialize_weights(self):
        # Reinitialize every stacked layer (the original body referenced stale
        # attributes self.gcn_1 / self.gcn_2 that no longer exist on this class).
        for layer in self.gcn_layers:
            layer.initialize_weights()

    def forward(self, x, adj):
        for layer in self.gcn_layers[:-1]:
            x = torch.relu(layer(x, adj))
            x = self.dropout(x)
        x = self.gcn_layers[-1](x, adj)
        return x


def to_torch(device, x, y, adj, train_mask, val_mask, test_mask):
    x = torch.tensor(x, dtype=torch.float32, device=device)
    y = torch.tensor(y, dtype=torch.long, device=device)
    adj = torch.tensor(adj, dtype=torch.float32, device=device)
    train_mask = torch.tensor(train_mask, device=device)
    val_mask = torch.tensor(val_mask, device=device)
    test_mask = torch.tensor(test_mask, device=device)
    return x, y, adj, train_mask, val_mask, test_mask


def eval_fn(x, y):
    return torch.mean((torch.argmax(x, axis=1) == y).float())


def main(args, device):
    # Data loading
    download_cora()

    x, y, adj = load_data(args)
train_mask, val_mask, test_mask = train_val_test_mask(y, args.nb_classes)
2
2023-12-11 09:40:09+00:00
2k
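A tiny numeric sketch, independent of the repo, of the propagation rule in GCNLayer.forward above: features are linearly projected, then mixed along graph edges by a dense adjacency multiply:

import torch

adj = torch.tensor([[0., 1.], [1., 0.]])   # two nodes joined by one edge
x = torch.tensor([[1., 0.], [0., 1.]])     # one-hot node features
w = torch.eye(2)                           # identity "weights" for readability
print(torch.mm(adj, x @ w))                # each node now holds its neighbour's features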
3dlg-hcvc/cage
models/denoiser.py
[ { "identifier": "FinalLayer", "path": "models/utils.py", "snippet": "class FinalLayer(nn.Module):\n def __init__(self, in_ch, out_ch=None, dropout=0.):\n super().__init__()\n out_ch = in_ch if out_ch is None else out_ch\n self.linear = nn.Linear(in_ch, out_ch)\n self.norm ...
import torch
import models

from torch import nn
from models.utils import FinalLayer, PEmbeder, AAB
1,352
@models.register('denoiser')
class AABModel(nn.Module):
    '''
    Denoiser based on Attribute Attention Block (AAB)
    3 sequential attentions: local -> global -> graph
    '''

    def __init__(self, hparams):
        super(AABModel, self).__init__()
        self.hparams = hparams
        in_ch = hparams.in_ch
        attn_dim = hparams.attn_dim
        dropout = hparams.dropout
        n_head = hparams.n_head
        head_dim = attn_dim // n_head
        num_embeds_ada_norm = 6 * attn_dim
        self.K = self.hparams.get('K', 32)

        self.x_embedding = nn.Linear(in_ch, attn_dim)
        self.pe_node = PEmbeder(self.K, attn_dim)
        self.pe_attr = PEmbeder(5, attn_dim)

        self.attn_layers = nn.ModuleList(
            [
                # TODO: refactor this block; customize the eps of layernorm if training with fp16
                AAB(dim=attn_dim,
                    num_attention_heads=n_head,
                    attention_head_dim=head_dim,
                    dropout=dropout,
                    activation_fn="geglu",
                    num_embeds_ada_norm=num_embeds_ada_norm,
                    attention_bias=False,
                    norm_elementwise_affine=True,
                    final_dropout=False,
                )
                for d in range(hparams.n_layers)
            ]
        )
self.final_layer = FinalLayer(attn_dim, in_ch, dropout=dropout)
0
2023-12-06 23:08:41+00:00
2k
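PEmbeder's internals are not shown in the record; a plausible minimal version (an assumption, for illustration only) is an index-based learned embedding added to the token features, which is consistent with how pe_node and pe_attr are sized above:

import torch
from torch import nn

class PEmbederSketch(nn.Module):
    # Hypothetical stand-in: num_positions learned vectors, added by index.
    def __init__(self, num_positions, dim):
        super().__init__()
        self.embed = nn.Embedding(num_positions, dim)

    def forward(self, x, idx):
        return x + self.embed(idx)

pe = PEmbederSketch(32, 512)
tokens = torch.randn(1, 32, 512)
idx = torch.arange(32)
print(pe(tokens, idx).shape)   # torch.Size([1, 32, 512])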
modelscope/llmuses
llmuses/benchmarks/data_adapter.py
[ { "identifier": "Benchmark", "path": "llmuses/benchmarks/benchmark.py", "snippet": "class Benchmark(object):\n \"\"\"\n Wrapper for loading datasets from ModelScope or HuggingFace.\n \"\"\"\n\n def __init__(self):\n ...\n\n @staticmethod\n def load(dataset_name: str,\n ...
from abc import ABC, abstractmethod
from typing import Any, Optional

from llmuses.benchmarks import Benchmark
from llmuses.constants import DEFAULT_ROOT_CACHE_DIR, AnswerKeys
from llmuses.utils.logger import get_logger

import random
1,377
# Copyright (c) Alibaba, Inc. and its affiliates.

logger = get_logger()


class DataAdapter(ABC):

    def __init__(self,
                 subset_list: list,
                 metric_list: list,
                 few_shot_num: Optional[int] = 0,
                 train_split: Optional[str] = None,
                 eval_split: Optional[str] = None,
                 **kwargs):
        """
        Args:
            subset_list: list of subset names for the dataset.
            metric_list: list, the metric list to evaluate the model on specific benchmark.
            few_shot_num: int, number of few-shot examples. Default: 0
            train_split: str, usually for few-shot examples. e.g. 'train'
            eval_split: str, the target eval split name. e.g. 'test'
        """
        self.subset_list = subset_list
        self.metric_list = metric_list
        self.few_shot_num = few_shot_num
        self.train_split = train_split
        self.eval_split = eval_split

    def load(self,
             dataset_name_or_path: str,
             subset_list: list = None,
             work_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,
             **kwargs) -> dict:
        """
        Load the dataset. Remote and local datasets are supported.
        You can rewrite this method to support your own local dataset, just follow the format of the output.

        Returns: {'subset_name': {'train': train_dataset, 'test': test_dataset}}
            train_dataset, test_dataset: Iterable dataset, each item of which is a dict.

        TODO: local data path to be supported.
        """
        data_dict = {}

        split_list = [split for split in [self.train_split, self.eval_split] if split is not None]
        if len(split_list) == 0:
            logger.error(f'Got empty split list: {split_list}')

        subset_list = subset_list if subset_list is not None else self.subset_list
        for sub_name in subset_list:
            data_dict[sub_name] = {}
            # e.g. train: few-shot, test: target dataset to evaluate
            for split in split_list:
dataset = Benchmark.load(dataset_name=dataset_name_or_path,
0
2023-12-07 06:10:49+00:00
2k
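The nested return shape documented in load() above, spelled out with toy lists standing in for real dataset objects:

# Sketch of {'subset_name': {'train': ..., 'test': ...}} with placeholder items.
data_dict = {
    "subset_a": {
        "train": [{"question": "q1", "answer": "a1"}],   # few-shot pool
        "test": [{"question": "q2", "answer": "a2"}],    # split under evaluation
    }
}
for sub_name, splits in data_dict.items():
    print(sub_name, list(splits))   # subset_a ['train', 'test']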