repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9 values |
|---|---|---|---|---|---|---|---|---|---|---|
f0uriest/quadax | quadax/adaptive.py | [
{
"identifier": "fixed_quadcc",
"path": "quadax/fixed_order.py",
"snippet": "@functools.partial(jax.jit, static_argnums=(0, 4, 5))\ndef fixed_quadcc(fun, a, b, args=(), norm=jnp.inf, n=32):\n \"\"\"Integrate a function from a to b using a fixed order Clenshaw-Curtis rule.\n\n Integration is perfor... | import jax
import jax.numpy as jnp
from .fixed_order import fixed_quadcc, fixed_quadgk, fixed_quadts
from .utils import QuadratureInfo, bounded_while_loop, errorif, map_interval, wrap_func | 6,991 | * 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
Notes
-----
Adaptive algorithms are inherently somewhat sequential, so perfect parallelism
is generally not achievable. The local quadrature rule vmaps integrand evaluation at
``order`` points, so using higher order methods will generally be more efficient on
GPU/TPU.
"""
y, info = adaptive_quadrature(
fixed_quadts,
fun,
interval,
args,
full_output,
epsabs,
epsrel,
max_ninter,
n=order,
norm=norm,
)
info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)
return y, info
def adaptive_quadrature(
rule,
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
max_ninter=50,
norm=jnp.inf,
**kwargs,
):
"""Global adaptive quadrature.
This is a lower level routine allowing for custom local quadrature rules. For most
applications the higher order methods ``quadgk``, ``quadcc``, ``quadts`` are
preferable.
Parameters
----------
rule : callable
Local quadrature rule to use. It should have a signature of the form
``rule(fun, a, b, **kwargs)`` -> out, where out is a tuple with 4 elements:
#. Estimate of the integral of fun from a to b
#. Estimate of the absolute error in the integral (ie, from nested scheme).
#. Estimate of the integral of abs(fun) from a to b
#. Estimate of the integral of abs(fun - <fun>) from a to b, where <fun> is
the mean value of fun over the interval.
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration with possible breakpoints. Use np.inf to
denote infinite intervals.
args : tuple, optional
Extra arguments passed to fun.
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float, optional
Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to
obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `fun` over `interval`, and ``result`` is the
numerical approximation.
max_ninter : int, optional
An upper bound on the number of sub-intervals used in the adaptive
algorithm.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
kwargs : dict
Additional keyword arguments passed to ``rule``.
Returns
-------
y : float, Array
The integral of fun from `a` to `b`.
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of rule evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* 'ninter' : (int) The number, K, of sub-intervals produced in the
subdivision process.
* 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the left end points of the (remapped) sub-intervals
in the partition of the integration range.
* 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the right end points of the (remapped) sub-intervals.
* 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the integral approximations on the sub-intervals.
* 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
"""
errorif(
max_ninter < len(interval) - 1,
ValueError,
f"max_ninter={max_ninter} is not enough for {len(interval)-1} breakpoints",
)
_norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)
fun, interval = map_interval(fun, interval)
| """Functions for globally h-adaptive quadrature."""
NORMAL_EXIT = 0
MAX_NINTER = 1
ROUNDOFF = 2
BAD_INTEGRAND = 3
NO_CONVERGE = 4
DIVERGENT = 5
def quadgk(
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
max_ninter=50,
order=21,
norm=jnp.inf,
):
"""Global adaptive quadrature using Gauss-Konrod rule.
Integrate fun from `interval[0]` to `interval[-1]` using a h-adaptive scheme with
error estimate. Breakpoints can be specified in `interval` where integration
difficulty may occur.
Basically the same as ``scipy.integrate.quad`` but without extrapolation. A good
general purpose integrator for most reasonably well behaved functions over finite
or infinite intervals.
Parameters
----------
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration with possible breakpoints. Use np.inf to
denote infinite intervals.
args : tuple, optional
Extra arguments passed to fun.
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float, optional
Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to
obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `fun` over `interval`, and ``result`` is the
numerical approximation.
max_ninter : int, optional
An upper bound on the number of sub-intervals used in the adaptive
algorithm.
order : {15, 21, 31, 41, 51, 61}
Order of local integration rule.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
Returns
-------
y : float, Array
The integral of fun from `a` to `b`.
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of function evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* 'ninter' : (int) The number, K, of sub-intervals produced in the
subdivision process.
* 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the left end points of the (remapped) sub-intervals
in the partition of the integration range.
* 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the right end points of the (remapped) sub-intervals.
* 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the integral approximations on the sub-intervals.
* 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
Notes
-----
Adaptive algorithms are inherently somewhat sequential, so perfect parallelism
is generally not achievable. The local quadrature rule vmaps integrand evaluation at
``order`` points, so using higher order methods will generally be more efficient on
GPU/TPU.
"""
y, info = adaptive_quadrature(
fixed_quadgk,
fun,
interval,
args,
full_output,
epsabs,
epsrel,
max_ninter,
n=order,
norm=norm,
)
info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)
return y, info
def quadcc(
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
max_ninter=50,
order=32,
norm=jnp.inf,
):
"""Global adaptive quadrature using Clenshaw-Curtis rule.
Integrate fun from `interval[0]` to `interval[-1]` using a h-adaptive scheme with
error estimate. Breakpoints can be specified in `interval` where integration
difficulty may occur.
A good general purpose integrator for most reasonably well behaved functions over
finite or infinite intervals.
Parameters
----------
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration with possible breakpoints. Use np.inf to
denote infinite intervals.
args : tuple, optional
Extra arguments passed to fun.
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float, optional
Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to
obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `fun` over `interval`, and ``result`` is the
numerical approximation.
max_ninter : int, optional
An upper bound on the number of sub-intervals used in the adaptive
algorithm.
order : {8, 16, 32, 64, 128, 256}
Order of local integration rule.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
Returns
-------
y : float, Array
The integral of fun from `a` to `b`.
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of function evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* 'ninter' : (int) The number, K, of sub-intervals produced in the
subdivision process.
* 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the left end points of the (remapped) sub-intervals
in the partition of the integration range.
* 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the right end points of the (remapped) sub-intervals.
* 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the integral approximations on the sub-intervals.
* 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
Notes
-----
Adaptive algorithms are inherently somewhat sequential, so perfect parallelism
is generally not achievable. The local quadrature rule vmaps integrand evaluation at
``order`` points, so using higher order methods will generally be more efficient on
GPU/TPU.
"""
y, info = adaptive_quadrature(
fixed_quadcc,
fun,
interval,
args,
full_output,
epsabs,
epsrel,
max_ninter,
n=order,
norm=norm,
)
info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)
return y, info
def quadts(
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
max_ninter=50,
order=61,
norm=jnp.inf,
):
"""Global adaptive quadrature using trapezoidal tanh-sinh rule.
Integrate fun from `interval[0]` to `interval[-1]` using a h-adaptive scheme with
error estimate. Breakpoints can be specified in `interval` where integration
difficulty may occur.
Especially good for integrands with singular behavior at an endpoint.
Parameters
----------
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration with possible breakpoints. Use np.inf to
denote infinite intervals.
args : tuple, optional
Extra arguments passed to fun.
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float, optional
Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to
obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `fun` over `interval`, and ``result`` is the
numerical approximation.
max_ninter : int, optional
An upper bound on the number of sub-intervals used in the adaptive
algorithm.
order : {41, 61, 81, 101}
Order of local integration rule.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
Returns
-------
y : float, Array
The integral of fun from `a` to `b`.
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of function evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* 'ninter' : (int) The number, K, of sub-intervals produced in the
subdivision process.
* 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the left end points of the (remapped) sub-intervals
in the partition of the integration range.
* 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the right end points of the (remapped) sub-intervals.
* 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the integral approximations on the sub-intervals.
* 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
Notes
-----
Adaptive algorithms are inherently somewhat sequential, so perfect parallelism
is generally not achievable. The local quadrature rule vmaps integrand evaluation at
``order`` points, so using higher order methods will generally be more efficient on
GPU/TPU.
"""
y, info = adaptive_quadrature(
fixed_quadts,
fun,
interval,
args,
full_output,
epsabs,
epsrel,
max_ninter,
n=order,
norm=norm,
)
info = QuadratureInfo(info.err, info.neval * order, info.status, info.info)
return y, info
def adaptive_quadrature(
rule,
fun,
interval,
args=(),
full_output=False,
epsabs=1.4e-8,
epsrel=1.4e-8,
max_ninter=50,
norm=jnp.inf,
**kwargs,
):
"""Global adaptive quadrature.
This is a lower level routine allowing for custom local quadrature rules. For most
applications the higher order methods ``quadgk``, ``quadcc``, ``quadts`` are
preferable.
Parameters
----------
rule : callable
Local quadrature rule to use. It should have a signature of the form
``rule(fun, a, b, **kwargs)`` -> out, where out is a tuple with 4 elements:
#. Estimate of the integral of fun from a to b
#. Estimate of the absolute error in the integral (ie, from nested scheme).
#. Estimate of the integral of abs(fun) from a to b
#. Estimate of the integral of abs(fun - <fun>) from a to b, where <fun> is
the mean value of fun over the interval.
fun : callable
Function to integrate, should have a signature of the form
``fun(x, *args)`` -> float, Array. Should be JAX transformable.
interval : array-like
Lower and upper limits of integration with possible breakpoints. Use np.inf to
denote infinite intervals.
args : tuple, optional
Extra arguments passed to fun.
full_output : bool, optional
If True, return the full state of the integrator. See below for more
information.
epsabs, epsrel : float, optional
Absolute and relative error tolerance. Default is 1.4e-8. Algorithm tries to
obtain an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
where ``i`` = integral of `fun` over `interval`, and ``result`` is the
numerical approximation.
max_ninter : int, optional
An upper bound on the number of sub-intervals used in the adaptive
algorithm.
norm : int, callable
Norm to use for measuring error for vector valued integrands. No effect if the
integrand is scalar valued. If an int, uses p-norm of the given order, otherwise
should be callable.
kwargs : dict
Additional keyword arguments passed to ``rule``.
Returns
-------
y : float, Array
The integral of fun from `a` to `b`.
info : QuadratureInfo
Named tuple with the following fields:
* err : (float) Estimate of the error in the approximation.
* neval : (int) Total number of rule evaluations.
* status : (int) Flag indicating reason for termination. status of 0 means
normal termination, any other value indicates a possible error. A human
readable message can be obtained by ``print(quadax.STATUS[status])``
* info : (dict or None) Other information returned by the algorithm.
Only present if ``full_output`` is True. Contains the following:
* 'ninter' : (int) The number, K, of sub-intervals produced in the
subdivision process.
* 'a_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the left end points of the (remapped) sub-intervals
in the partition of the integration range.
* 'b_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the right end points of the (remapped) sub-intervals.
* 'r_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the integral approximations on the sub-intervals.
* 'e_arr' : (ndarray) rank-1 array of length max_ninter, the first K
elements of which are the moduli of the absolute error estimates on the
sub-intervals.
"""
errorif(
max_ninter < len(interval) - 1,
ValueError,
f"max_ninter={max_ninter} is not enough for {len(interval)-1} breakpoints",
)
_norm = norm if callable(norm) else lambda x: jnp.linalg.norm(x.flatten(), ord=norm)
fun, interval = map_interval(fun, interval) | vfunc = wrap_func(fun, args) | 7 | 2023-10-24 04:44:34+00:00 | 8k |
yixinliu233/SIGNET | main.py | [
{
"identifier": "GIN",
"path": "models.py",
"snippet": "class GIN(torch.nn.Module):\n def __init__(self, num_features, dim, num_gc_layers, pooling, readout):\n super(GIN, self).__init__()\n\n self.num_gc_layers = num_gc_layers\n self.pooling = pooling\n self.readout = read... | import torch
import numpy as np
import torch.nn as nn
import random
import warnings
from sklearn.metrics import roc_auc_score
from models import GIN, Explainer_GIN, HyperGNN, Explainer_MLP
from arguments import arg_parse
from get_data_loaders import get_data_loaders
from get_data_loaders_tuad import get_ad_split_TU, get_data_loaders_TU | 4,291 |
warnings.filterwarnings("ignore")
explainable_datasets = ['mutag', 'mnist0', 'mnist1', 'bm_mn', 'bm_ms', 'bm_mt']
class SIGNET(nn.Module):
def __init__(self, input_dim, input_dim_edge, args, device):
super(SIGNET, self).__init__()
self.device = device
self.embedding_dim = args.hidden_dim
if args.readout == 'concat':
self.embedding_dim *= args.encoder_layers
if args.explainer_model == 'mlp':
self.explainer = Explainer_MLP(input_dim, args.explainer_hidden_dim, args.explainer_layers)
else:
self.explainer = Explainer_GIN(input_dim, args.explainer_hidden_dim,
args.explainer_layers, args.explainer_readout)
self.encoder = GIN(input_dim, args.hidden_dim, args.encoder_layers, args.pooling, args.readout)
self.encoder_hyper = HyperGNN(input_dim, input_dim_edge, args.hidden_dim, args.encoder_layers, args.pooling, args.readout)
self.proj_head = nn.Sequential(nn.Linear(self.embedding_dim, self.embedding_dim), nn.ReLU(inplace=True),
nn.Linear(self.embedding_dim, self.embedding_dim))
self.proj_head_hyper = nn.Sequential(nn.Linear(self.embedding_dim, self.embedding_dim), nn.ReLU(inplace=True),
nn.Linear(self.embedding_dim, self.embedding_dim))
self.init_emb()
def init_emb(self):
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, data):
node_imp = self.explainer(data.x, data.edge_index, data.batch)
edge_imp = self.lift_node_score_to_edge_score(node_imp, data.edge_index)
y, _ = self.encoder(data.x, data.edge_index, data.batch, node_imp)
y_hyper, _ = self.encoder_hyper(data.x, data.edge_index, data.edge_attr, data.batch, edge_imp)
y = self.proj_head(y)
y_hyper = self.proj_head_hyper(y_hyper)
return y, y_hyper, node_imp, edge_imp
@staticmethod
def loss_nce(x1, x2, temperature=0.2):
batch_size, _ = x1.size()
x1_abs = x1.norm(dim=1)
x2_abs = x2.norm(dim=1)
sim_matrix = torch.einsum('ik,jk->ij', x1, x2) / torch.einsum('i,j->ij', x1_abs, x2_abs)
sim_matrix = torch.exp(sim_matrix / temperature)
pos_sim = sim_matrix[range(batch_size), range(batch_size)]
loss_0 = pos_sim / (sim_matrix.sum(dim=0) - pos_sim + 1e-10)
loss_1 = pos_sim / (sim_matrix.sum(dim=1) - pos_sim + 1e-10)
loss_0 = - torch.log(loss_0 + 1e-10)
loss_1 = - torch.log(loss_1 + 1e-10)
loss = (loss_0 + loss_1) / 2.0
return loss
def lift_node_score_to_edge_score(self, node_score, edge_index):
src_lifted_att = node_score[edge_index[0]]
dst_lifted_att = node_score[edge_index[1]]
edge_score = src_lifted_att * dst_lifted_att
return edge_score
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def run(args, seed, split=None):
set_seed(seed)
is_xgad = args.dataset in explainable_datasets
if is_xgad:
loaders, meta = get_data_loaders(args.dataset, args.batch_size, args.batch_size_test, random_state=seed)
else:
|
warnings.filterwarnings("ignore")
explainable_datasets = ['mutag', 'mnist0', 'mnist1', 'bm_mn', 'bm_ms', 'bm_mt']
class SIGNET(nn.Module):
def __init__(self, input_dim, input_dim_edge, args, device):
super(SIGNET, self).__init__()
self.device = device
self.embedding_dim = args.hidden_dim
if args.readout == 'concat':
self.embedding_dim *= args.encoder_layers
if args.explainer_model == 'mlp':
self.explainer = Explainer_MLP(input_dim, args.explainer_hidden_dim, args.explainer_layers)
else:
self.explainer = Explainer_GIN(input_dim, args.explainer_hidden_dim,
args.explainer_layers, args.explainer_readout)
self.encoder = GIN(input_dim, args.hidden_dim, args.encoder_layers, args.pooling, args.readout)
self.encoder_hyper = HyperGNN(input_dim, input_dim_edge, args.hidden_dim, args.encoder_layers, args.pooling, args.readout)
self.proj_head = nn.Sequential(nn.Linear(self.embedding_dim, self.embedding_dim), nn.ReLU(inplace=True),
nn.Linear(self.embedding_dim, self.embedding_dim))
self.proj_head_hyper = nn.Sequential(nn.Linear(self.embedding_dim, self.embedding_dim), nn.ReLU(inplace=True),
nn.Linear(self.embedding_dim, self.embedding_dim))
self.init_emb()
def init_emb(self):
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, data):
node_imp = self.explainer(data.x, data.edge_index, data.batch)
edge_imp = self.lift_node_score_to_edge_score(node_imp, data.edge_index)
y, _ = self.encoder(data.x, data.edge_index, data.batch, node_imp)
y_hyper, _ = self.encoder_hyper(data.x, data.edge_index, data.edge_attr, data.batch, edge_imp)
y = self.proj_head(y)
y_hyper = self.proj_head_hyper(y_hyper)
return y, y_hyper, node_imp, edge_imp
@staticmethod
def loss_nce(x1, x2, temperature=0.2):
batch_size, _ = x1.size()
x1_abs = x1.norm(dim=1)
x2_abs = x2.norm(dim=1)
sim_matrix = torch.einsum('ik,jk->ij', x1, x2) / torch.einsum('i,j->ij', x1_abs, x2_abs)
sim_matrix = torch.exp(sim_matrix / temperature)
pos_sim = sim_matrix[range(batch_size), range(batch_size)]
loss_0 = pos_sim / (sim_matrix.sum(dim=0) - pos_sim + 1e-10)
loss_1 = pos_sim / (sim_matrix.sum(dim=1) - pos_sim + 1e-10)
loss_0 = - torch.log(loss_0 + 1e-10)
loss_1 = - torch.log(loss_1 + 1e-10)
loss = (loss_0 + loss_1) / 2.0
return loss
def lift_node_score_to_edge_score(self, node_score, edge_index):
src_lifted_att = node_score[edge_index[0]]
dst_lifted_att = node_score[edge_index[1]]
edge_score = src_lifted_att * dst_lifted_att
return edge_score
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def run(args, seed, split=None):
set_seed(seed)
is_xgad = args.dataset in explainable_datasets
if is_xgad:
loaders, meta = get_data_loaders(args.dataset, args.batch_size, args.batch_size_test, random_state=seed)
else: | loaders, meta = get_data_loaders_TU(args, split) | 7 | 2023-10-18 04:23:35+00:00 | 8k |
claws-lab/XLingEval | verifiability/verifiability_get_answer.py | [
{
"identifier": "args",
"path": "arguments.py",
"snippet": "REDDIT_COMMENTS_DIR = \"E:\\\\data\\\\Reddit\\\\comments\"\nDATA_DIR = \"F:\\\\data\\\\NLP\"\nDEVICE_MAP = {\"\": 0}\n DATA_DIR = osp.join(const.HOME_DIR_LINUX_SERVER, \"Workspace\", \"data\", \"NLP\")\n DEVICE_MAP = {\"\": [0, 1, 2, 3]}\... | import os
import os.path as osp
import traceback
import numpy as np
import pandas as pd
import const
import const_verifiability
from tqdm import trange
from arguments import args
from dataloader.load_data import load_HealthQA, load_LiveQA, load_MedicationQA
from verifiability.Medalpaca.model_medalpaca import init_medalpaca_model
from verifiability.prompts import prompt_verifiability
from verifiability.setup import project_setup, openai_setup
from utils.utils_chatgpt import get_response
from utils.utils_misc import get_model_prefix, capitalize_and_strip_punctuation
from verifiability.Medalpaca.params_medalpaca import *
from utils.utils_misc import map_prediction_to_binary
from utils.utils_chatgpt import get_response | 4,156 | examples = load_HealthQA(args.split, target_language)
else:
path = osp.join(args.output_dir, "verifiability",
f"{get_model_prefix(args)}{dataset_name}_verifiability_temp{temperature}.xlsx")
if dataset_name in ['liveqa']:
examples = load_LiveQA(target_language, task="verifiability")
elif dataset_name in ['medicationqa']:
examples = load_MedicationQA(target_language, task="verifiability")
else:
raise NotImplementedError
def save():
if osp.exists(path):
with pd.ExcelWriter(path, mode='a', engine='openpyxl') as writer:
results_df.to_excel(writer, sheet_name=target_language, index=False)
else:
results_df.to_excel(path, sheet_name=target_language, index=False)
if osp.exists(path):
results_df = pd.read_excel(path)
print(f"Loaded {len(results_df)} examples from {path}")
else:
results_df = pd.DataFrame()
results_df[const.PRED] = np.NaN
results_df[const.ERROR] = np.NaN
idx_start = 0
def format_question(question, answer):
return f"Question: {question}\nResponse: {answer}"
if args.model.startswith("medalpaca"):
questions = examples[const.QUESTION if
args.target_language == "English" else const.QUESTION_TRANSLATED].tolist()
answers = examples[const.ANSWER if
args.target_language == "English" else const.ANSWER_TRANSLATED].tolist()
input_questions = [format_question(question, answer) for question,
answer in
zip(questions, answers)]
sampling['temperature'] = args.temperature
results_df[const.QUESTION] = [None] * len(input_questions)
results_df[const.ANSWER] = [None] * len(input_questions)
for idx_row in trange(idx_start, len(input_questions), args.batch_size):
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.QUESTION] \
= questions[idx_row:idx_row + args.batch_size]
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.ANSWER] = \
answers[idx_row:idx_row + args.batch_size]
try:
batch = input_questions[idx_row:idx_row + args.batch_size]
responses = model.batch_inference(
instruction=f"Answer me 'Yes' or 'No'.",
inputs=batch,
output="The answer to the question is:",
verbose=True,
**sampling
)
except Exception as e:
traceback.print_exc()
continue
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.PRED] = responses
if (idx_row % 20 == 0 or idx_row == len(examples) - 1):
print(f"Saving results to {path}...", end=" ")
# results_df.reset_index(drop=True).drop("Unnamed: 0", axis=1, errors="ignore").to_excel(path, index=False)
save()
print("Done")
save()
else:
# Each row has a question and a sample answer
for idx_row in range(idx_start, len(examples)):
row = examples.loc[idx_row]
# Copy the contents from the original data
results_df.loc[idx_row, const.QUESTION] = row[const.QUESTION]
results_df.loc[idx_row, const.ANSWER] = row[const.ANSWER]
results_df.loc[idx_row, const.ID] = row.name
results_df.loc[idx_row, const.LABEL] = row[const.LABEL]
if args.fill_null_values:
row_pred = results_df.iloc[idx_row]
if row_pred[const.PRED] in ["Yes", "No"]:
continue
prompt = prompt_verifiability(
row[const.QUESTION if target_language == "English" else const.QUESTION_TRANSLATED],
row[const.ANSWER if target_language == "English" else
const.ANSWER_TRANSLATED],
target_language)
print(f"{idx_row}\t{prompt}")
try:
|
project_setup()
openai_setup(args)
RETURN_EXPLANATION = False
results = {}
def run_verifiability(temperature: float, dataset_name: str, target_language: str):
os.makedirs(osp.join(args.output_dir, "verifiability"), exist_ok=True)
if dataset_name in ['healthqa']:
path = osp.join(args.output_dir, "verifiability",
f"{get_model_prefix(args)}{dataset_name}_verifiability_temp{temperature}_{args.split}"
f"_{target_language}.xlsx")
examples = load_HealthQA(args.split, target_language)
else:
path = osp.join(args.output_dir, "verifiability",
f"{get_model_prefix(args)}{dataset_name}_verifiability_temp{temperature}.xlsx")
if dataset_name in ['liveqa']:
examples = load_LiveQA(target_language, task="verifiability")
elif dataset_name in ['medicationqa']:
examples = load_MedicationQA(target_language, task="verifiability")
else:
raise NotImplementedError
def save():
if osp.exists(path):
with pd.ExcelWriter(path, mode='a', engine='openpyxl') as writer:
results_df.to_excel(writer, sheet_name=target_language, index=False)
else:
results_df.to_excel(path, sheet_name=target_language, index=False)
if osp.exists(path):
results_df = pd.read_excel(path)
print(f"Loaded {len(results_df)} examples from {path}")
else:
results_df = pd.DataFrame()
results_df[const.PRED] = np.NaN
results_df[const.ERROR] = np.NaN
idx_start = 0
def format_question(question, answer):
return f"Question: {question}\nResponse: {answer}"
if args.model.startswith("medalpaca"):
questions = examples[const.QUESTION if
args.target_language == "English" else const.QUESTION_TRANSLATED].tolist()
answers = examples[const.ANSWER if
args.target_language == "English" else const.ANSWER_TRANSLATED].tolist()
input_questions = [format_question(question, answer) for question,
answer in
zip(questions, answers)]
sampling['temperature'] = args.temperature
results_df[const.QUESTION] = [None] * len(input_questions)
results_df[const.ANSWER] = [None] * len(input_questions)
for idx_row in trange(idx_start, len(input_questions), args.batch_size):
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.QUESTION] \
= questions[idx_row:idx_row + args.batch_size]
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.ANSWER] = \
answers[idx_row:idx_row + args.batch_size]
try:
batch = input_questions[idx_row:idx_row + args.batch_size]
responses = model.batch_inference(
instruction=f"Answer me 'Yes' or 'No'.",
inputs=batch,
output="The answer to the question is:",
verbose=True,
**sampling
)
except Exception as e:
traceback.print_exc()
continue
results_df.loc[idx_row:idx_row + args.batch_size - 1, const.PRED] = responses
if (idx_row % 20 == 0 or idx_row == len(examples) - 1):
print(f"Saving results to {path}...", end=" ")
# results_df.reset_index(drop=True).drop("Unnamed: 0", axis=1, errors="ignore").to_excel(path, index=False)
save()
print("Done")
save()
else:
# Each row has a question and a sample answer
for idx_row in range(idx_start, len(examples)):
row = examples.loc[idx_row]
# Copy the contents from the original data
results_df.loc[idx_row, const.QUESTION] = row[const.QUESTION]
results_df.loc[idx_row, const.ANSWER] = row[const.ANSWER]
results_df.loc[idx_row, const.ID] = row.name
results_df.loc[idx_row, const.LABEL] = row[const.LABEL]
if args.fill_null_values:
row_pred = results_df.iloc[idx_row]
if row_pred[const.PRED] in ["Yes", "No"]:
continue
prompt = prompt_verifiability(
row[const.QUESTION if target_language == "English" else const.QUESTION_TRANSLATED],
row[const.ANSWER if target_language == "English" else
const.ANSWER_TRANSLATED],
target_language)
print(f"{idx_row}\t{prompt}")
try: | response = get_response(prompt, temperature=temperature, | 8 | 2023-10-18 17:35:42+00:00 | 8k |
vtuber-plan/olah | olah/server.py | [
{
"identifier": "OlahConfig",
"path": "olah/configs.py",
"snippet": "class OlahConfig(object):\n def __init__(self, path: Optional[str] = None) -> None:\n\n # basic\n self.host = \"localhost\"\n self.port = 8090\n self.ssl_key = None\n self.ssl_cert = None\n ... | import datetime
import json
import os
import argparse
import tempfile
import shutil
import httpx
import uvicorn
from typing import Annotated, Union
from fastapi import FastAPI, Header, Request
from fastapi.responses import HTMLResponse, StreamingResponse, Response
from pydantic import BaseSettings
from olah.configs import OlahConfig
from olah.files import file_get_generator, file_head_generator
from olah.lfs import lfs_get_generator
from olah.meta import meta_generator
from olah.utls import check_proxy_rules_hf, check_commit_hf, get_commit_hf, get_newest_commit_hf | 4,208 |
app = FastAPI(debug=False)
class AppSettings(BaseSettings):
# The address of the model controller.
config: OlahConfig = OlahConfig()
repos_path: str = "./repos"
hf_url: str = "https://huggingface.co"
hf_lfs_url: str = "https://cdn-lfs.huggingface.co"
mirror_url: str = "http://localhost:8090"
mirror_lfs_url: str = "http://localhost:8090"
@app.get("/api/{repo_type}s/{org}/{repo}")
async def meta_proxy(repo_type: str, org: str, repo: str, request: Request):
|
app = FastAPI(debug=False)
class AppSettings(BaseSettings):
# The address of the model controller.
config: OlahConfig = OlahConfig()
repos_path: str = "./repos"
hf_url: str = "https://huggingface.co"
hf_lfs_url: str = "https://cdn-lfs.huggingface.co"
mirror_url: str = "http://localhost:8090"
mirror_lfs_url: str = "http://localhost:8090"
@app.get("/api/{repo_type}s/{org}/{repo}")
async def meta_proxy(repo_type: str, org: str, repo: str, request: Request): | if not await check_proxy_rules_hf(app, repo_type, org, repo): | 5 | 2023-10-23 15:01:52+00:00 | 8k |
zju3dv/nr_in_a_room | tools/make_axis_align_real_data.py | [
{
"identifier": "O3dVisualizer",
"path": "tools/O3dVisualizer.py",
"snippet": "class O3dVisualizer:\n def __init__(self):\n self.geometries = []\n\n def add_o3d_geometry(self, geometry):\n self.geometries.append(geometry)\n\n def add_line_set(self, points, lines, colors=None, radi... | import os
import sys
import pyglet
import argparse
import numpy as np
import torch
import json
import imageio
import cv2
import shutil
import glob
import open3d as o3d
import trimesh
import matplotlib.pyplot as plt
from tqdm import tqdm
from tools.O3dVisualizer import O3dVisualizer
from tools.apply_light_map_2d import compute_normal_from_depth
from utils.util import read_json, write_json
from scipy.spatial.transform import Rotation as R
from pyrender import (
PerspectiveCamera,
Mesh,
Node,
Scene,
Viewer,
OffscreenRenderer,
RenderFlags,
)
from scipy.spatial.transform import Rotation as R | 4,792 | rotation = R.from_euler("xyz", [-args.x_rot, 0, 0], degrees=True).as_matrix()
rotation = (
rotation @ R.from_euler("xyz", [0, -args.y_rot, 0], degrees=True).as_matrix()
)
mesh.rotate(rotation, center=(0, 0, 0))
# translate to make bbox center at origin
translate = -mesh.get_axis_aligned_bounding_box().get_center()
mesh.translate(translate)
# compute mesh bbox
bbox = mesh.get_axis_aligned_bounding_box()
bound = np.array([bbox.min_bound, bbox.max_bound])
size = bound[1] - bound[0]
# transform mat for frames
transform_mat = np.eye(4)
transform_mat[:3, :3] = rotation
transform_mat[:3, 3] = translate
visualizer.add_o3d_geometry(mesh)
# visualizer.run_visualize()
output_dir = args.output_dir
os.makedirs(output_dir, exist_ok=True)
# write mesh and bbox info
o3d.io.write_triangle_mesh(os.path.join(output_dir, "aligned.obj"), mesh)
write_json(
{
"max_bound": bbox.max_bound.tolist(),
"min_bound": bbox.min_bound.tolist(),
"size": size.tolist(),
},
os.path.join(output_dir, "bbox.json"),
)
# initialize mask render
obj_trimesh = trimesh.load(os.path.join(output_dir, "aligned.obj"))
obj_mesh = Mesh.from_trimesh(obj_trimesh)
scene = Scene(ambient_light=np.array([0.5, 0.5, 0.5, 1.0]))
obj_node = Node(mesh=obj_mesh, translation=np.zeros(3))
scene.add_node(obj_node)
# pre frame processing
frame_info = {"frames": []}
tracking_quality_th = 1.1
if args.instance_id_for_mask == 34: # desk use larger drop ratio
tracking_quality_th = tracking_quality_filter(all_image_files, drop_ratio=50)
else:
tracking_quality_th = tracking_quality_filter(all_image_files, drop_ratio=20)
print("tracking quality threshold", tracking_quality_th)
os.makedirs(os.path.join(output_dir, "full"), exist_ok=True)
for idx in tqdm(range(len(all_image_files))):
absolute_img_name = all_image_files[idx]
img_name = os.path.basename(absolute_img_name)
arkit_frame_info = read_json(
os.path.join(arkit_raw_dir, img_name[:-3] + "json")
)
if idx == 0:
h, w, _ = imageio.imread(absolute_img_name).shape
# write camera angle
intrinsics = np.array(arkit_frame_info["intrinsics"])
focal, cx, cy = intrinsics[0], intrinsics[2], intrinsics[5]
xfov = np.arctan(w / 2 / focal) * 2
print("xfov =", xfov)
frame_info["camera_angle_x"] = xfov
render = OffscreenRenderer(viewport_width=w, viewport_height=h)
yfov = np.arctan(h / 2 / focal) * 2
cam = PerspectiveCamera(yfov=yfov)
cam_node = scene.add(cam)
if arkit_frame_info["motionQuality"] < tracking_quality_th:
continue
if img_name not in colmap_refined_frames:
continue
# pose_ndc = np.array(arkit_frame_info["cameraPoseARFrame"]).reshape(4, 4)
# read pose from colmap refined, and convert to ndc coordinate
pose_ndc = np.array(colmap_refined_frames[img_name]["W2C"]).reshape(4, 4)
pose_ndc = np.linalg.inv(pose_ndc)
fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)
pose_ndc[:3, :3] = pose_ndc[:3, :3] @ fix_rot
# transform to arkit pose
s, R, t = decompose_to_sRT(transform_colmap_to_arkit)
# pose_ndc = transform_colmap_to_arkit @ pose_ndc
# print(s, R, t)
pose_ndc[:3, 3] = R @ (pose_ndc[:3, 3] * s) + t
pose_ndc[:3, :3] = R @ pose_ndc[:3, :3]
# apply alignment to poses
pose_ndc = transform_mat @ pose_ndc
# render depth
scene.set_pose(cam_node, pose_ndc)
mesh_proj_color, rendered_depth = render.render(scene)
# use sensor depth
# sensor_depth = cv2.imread(
# os.path.join(arkit_raw_dir, f"depth_{img_name[6:11]}.png"), -1
# )
# sensor_depth = cv2.resize(
# sensor_depth, dsize=(w, h), interpolation=cv2.INTER_NEAREST
# )
# sensor_depth = sensor_depth.astype(np.float32) * 1e-3
# cv2.imwrite(
# os.path.join(output_dir, "full", f"{img_name[:-4]}.depth.png"),
# (sensor_depth * 1000).astype(np.uint16),
# )
cv2.imwrite(
os.path.join(output_dir, "full", f"{img_name[:-4]}.depth.png"),
(rendered_depth * 1000).astype(np.uint16),
)
# compute normal
|
sys.path.append(".") # noqa
pyglet.options["shadow_window"] = False
def decompose_to_sRT(Trans):
t = Trans[:3, 3]
R = Trans[:3, :3]
# assume x y z have the same scale
scale = np.linalg.norm(R[:3, 0])
R = R / scale
return scale, R, t
def align_colmap_pose_to_arkit_coord(
colmap_refined_frames,
arkit_all_image_files,
use_ransac_filter=True,
# use_ransac_filter=False,
):
colmap_centers = []
arkit_centers = []
overlap_image_names = []
for absolute_image_name in arkit_all_image_files:
img_name = os.path.basename(absolute_image_name)
if img_name not in colmap_refined_frames:
continue
overlap_image_names += [img_name]
arkit_frame_info = read_json(absolute_image_name[:-3] + "json")
pose_ndc = np.array(arkit_frame_info["cameraPoseARFrame"]).reshape(4, 4)
arkit_centers += [pose_ndc[:3, 3]]
pose_colmap = np.array(colmap_refined_frames[img_name]["W2C"]).reshape(
4, 4
) # Tcw
pose_colmap = np.linalg.inv(pose_colmap)
colmap_centers.append(pose_colmap[:3, 3])
colmap_centers = np.stack(colmap_centers, axis=0)
arkit_centers = np.stack(arkit_centers, axis=0)
source = o3d.geometry.PointCloud()
source.points = o3d.utility.Vector3dVector(colmap_centers)
target = o3d.geometry.PointCloud()
target.points = o3d.utility.Vector3dVector(arkit_centers)
if use_ransac_filter:
corr = np.arange(colmap_centers.shape[0])
corr = np.stack([corr, corr], axis=1)
# using ransac to filter bad poses
result = o3d.pipelines.registration.registration_ransac_based_on_correspondence(
source,
target,
o3d.utility.Vector2iVector(corr),
0.2,
o3d.pipelines.registration.TransformationEstimationPointToPoint(True),
)
transformation = result.transformation
# filter by resulting correspondence
remaining_corr = np.asarray(result.correspondence_set)
for i, name in enumerate(overlap_image_names):
if i not in remaining_corr:
print("Remove bad frame", name)
del colmap_refined_frames[name]
else:
p2p = o3d.pipelines.registration.TransformationEstimationPointToPoint()
p2p.with_scaling = True
corr = np.arange(colmap_centers.shape[0])
corr = np.stack([corr, corr], axis=1)
transformation = p2p.compute_transformation(
source, target, o3d.utility.Vector2iVector(corr)
)
return transformation, colmap_refined_frames
def read_sense_frame_txt(pose_path):
pose_dict = {}
with open(pose_path) as file:
lines = file.readlines()
lines = lines[4:]
for line in lines:
fname, tx, ty, tz, qx, qy, qz, qw = line.strip().split(" ")
fname += ".jpg"
pose = np.eye(4)
pose[0, 3] = tx
pose[1, 3] = ty
pose[2, 3] = tz
pose[:3, :3] = R.from_quat([qx, qy, qz, qw]).as_matrix()
pose = np.linalg.inv(pose)
pose_dict[fname] = {"W2C": pose}
# print(fname, pose)
return pose_dict
def tracking_quality_filter(arkit_all_image_files, drop_ratio=50.0):
"""
drop frames with bad quality
"""
qualities = []
for absolute_image_name in arkit_all_image_files:
arkit_frame_info = read_json(absolute_image_name[:-3] + "json")
qualities += [arkit_frame_info["motionQuality"]]
qualities = np.array(qualities)
quality_th = np.percentile(qualities, drop_ratio)
return quality_th
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--arkit_raw_dir",
default="/home/ybbbbt/Developer/neural_scene/data/arkit_recon/arkit_box_2",
)
parser.add_argument(
"--obj_in_colmap_coord",
default="/home/ybbbbt/Developer/neural_scene/data/object_capture_recon/box/obj_in_colmap_coord.obj",
)
parser.add_argument("--colmap_refine_dir")
"""
Tune with MeshLab: Filters -> Mesh Layers -> Matrix: set from translation/rotaton/scale
"""
parser.add_argument("--x_rot", default=-90, type=float) # X rotation in meshlab
parser.add_argument("--y_rot", default=0, type=float) # Y rotation in meshlab
parser.add_argument(
"--output_dir", default="debug/processed_real_data", type=str
) # Y rotation in meshlab
parser.add_argument(
"--instance_id_for_mask", default=1, type=int
) # X rotation in meshlab
args = parser.parse_args()
mode = "object_capture_aligned_to_colmap"
# mode = "sense"
visualizer = O3dVisualizer()
mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(
size=0.5, origin=[0, 0, 0]
)
visualizer.add_o3d_geometry(mesh_frame)
# read frame info
arkit_raw_dir = args.arkit_raw_dir
all_image_files = sorted(glob.glob(arkit_raw_dir + "/frame_*.jpg"))
if mode == "sense":
colmap_refined_frames = read_sense_frame_txt(
os.path.join(args.colmap_refine_dir, "pose.txt")
)
else:
colmap_refined_frames = read_json(
# os.path.join(args.colmap_refine_dir, "nerfpp_fmt", "nerfpp_cameras.json")
# os.path.join(args.colmap_refine_dir, "posed_images", "nerfpp_cameras.json")
os.path.join(
args.colmap_refine_dir, "output/posed_images", "nerfpp_cameras.json"
)
)
# align colmap to arkit pose
transform_colmap_to_arkit = np.eye(4)
transform_colmap_to_arkit, colmap_refined_frames = align_colmap_pose_to_arkit_coord(
colmap_refined_frames, all_image_files
)
s, R, t = decompose_to_sRT(transform_colmap_to_arkit)
print(s, R, t)
# read and process mesh
mesh = o3d.io.read_triangle_mesh(args.obj_in_colmap_coord)
# if mode == "sense":
mesk = mesh.transform(transform_colmap_to_arkit)
# rotate mesh
# make axis align
rotation = R.from_euler("xyz", [-args.x_rot, 0, 0], degrees=True).as_matrix()
rotation = (
rotation @ R.from_euler("xyz", [0, -args.y_rot, 0], degrees=True).as_matrix()
)
mesh.rotate(rotation, center=(0, 0, 0))
# translate to make bbox center at origin
translate = -mesh.get_axis_aligned_bounding_box().get_center()
mesh.translate(translate)
# compute mesh bbox
bbox = mesh.get_axis_aligned_bounding_box()
bound = np.array([bbox.min_bound, bbox.max_bound])
size = bound[1] - bound[0]
# transform mat for frames
transform_mat = np.eye(4)
transform_mat[:3, :3] = rotation
transform_mat[:3, 3] = translate
visualizer.add_o3d_geometry(mesh)
# visualizer.run_visualize()
output_dir = args.output_dir
os.makedirs(output_dir, exist_ok=True)
# write mesh and bbox info
o3d.io.write_triangle_mesh(os.path.join(output_dir, "aligned.obj"), mesh)
write_json(
{
"max_bound": bbox.max_bound.tolist(),
"min_bound": bbox.min_bound.tolist(),
"size": size.tolist(),
},
os.path.join(output_dir, "bbox.json"),
)
# initialize mask render
obj_trimesh = trimesh.load(os.path.join(output_dir, "aligned.obj"))
obj_mesh = Mesh.from_trimesh(obj_trimesh)
scene = Scene(ambient_light=np.array([0.5, 0.5, 0.5, 1.0]))
obj_node = Node(mesh=obj_mesh, translation=np.zeros(3))
scene.add_node(obj_node)
# pre frame processing
frame_info = {"frames": []}
tracking_quality_th = 1.1
if args.instance_id_for_mask == 34: # desk use larger drop ratio
tracking_quality_th = tracking_quality_filter(all_image_files, drop_ratio=50)
else:
tracking_quality_th = tracking_quality_filter(all_image_files, drop_ratio=20)
print("tracking quality threshold", tracking_quality_th)
os.makedirs(os.path.join(output_dir, "full"), exist_ok=True)
for idx in tqdm(range(len(all_image_files))):
absolute_img_name = all_image_files[idx]
img_name = os.path.basename(absolute_img_name)
arkit_frame_info = read_json(
os.path.join(arkit_raw_dir, img_name[:-3] + "json")
)
if idx == 0:
h, w, _ = imageio.imread(absolute_img_name).shape
# write camera angle
intrinsics = np.array(arkit_frame_info["intrinsics"])
focal, cx, cy = intrinsics[0], intrinsics[2], intrinsics[5]
xfov = np.arctan(w / 2 / focal) * 2
print("xfov =", xfov)
frame_info["camera_angle_x"] = xfov
render = OffscreenRenderer(viewport_width=w, viewport_height=h)
yfov = np.arctan(h / 2 / focal) * 2
cam = PerspectiveCamera(yfov=yfov)
cam_node = scene.add(cam)
if arkit_frame_info["motionQuality"] < tracking_quality_th:
continue
if img_name not in colmap_refined_frames:
continue
# pose_ndc = np.array(arkit_frame_info["cameraPoseARFrame"]).reshape(4, 4)
# read pose from colmap refined, and convert to ndc coordinate
pose_ndc = np.array(colmap_refined_frames[img_name]["W2C"]).reshape(4, 4)
pose_ndc = np.linalg.inv(pose_ndc)
fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)
pose_ndc[:3, :3] = pose_ndc[:3, :3] @ fix_rot
# transform to arkit pose
s, R, t = decompose_to_sRT(transform_colmap_to_arkit)
# pose_ndc = transform_colmap_to_arkit @ pose_ndc
# print(s, R, t)
pose_ndc[:3, 3] = R @ (pose_ndc[:3, 3] * s) + t
pose_ndc[:3, :3] = R @ pose_ndc[:3, :3]
# apply alignment to poses
pose_ndc = transform_mat @ pose_ndc
# render depth
scene.set_pose(cam_node, pose_ndc)
mesh_proj_color, rendered_depth = render.render(scene)
# use sensor depth
# sensor_depth = cv2.imread(
# os.path.join(arkit_raw_dir, f"depth_{img_name[6:11]}.png"), -1
# )
# sensor_depth = cv2.resize(
# sensor_depth, dsize=(w, h), interpolation=cv2.INTER_NEAREST
# )
# sensor_depth = sensor_depth.astype(np.float32) * 1e-3
# cv2.imwrite(
# os.path.join(output_dir, "full", f"{img_name[:-4]}.depth.png"),
# (sensor_depth * 1000).astype(np.uint16),
# )
cv2.imwrite(
os.path.join(output_dir, "full", f"{img_name[:-4]}.depth.png"),
(rendered_depth * 1000).astype(np.uint16),
)
# compute normal | normal_map = compute_normal_from_depth( | 1 | 2023-10-15 08:41:29+00:00 | 8k |
ShramanPramanick/VoLTA | Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/rpn/vldyhead.py | [
{
"identifier": "make_atss_postprocessor",
"path": "Multimodal_Fine_Grained/maskrcnn_benchmark/modeling/rpn/inference.py",
"snippet": "def make_atss_postprocessor(config, box_coder, is_train=False):\n pre_nms_thresh = config.MODEL.ATSS.INFERENCE_TH\n if is_train:\n pre_nms_thresh = config.M... | import torch
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import pdb
from torch import nn
from collections import defaultdict
from .inference import make_atss_postprocessor
from .loss import make_atss_loss_evaluator
from .anchor_generator import make_anchor_generator_complex
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
from maskrcnn_benchmark.layers import Scale, DYReLU, SELayer, ModulatedDeformConv
from maskrcnn_benchmark.layers import NaiveSyncBatchNorm2d, FrozenBatchNorm2d
from maskrcnn_benchmark.modeling.backbone.fbnet import *
from maskrcnn_benchmark.engine.inference import create_positive_map_label_to_token_from_positive_map
from ..utils import cat, concat_box_prediction_layers, permute_and_flatten
from maskrcnn_benchmark.utils.fuse_helper import (
FeatureResizer,
func_attention,
_make_mlp,
_make_conv,
_make_coord,
BiAttentionBlock,
AttentionT2I,
BiAttentionBlockForCheckpoint,
BertLMPredictionHead,
)
from transformers.models.bert.modeling_bert import (
BertConfig,
BertAttention,
BertIntermediate,
BertOutput,
BertPreTrainedModel,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.modeling_utils import apply_chunking_to_forward
from maskrcnn_benchmark.modeling.language_backbone.clip_model import QuickGELU, LayerNorm, DropPath
from timm.models.layers import DropPath, trunc_normal_
from maskrcnn_benchmark.modeling.rpn.modeling_bert import BertAttention, BertIntermediate, BertOutput | 5,049 | self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_SHALLOW_CONTRASTIVE_LOSS
or self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_BACKBONE_SHALLOW_CONTRASTIVE_LOSS
):
shallow_img_emb_feats = []
shallow_text_emb = embedding
# print([v.shape for v in x])
# shallow contrastive: use the feature from swint backbone
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_BACKBONE_SHALLOW_CONTRASTIVE_LOSS:
for b, feature in enumerate(swint_feature_c4):
# BF, CF, HF, WF = feat.shape
# shallow_img_emb = permute_and_flatten(feat, BF, -1, CF, HF, WF)
shallow_img_emb_feats.append(feature)
fused_visual_features = None
if self.cfg.MODEL.RPN.RETURN_FUSED_FEATURES:
fused_visual_features = []
# use the feature from FPN
for l, feature in enumerate(x):
logits.append(self.cls_logits(dyhead_tower["visual"][l]))
bbox_pred = self.scales[l](self.bbox_pred(dyhead_tower["visual"][l]))
bbox_reg.append(bbox_pred)
centerness.append(self.centerness(dyhead_tower["visual"][l]))
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_TOKEN_LOSS:
t_logits.append(self.token_logits(dyhead_tower["visual"][l]))
# ABLATION
# b = self.bias.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
# x = dyhead_tower["visual"][l]
# B, C, H, W = x.shape
# bias = b.repeat(B, 1, H, W)
# t_logits.append(self.token_logits(dyhead_tower["visual"][l] + bias) + self.bias0)
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS:
x = dyhead_tower["visual"][l]
B, _, H, W = x.shape
C = proj_tokens.shape[2]
proj_queries = self.contrastive_align_projection_image(dyhead_tower["visual"][l])
proj_queries = permute_and_flatten(proj_queries, B, -1, C, H, W)
normalized_img_emb = F.normalize(proj_queries, p=2, dim=-1)
normalized_text_emb = proj_tokens
contrastive_logit = (
torch.matmul(normalized_img_emb, normalized_text_emb.transpose(-1, -2)) / self.log_scale.exp()
)
contrastive_logits.append(contrastive_logit)
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS:
x = dyhead_tower["visual"][l]
if self.cfg.MODEL.RPN.RETURN_FUSED_FEATURES:
fused_visual_features.append(x)
B, C, H, W = x.shape
# add bias (language)
dot_product_proj_queries = self.dot_product_projection_image(x)
dot_product_proj_queries = permute_and_flatten(dot_product_proj_queries, B, -1, C, H, W)
A = dot_product_proj_queries.shape[1]
bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat(1, A, 1)
# add bias (vision)
# b = self.bias.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
# tensor.repeat() is supposed to cost more memory, bias = b.repeat(B, 1, H, W)
# here we replace it with tensor.expand()
# bias = b.repeat(B, 1, H, W)
# dot_product_proj_queries = self.dot_product_projection_image(x) + bias
# print(torch.norm(dot_product_proj_tokens))
# exit()
dot_product_logit = (
torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2))
/ self.log_scale.exp()
) + bias
# dot_product_logit = (torch.matmul(dot_product_proj_queries,
# dot_product_proj_tokens.transpose(-1,
# -2)) / self.log_scale.exp()) + self.bias0
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_DOT_PRODUCT:
dot_product_logit = torch.clamp(dot_product_logit, max=50000)
dot_product_logit = torch.clamp(dot_product_logit, min=-50000)
dot_product_logits.append(dot_product_logit)
if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_SHALLOW_CONTRASTIVE_LOSS:
feat = feature
BF, CF, HF, WF = feat.shape
shallow_img_emb = permute_and_flatten(feat, BF, -1, CF, HF, WF)
shallow_img_emb_feats.append(shallow_img_emb)
# no matter the feature is from backboone or from fpn, we use shallow_img_embs all the time
if shallow_img_emb_feats is not None and shallow_text_emb is not None:
# shallow_img_embs = torch.cat(shallow_img_embs, dim=1)
proj_tokens = shallow_text_emb
return (
logits,
bbox_reg,
centerness,
t_logits,
proj_tokens,
contrastive_logits,
dot_product_logits,
mlm_logits,
shallow_img_emb_feats,
fused_visual_features,
)
class VLDyHeadModule(torch.nn.Module):
def __init__(self, cfg):
super(VLDyHeadModule, self).__init__()
self.cfg = cfg
self.head = VLDyHead(cfg)
box_coder = BoxCoder(cfg)
self.loss_evaluator = make_atss_loss_evaluator(cfg, box_coder)
self.box_selector_train = make_atss_postprocessor(cfg, box_coder, is_train=True)
self.box_selector_test = make_atss_postprocessor(cfg, box_coder, is_train=False)
|
class h_sigmoid(nn.Module):
def __init__(self, inplace=True, h_max=1):
super(h_sigmoid, self).__init__()
self.relu = nn.ReLU6(inplace=inplace)
self.h_max = h_max
def forward(self, x):
return self.relu(x + 3) * self.h_max / 6
class BoxCoder(object):
def __init__(self, cfg):
self.cfg = cfg
def encode(self, gt_boxes, anchors):
TO_REMOVE = 1 # TODO remove
ex_widths = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
ex_heights = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
ex_ctr_x = (anchors[:, 2] + anchors[:, 0]) / 2
ex_ctr_y = (anchors[:, 3] + anchors[:, 1]) / 2
gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + TO_REMOVE
gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + TO_REMOVE
gt_ctr_x = (gt_boxes[:, 2] + gt_boxes[:, 0]) / 2
gt_ctr_y = (gt_boxes[:, 3] + gt_boxes[:, 1]) / 2
wx, wy, ww, wh = (10.0, 10.0, 5.0, 5.0)
if gt_ctr_x.nelement() == 0:
targets_dx = torch.zeros_like(ex_ctr_x)
targets_dy = torch.zeros_like(ex_ctr_y)
targets_dw = torch.zeros_like(ex_widths)
targets_dh = torch.zeros_like(ex_heights)
else:
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * torch.log(gt_widths / ex_widths)
targets_dh = wh * torch.log(gt_heights / ex_heights)
targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)
return targets
def decode(self, preds, anchors):
anchors = anchors.to(preds.dtype)
TO_REMOVE = 1 # TODO remove
widths = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
heights = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
ctr_x = (anchors[:, 2] + anchors[:, 0]) / 2
ctr_y = (anchors[:, 3] + anchors[:, 1]) / 2
wx, wy, ww, wh = (10.0, 10.0, 5.0, 5.0)
dx = preds[:, 0::4] / wx
dy = preds[:, 1::4] / wy
dw = preds[:, 2::4] / ww
dh = preds[:, 3::4] / wh
# Prevent sending too large values into torch.exp()
dw = torch.clamp(dw, max=math.log(1000.0 / 16))
dh = torch.clamp(dh, max=math.log(1000.0 / 16))
pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
pred_w = torch.exp(dw) * widths[:, None]
pred_h = torch.exp(dh) * heights[:, None]
pred_boxes = torch.zeros_like(preds)
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * (pred_w - 1)
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * (pred_h - 1)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * (pred_w - 1)
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * (pred_h - 1)
return pred_boxes
class Conv3x3Norm(torch.nn.Module):
def __init__(self, in_channels, out_channels, stride, groups=1, deformable=False, bn_type=None):
super(Conv3x3Norm, self).__init__()
if deformable:
self.conv = ModulatedDeformConv(
in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups
)
else:
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups)
if isinstance(bn_type, (list, tuple)):
assert len(bn_type) == 2
assert bn_type[0] == "gn"
gn_group = bn_type[1]
bn_type = bn_type[0]
if bn_type == "bn":
bn_op = nn.BatchNorm2d(out_channels)
elif bn_type == "sbn":
bn_op = nn.SyncBatchNorm(out_channels)
elif bn_type == "nsbn":
bn_op = NaiveSyncBatchNorm2d(out_channels)
elif bn_type == "gn":
bn_op = nn.GroupNorm(num_groups=gn_group, num_channels=out_channels)
elif bn_type == "af":
bn_op = FrozenBatchNorm2d(out_channels)
if bn_type is not None:
self.bn = bn_op
else:
self.bn = None
def forward(self, input, **kwargs):
x = self.conv(input, **kwargs)
if self.bn:
x = self.bn(x)
return x
class DyConv(torch.nn.Module):
def __init__(
self,
in_channels=256,
out_channels=256,
conv_func=nn.Conv2d,
use_dyfuse=True,
use_dyrelu=False,
use_deform=False,
):
super(DyConv, self).__init__()
self.DyConv = nn.ModuleList()
self.DyConv.append(conv_func(in_channels, out_channels, 1))
self.DyConv.append(conv_func(in_channels, out_channels, 1))
self.DyConv.append(conv_func(in_channels, out_channels, 2))
if use_dyfuse:
self.AttnConv = nn.Sequential(
nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, 1, kernel_size=1), nn.ReLU(inplace=True)
)
self.h_sigmoid = h_sigmoid()
else:
self.AttnConv = None
if use_dyrelu:
self.relu = DYReLU(in_channels, out_channels)
else:
self.relu = nn.ReLU()
if use_deform:
self.offset = nn.Conv2d(in_channels, 27, kernel_size=3, stride=1, padding=1)
else:
self.offset = None
self.init_weights()
def init_weights(self):
for m in self.DyConv.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight.data, 0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
if self.AttnConv is not None:
for m in self.AttnConv.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight.data, 0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, inputs):
visual_feats = inputs["visual"]
language_dict_features = inputs["lang"]
next_x = []
for level, feature in enumerate(visual_feats):
conv_args = dict()
if self.offset is not None:
offset_mask = self.offset(feature)
offset = offset_mask[:, :18, :, :]
mask = offset_mask[:, 18:, :, :].sigmoid()
conv_args = dict(offset=offset, mask=mask)
temp_fea = [self.DyConv[1](feature, **conv_args)]
if level > 0:
temp_fea.append(self.DyConv[2](visual_feats[level - 1], **conv_args))
if level < len(visual_feats) - 1:
temp_fea.append(
F.upsample_bilinear(
self.DyConv[0](visual_feats[level + 1], **conv_args), size=[feature.size(2), feature.size(3)]
)
)
mean_fea = torch.mean(torch.stack(temp_fea), dim=0, keepdim=False)
if self.AttnConv is not None:
attn_fea = []
res_fea = []
for fea in temp_fea:
res_fea.append(fea)
attn_fea.append(self.AttnConv(fea))
res_fea = torch.stack(res_fea)
spa_pyr_attn = self.h_sigmoid(torch.stack(attn_fea))
mean_fea = torch.mean(res_fea * spa_pyr_attn, dim=0, keepdim=False)
next_x.append(mean_fea)
next_x = [self.relu(item) for item in next_x]
features_dict = {"visual": next_x, "lang": language_dict_features}
return features_dict
class BertEncoderLayer(BertPreTrainedModel):
def __init__(self, config, clamp_min_for_underflow=False, clamp_max_for_overflow=False):
super().__init__(config)
self.config = config
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config, clamp_min_for_underflow, clamp_max_for_overflow)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, inputs):
language_dict_features = inputs["lang"]
hidden_states = language_dict_features["hidden"]
attention_mask = language_dict_features["masks"]
device = hidden_states.device
input_shape = hidden_states.size()[:-1]
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
self_attention_outputs = self.attention(
hidden_states,
extended_attention_mask,
None,
output_attentions=False,
past_key_value=None,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
hidden_states = outputs[0]
language_dict_features["hidden"] = hidden_states
features_dict = {"visual": inputs["visual"], "lang": language_dict_features}
return features_dict
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class CLIPTransformerLayer(nn.Module):
    """A single CLIP-style transformer block applied to the language stream.

    Pre-LN residual block: multi-head self-attention followed by a 4x-wide
    GELU MLP, each wrapped in LayerNorm + optional stochastic depth
    (DropPath). Operates only on ``inputs["lang"]``; the visual stream is
    passed through unchanged.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        d_model = self.config.MODEL.CLIP.WIDTH
        n_head = self.config.MODEL.CLIP.HEADS
        drop_path = self.config.MODEL.CLIP.DROP_PATH
        self.context_length = self.config.MODEL.CLIP.CONTEXT_LENGTH
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        # 4x expansion MLP, matching the CLIP text-encoder block layout.
        self.mlp = nn.Sequential(
            OrderedDict(
                [
                    ("c_fc", nn.Linear(d_model, d_model * 4)),
                    ("gelu", QuickGELU()),
                    ("c_proj", nn.Linear(d_model * 4, d_model)),
                ]
            )
        )
        self.ln_2 = LayerNorm(d_model)
        # No causal mask by default; attention() would cast it per call.
        self.attn_mask = None
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        # Recursively (re-)initialize weights of all submodules.
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for linear/conv weights; zero biases."""
        if isinstance(m, (nn.Linear, nn.Conv2d)):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
            nn.init.constant_(m.bias, 0)

    def attention(self, x: torch.Tensor, key_padding_mask: torch.Tensor = None):
        """Self-attention over ``x`` (seq-first layout expected by nn.MultiheadAttention)."""
        # Keep any additive mask on the same dtype/device as the activations.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask, key_padding_mask=key_padding_mask)[0]

    def forward(self, inputs):
        language_dict_features = inputs["lang"]
        x = language_dict_features["hidden"]
        mask = language_dict_features["masks"]
        # get extended attention mask for nn.MultiHeadAttention
        # key_padding_mask is True where tokens are padding (mask == 0).
        key_padding_mask = (1.0 - mask).to(torch.bool)
        # [batch, seq, dim] -> [seq, batch, dim] for nn.MultiheadAttention.
        x = x.permute(1, 0, 2)
        x = x + self.drop_path(self.attention(self.ln_1(x), key_padding_mask=key_padding_mask))
        x = x + self.drop_path(self.mlp(self.ln_2(x)))
        x = x.permute(1, 0, 2)
        language_dict_features["hidden"] = x
        features_dict = {"visual": inputs["visual"], "lang": language_dict_features}
        return features_dict
class DummyLayer(nn.Module):
    """Identity module: returns its input unchanged.

    Used as a placeholder in the dyhead tower when no language-path
    layer is needed at a given depth.
    """

    def __init__(self):
        super(DummyLayer, self).__init__()

    def forward(self, inputs):
        # Pure pass-through; no parameters, no computation.
        return inputs
class VLFuse(torch.nn.Module):
    """
    Early Fusion Module

    Fuses the visual FPN features with language features before the
    detection head. The fusion mechanism is chosen by
    ``cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE``:

    - "MHA-S": single-direction cross-attention (text -> image)
    - "MHA-B": bi-directional cross-attention (text <-> image)
    - "SCAN":  attend aggregated text onto each visual level, then 1x1 conv
    - "FILM":  feature-wise linear modulation (gamma * feat + beta)
    - anything else: pass-through (no fusion)

    forward() consumes and returns a dict with keys "visual" (list of 5
    FPN feature maps) and "lang" (language feature dict).
    """

    def __init__(self, cfg):
        super(VLFuse, self).__init__()
        self.init_configs(cfg)
        self.cfg = cfg
        self.use_checkpoint = False
        if hasattr(cfg.MODEL.DYHEAD, "USE_CHECKPOINT"):
            self.use_checkpoint = cfg.MODEL.DYHEAD.USE_CHECKPOINT
            # Dummy tensor with requires_grad=True so torch.utils.checkpoint
            # keeps a grad path through the checkpointed call.
            # NOTE(review): dummy_tensor is only created inside this hasattr
            # branch but is referenced unconditionally in forward() for the
            # MHA-B non-checkpoint path -- confirm USE_CHECKPOINT always
            # exists in the config.
            self.dummy_tensor = torch.ones(1, dtype=torch.float32, requires_grad=True)
        # early fusion module
        print("EARLY FUSION ON, USING {}".format(cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE))
        if cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "MHA-S":
            # single-direction (text->image)
            # text -> image
            self.t2i_attn = AttentionT2I(
                q_dim=self.joint_embedding_size,
                k_dim=self.lang_dim,
                embed_dim=self.embed_dim,
                num_heads=self.n_head,
                hidden_dim=self.t2i_hidden_dim,
                dropout=0.1,
                drop_path=0.0,
                init_values=1.0 / cfg.MODEL.DYHEAD.NUM_CONVS,
                mode="t2i",
                use_layer_scale=cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_LAYER_SCALE,
                clamp_min_for_underflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_MIN_FOR_UNDERFLOW,
                clamp_max_for_overflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_MAX_FOR_OVERFLOW,
            )
        elif cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "MHA-B":
            # bi-direction (text->image, image->text)
            self.b_attn = BiAttentionBlockForCheckpoint(
                v_dim=self.joint_embedding_size,
                l_dim=self.lang_dim,
                embed_dim=self.embed_dim,
                num_heads=self.n_head,
                hidden_dim=self.i2t_hidden_dim,
                dropout=0.1,
                drop_path=0.0,
                init_values=1.0 / cfg.MODEL.DYHEAD.NUM_CONVS,
                cfg=cfg,
            )
            if (
                self.cfg.MODEL.DYHEAD.FUSE_CONFIG.SEPARATE_BIDIRECTIONAL
                and self.cfg.MODEL.DYHEAD.FUSE_CONFIG.DO_LANG_PROJ_OUTSIDE_CHECKPOINT
            ):
                # Project the concatenated 5 per-level language outputs
                # back down to lang_dim.
                self.shrink_lang = FeatureResizer(self.lang_dim * 5, self.lang_dim, 0.1)
        elif cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "SCAN":
            # single-direction (text->image)
            self.mapping_lang = _make_mlp(self.lang_dim, self.joint_embedding_size, self.joint_embedding_dropout)
            # One 1x1 conv per FPN level (5 levels).
            self.joint_fusion = nn.ModuleList([_make_conv(self.joint_inp_dim, self.joint_out_dim, 1) for _ in range(5)])
        elif cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "FILM":
            # single-direction (text->image)
            self.mapping_lang = _make_mlp(self.lang_dim, self.joint_embedding_size, self.joint_embedding_dropout)
            # Per-level FiLM modulation parameters predicted from language.
            self.gamma = nn.ModuleList(nn.Linear(self.joint_embedding_size, self.joint_inp_dim) for _ in range(5))
            self.beta = nn.ModuleList(nn.Linear(self.joint_embedding_size, self.joint_inp_dim) for _ in range(5))
            self.joint_fusion = nn.ModuleList([_make_conv(self.joint_inp_dim, self.joint_out_dim, 1) for _ in range(5)])
        else:
            print("NO FUSION INVOLVED.")

    def init_configs(self, cfg):
        """Cache commonly used config values as attributes."""
        # common params
        self.lang_model = cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE
        self.joint_embedding_size = cfg.MODEL.DYHEAD.FUSE_CONFIG.JOINT_EMB_SIZE
        self.joint_embedding_dropout = cfg.MODEL.DYHEAD.FUSE_CONFIG.JOINT_EMB_DROPOUT
        self.joint_mlp_layers = cfg.MODEL.DYHEAD.FUSE_CONFIG.JOINT_MLP_LAYERS
        self.max_query_len = cfg.MODEL.LANGUAGE_BACKBONE.MAX_QUERY_LEN
        self.n_layers = cfg.MODEL.LANGUAGE_BACKBONE.N_LAYERS
        # 8 coordinate channels appended to visual features (FILM path).
        self.coord_dim = 8
        self.joint_inp_dim = self.coord_dim + self.joint_embedding_size
        self.joint_out_dim = cfg.MODEL.DYHEAD.FUSE_CONFIG.JOINT_OUT_SIZE
        # mha params
        self.n_head = 8
        self.embed_dim = 2048
        self.t2i_hidden_dim = 1024  # 256 * 4
        self.i2t_hidden_dim = 3072  # 768 * 4
        if self.lang_model in ["bert-base-uncased", "roberta-base", "clip", "roberta-fused", "roberta-fused-v2"]:
            self.lang_dim = cfg.MODEL.LANGUAGE_BACKBONE.LANG_DIM
        else:
            self.lang_dim = 1024

    def forward(self, x):
        """Fuse visual and language features according to the configured type.

        Args:
            x: dict with "visual" (list of 5 feature maps) and "lang"
               (language feature dict with at least "hidden"/"masks" or
               "aggregate" depending on fusion type).

        Returns:
            dict with the same structure, containing fused features.
        """
        visual_features = x["visual"]
        language_dict_features = x["lang"]
        batch_size = visual_features[0].shape[0]
        device = visual_features[0].device
        fused_visual_features = None
        fused_language_dict_features = None
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "MHA-S":
            language_feature = language_dict_features["hidden"]
            mask = language_dict_features["masks"]
            # text -> image
            if self.use_checkpoint:
                # Gradient checkpointing: recompute activations on backward
                # to save memory; dummy_tensor keeps a grad path alive.
                q0, q1, q2, q3, q4 = checkpoint.checkpoint(
                    self.t2i_attn,
                    visual_features[0],
                    visual_features[1],
                    visual_features[2],
                    visual_features[3],
                    visual_features[4],
                    language_feature,
                    language_feature,
                    mask,
                    self.dummy_tensor,
                )
            else:
                q0, q1, q2, q3, q4 = self.t2i_attn(
                    visual_features[0],
                    visual_features[1],
                    visual_features[2],
                    visual_features[3],
                    visual_features[4],
                    language_feature,
                    language_feature,
                    attention_mask=mask,
                )
            fused_visual_features = [q0, q1, q2, q3, q4]
            # Language stream is left untouched in the single-direction case.
            fused_language_dict_features = language_dict_features
        elif self.cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "MHA-B":
            if self.use_checkpoint:
                q0, q1, q2, q3, q4, l0, l1, l2, l3, l4 = checkpoint.checkpoint(
                    self.b_attn,
                    visual_features[0],
                    visual_features[1],
                    visual_features[2],
                    visual_features[3],
                    visual_features[4],
                    language_dict_features["hidden"],
                    language_dict_features["masks"],
                    self.dummy_tensor,
                )
            else:
                q0, q1, q2, q3, q4, l0, l1, l2, l3, l4 = self.b_attn(
                    visual_features[0],
                    visual_features[1],
                    visual_features[2],
                    visual_features[3],
                    visual_features[4],
                    language_dict_features["hidden"],
                    language_dict_features["masks"],
                    self.dummy_tensor,
                )
            fused_visual_features = [q0, q1, q2, q3, q4]
            if (
                self.cfg.MODEL.DYHEAD.FUSE_CONFIG.SEPARATE_BIDIRECTIONAL
                and self.cfg.MODEL.DYHEAD.FUSE_CONFIG.DO_LANG_PROJ_OUTSIDE_CHECKPOINT
            ):
                # Combine the 5 per-level language outputs into one.
                language_features = self.shrink_lang(torch.cat([l0, l1, l2, l3, l4], dim=-1))
            else:
                # Otherwise all levels share l0 as the language output.
                language_features = l0
            language_dict_features["hidden"] = language_features
            fused_language_dict_features = language_dict_features
        elif self.cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "SCAN":
            # text -> image
            language_feature = language_dict_features["aggregate"]
            language_feature = self.mapping_lang(language_feature)
            visu_feat = []
            for ii, feat in enumerate(visual_features):
                attn_feat = func_attention(feat, language_feature, smooth=1, raw_feature_norm="softmax")
                visu_feat.append(attn_feat)
            fused_visual_features = [fusion(feat) for feat, fusion in zip(visu_feat, self.joint_fusion)]
            fused_language_dict_features = language_dict_features
        elif self.cfg.MODEL.DYHEAD.FUSE_CONFIG.TYPE == "FILM":
            # text -> image
            # relative position embedding
            coord_feats = [_make_coord(batch_size, x.shape[2], x.shape[3]) for x in visual_features]
            # I only use a global representation of language
            # you can also use more complex modeling using word-level representations
            # Usage: lang_feat = lang_feat['words'] shape [seq_len, dim]
            language_feature = language_dict_features["aggregate"]
            language_feature = self.mapping_lang(language_feature)
            # attention mechanism for fusion
            gamma = [F.tanh(gamma(language_feature)) for gamma in self.gamma]
            beta = [F.tanh(beta(language_feature)) for beta in self.beta]
            visu_feat = []
            for ii, feat in enumerate(visual_features):
                coord_feat = coord_feats[ii].to(device)
                feat = torch.cat([feat, coord_feat], dim=1)
                # FiLM: channel-wise affine modulation conditioned on text.
                b = beta[ii].view(batch_size, -1, 1, 1).expand_as(feat)
                g = gamma[ii].view(batch_size, -1, 1, 1).expand_as(feat)
                feat = F.relu(g * feat + b)
                visu_feat.append(feat)
            fused_visual_features = [fusion(feat) for feat, fusion in zip(visu_feat, self.joint_fusion)]
            fused_language_dict_features = language_dict_features
        else:
            # No fusion configured: pass both streams through unchanged.
            fused_visual_features = visual_features
            fused_language_dict_features = language_dict_features
        features_dict = {"visual": fused_visual_features, "lang": fused_language_dict_features}
        return features_dict
class VLDyHead(torch.nn.Module):
    """Vision-language dynamic head.

    Stacks NUM_CONVS blocks of [optional VLFuse -> language layer ->
    DyConv vision layer], then predicts per-level classification logits,
    box regression, centerness, and the optional grounding heads (soft
    token / contrastive / dot-product / MLM / shallow contrastive).
    """

    def __init__(self, cfg):
        super(VLDyHead, self).__init__()
        self.cfg = cfg
        # bert_cfg = BertConfig.from_pretrained(cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE)
        # Pick the config object for the language-path layers in the tower.
        if cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE in ["bert-base-uncased", "roberta-base"]:
            lang_cfg = BertConfig.from_pretrained(cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE)
        elif cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE == "clip":
            lang_cfg = cfg
        elif cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE in ["roberta-fused", "roberta-fused-v2"]:
            lang_cfg = RobertaConfig.from_pretrained("roberta-base")
        else:
            lang_cfg = None
            raise NotImplementedError
        num_classes = cfg.MODEL.DYHEAD.NUM_CLASSES - 1
        num_tokens = cfg.MODEL.LANGUAGE_BACKBONE.MAX_QUERY_LEN
        num_anchors = len(cfg.MODEL.RPN.ASPECT_RATIOS) * cfg.MODEL.RPN.SCALES_PER_OCTAVE
        in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
        channels = cfg.MODEL.DYHEAD.CHANNELS
        # Normalization flavor for the conv tower.
        if cfg.MODEL.DYHEAD.USE_GN:
            bn_type = ["gn", cfg.MODEL.GROUP_NORM.NUM_GROUPS]
        elif cfg.MODEL.DYHEAD.USE_NSYNCBN:
            bn_type = "nsbn"
        elif cfg.MODEL.DYHEAD.USE_SYNCBN:
            bn_type = "sbn"
        else:
            bn_type = None
        use_dyrelu = cfg.MODEL.DYHEAD.USE_DYRELU
        use_dyfuse = cfg.MODEL.DYHEAD.USE_DYFUSE
        use_deform = cfg.MODEL.DYHEAD.USE_DFCONV
        if cfg.MODEL.DYHEAD.CONV_FUNC:
            # NOTE(review): eval() on a config string -- assumes the config
            # is trusted; names a conv factory defined in this module.
            conv_func = lambda i, o, s: eval(cfg.MODEL.DYHEAD.CONV_FUNC)(i, o, s, bn_type=bn_type)
        else:
            conv_func = lambda i, o, s: Conv3x3Norm(i, o, s, deformable=use_deform, bn_type=bn_type)
        dyhead_tower = []
        for i in range(cfg.MODEL.DYHEAD.NUM_CONVS):
            if cfg.MODEL.DYHEAD.FUSE_CONFIG.EARLY_FUSE_ON:
                # cross-modality fusion
                dyhead_tower.append(VLFuse(cfg))
                # self language path
                if i < cfg.MODEL.DYHEAD.NUM_CONVS - 1 or cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_FUSED_FEATURES_DOT_PRODUCT:
                    # dyhead_tower.append(
                    #     BertEncoderLayer(
                    #         bert_cfg,
                    #         clamp_min_for_underflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_BERTATTN_MIN_FOR_UNDERFLOW,
                    #         clamp_max_for_overflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_BERTATTN_MAX_FOR_OVERFLOW)
                    # )
                    if cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE in [
                        "bert-base-uncased",
                        "roberta-fused",
                        "roberta-fused-v2",
                        "roberta-base",
                    ]:
                        dyhead_tower.append(
                            BertEncoderLayer(
                                lang_cfg,
                                clamp_min_for_underflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_BERTATTN_MIN_FOR_UNDERFLOW,
                                clamp_max_for_overflow=cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_BERTATTN_MAX_FOR_OVERFLOW,
                            )
                        )
                    elif cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE == "clip":
                        dyhead_tower.append(CLIPTransformerLayer(lang_cfg))
                    else:
                        raise NotImplementedError
                else:
                    # Last block without fused-features dot product: no
                    # language update needed, insert an identity layer.
                    dyhead_tower.append(DummyLayer())
            # self vision path
            dyhead_tower.append(
                DyConv(
                    in_channels if i == 0 else channels,
                    channels,
                    conv_func=conv_func,
                    use_dyrelu=(use_dyrelu and in_channels == channels) if i == 0 else use_dyrelu,
                    use_dyfuse=(use_dyfuse and in_channels == channels) if i == 0 else use_dyfuse,
                    use_deform=(use_deform and in_channels == channels) if i == 0 else use_deform,
                )
            )
        self.add_module("dyhead_tower", nn.Sequential(*dyhead_tower))
        # Per-level prediction heads (shared across FPN levels).
        self.cls_logits = nn.Conv2d(channels, num_anchors * num_classes, kernel_size=1)
        self.bbox_pred = nn.Conv2d(channels, num_anchors * 4, kernel_size=1)
        self.centerness = nn.Conv2d(channels, num_anchors * 1, kernel_size=1)
        # initialize the bias for focal loss
        prior_prob = cfg.MODEL.DYHEAD.PRIOR_PROB
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        log_scale = self.cfg.MODEL.DYHEAD.LOG_SCALE
        # soft token head
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_TOKEN_LOSS:
            self.token_logits = nn.Conv2d(channels, num_anchors * num_tokens, kernel_size=1)
            # ABLATION
            # self.token_logits = nn.Conv2d(channels, num_anchors * num_tokens, kernel_size=1, bias=False)
            # self.bias = nn.Parameter(torch.zeros(channels), requires_grad=True)
            # self.bias0 = nn.Parameter(torch.Tensor([bias_value]), requires_grad=True)
        # contrastive alignment head
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS:
            # Contrastive and dot-product token losses are mutually exclusive.
            assert self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS == False
            contrastive_hdim = cfg.MODEL.DYHEAD.FUSE_CONFIG.CONTRASTIVE_HIDDEN_DIM
            self.contrastive_align_projection_image = nn.Conv2d(channels, num_anchors * contrastive_hdim, kernel_size=1)
            self.contrastive_align_projection_text = nn.Linear(channels, contrastive_hdim, bias=True)
            self.log_scale = nn.Parameter(torch.Tensor([log_scale]), requires_grad=True)
        # dot product soft token head
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS:
            assert self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS == False
            self.dot_product_projection_image = nn.Identity()
            self.dot_product_projection_text = nn.Linear(
                self.cfg.MODEL.LANGUAGE_BACKBONE.LANG_DIM, num_anchors * channels, bias=True
            )
            self.log_scale = nn.Parameter(torch.Tensor([log_scale]), requires_grad=True)
            # DEBUG
            # self.bias = nn.Parameter(torch.zeros(channels), requires_grad=True)
            self.bias_lang = nn.Parameter(torch.zeros(self.cfg.MODEL.LANGUAGE_BACKBONE.LANG_DIM), requires_grad=True)
            self.bias0 = nn.Parameter(torch.Tensor([bias_value]), requires_grad=True)
        # initialization
        for modules in [self.cls_logits, self.bbox_pred, self.centerness]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    torch.nn.init.constant_(l.bias, 0)
        # One learnable scalar scale per FPN level for bbox regression.
        self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(5)])
        # Focal-loss prior: start classification logits strongly negative.
        torch.nn.init.constant_(self.cls_logits.bias, bias_value)
        # if use soft token loss
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_TOKEN_LOSS:
            for modules in [self.token_logits]:
                for l in modules.modules():
                    if isinstance(l, nn.Conv2d):
                        torch.nn.init.normal_(l.weight, std=0.01)
                        torch.nn.init.constant_(l.bias, 0)
            torch.nn.init.constant_(self.token_logits.bias, bias_value)
            # print(torch.norm(self.token_logits.weight))
        # if use contrastive loss
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS:
            for modules in [self.contrastive_align_projection_image]:
                for l in modules.modules():
                    if isinstance(l, nn.Conv2d):
                        torch.nn.init.normal_(l.weight, std=0.01)
                        torch.nn.init.constant_(l.bias, 0)
        # if use dot product token loss
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS:
            for modules in [self.dot_product_projection_image]:
                for l in modules.modules():
                    if isinstance(l, nn.Conv2d):
                        torch.nn.init.normal_(l.weight, std=0.01)
                        torch.nn.init.constant_(l.bias, bias_value)
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.MLM_LOSS:
            if cfg.MODEL.LANGUAGE_BACKBONE.MODEL_TYPE == "clip":
                # Reuse a BERT-style LM head sized to the CLIP vocabulary.
                lang_cfg = BertConfig.from_pretrained("bert-base-uncased")
                lang_cfg.hidden_size = cfg.MODEL.CLIP.WIDTH
                lang_cfg.vocab_size = cfg.MODEL.CLIP.VOCAB_SIZE
            self.mlm_head = BertLMPredictionHead(lang_cfg)  # nn.Linear(hidden_size, config.vocab_size, bias=False)

    def forward(self, x, language_dict_features=None, embedding=None, swint_feature_c4=None):
        """Run the fused tower and all prediction heads.

        Args:
            x: list of 5 FPN feature maps.
            language_dict_features: language feature dict fed to the tower.
            embedding: token embeddings used by the grounding heads; may be
                replaced by the tower's fused language hidden states.
            swint_feature_c4: backbone C4 feature for the backbone-level
                shallow contrastive loss.

        Returns:
            Tuple of (logits, bbox_reg, centerness, t_logits, proj_tokens,
            contrastive_logits, dot_product_logits, mlm_logits,
            shallow_img_emb_feats, fused_visual_features); entries are None
            when the corresponding head is disabled.
        """
        logits = []
        bbox_reg = []
        centerness = []
        feat_inputs = {"visual": x, "lang": language_dict_features}
        dyhead_tower = self.dyhead_tower(feat_inputs)
        # soft token
        t_logits = None
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_TOKEN_LOSS:
            t_logits = []
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_FUSED_FEATURES_DOT_PRODUCT:
            # Use the fused (post-tower) language states instead of the
            # caller-provided embedding.
            embedding = dyhead_tower["lang"]["hidden"]
        # MLM loss
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.MLM_LOSS:
            mlm_logits = self.mlm_head(embedding)
        else:
            mlm_logits = None
        # contrastive
        contrastive_logits = None
        proj_tokens = None
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS:
            contrastive_logits = []
            # follow MDETR's way
            proj_tokens = F.normalize(self.contrastive_align_projection_text(embedding), p=2, dim=-1)
        # dot product soft token
        dot_product_logits = None
        dot_product_proj_tokens = None
        dot_product_proj_tokens_bias = None
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS:
            dot_product_logits = []
            # norm
            embedding = F.normalize(embedding, p=2, dim=-1)
            dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0)
            # w/o norm
            # dot_product_proj_tokens = self.dot_product_projection_text(embedding / 28.0)
            dot_product_proj_tokens_bias = torch.matmul(embedding, self.bias_lang) + self.bias0
        # shallow contrastive (original feature from image & text encoder)
        shallow_img_emb_feats = None
        shallow_text_emb = None
        if (
            self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_SHALLOW_CONTRASTIVE_LOSS
            or self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_BACKBONE_SHALLOW_CONTRASTIVE_LOSS
        ):
            shallow_img_emb_feats = []
            shallow_text_emb = embedding
        # print([v.shape for v in x])
        # shallow contrastive: use the feature from swint backbone
        if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_BACKBONE_SHALLOW_CONTRASTIVE_LOSS:
            for b, feature in enumerate(swint_feature_c4):
                # BF, CF, HF, WF = feat.shape
                # shallow_img_emb = permute_and_flatten(feat, BF, -1, CF, HF, WF)
                shallow_img_emb_feats.append(feature)
        fused_visual_features = None
        if self.cfg.MODEL.RPN.RETURN_FUSED_FEATURES:
            fused_visual_features = []
        # use the feature from FPN
        for l, feature in enumerate(x):
            logits.append(self.cls_logits(dyhead_tower["visual"][l]))
            bbox_pred = self.scales[l](self.bbox_pred(dyhead_tower["visual"][l]))
            bbox_reg.append(bbox_pred)
            centerness.append(self.centerness(dyhead_tower["visual"][l]))
            if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_TOKEN_LOSS:
                t_logits.append(self.token_logits(dyhead_tower["visual"][l]))
                # ABLATION
                # b = self.bias.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                # x = dyhead_tower["visual"][l]
                # B, C, H, W = x.shape
                # bias = b.repeat(B, 1, H, W)
                # t_logits.append(self.token_logits(dyhead_tower["visual"][l] + bias) + self.bias0)
            if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_CONTRASTIVE_ALIGN_LOSS:
                x = dyhead_tower["visual"][l]
                B, _, H, W = x.shape
                C = proj_tokens.shape[2]
                proj_queries = self.contrastive_align_projection_image(dyhead_tower["visual"][l])
                proj_queries = permute_and_flatten(proj_queries, B, -1, C, H, W)
                normalized_img_emb = F.normalize(proj_queries, p=2, dim=-1)
                normalized_text_emb = proj_tokens
                contrastive_logit = (
                    torch.matmul(normalized_img_emb, normalized_text_emb.transpose(-1, -2)) / self.log_scale.exp()
                )
                contrastive_logits.append(contrastive_logit)
            if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_DOT_PRODUCT_TOKEN_LOSS:
                x = dyhead_tower["visual"][l]
                # NOTE(review): fused features are only collected inside
                # this branch -- RETURN_FUSED_FEATURES appears to assume
                # the dot-product token loss is enabled; confirm.
                if self.cfg.MODEL.RPN.RETURN_FUSED_FEATURES:
                    fused_visual_features.append(x)
                B, C, H, W = x.shape
                # add bias (language)
                dot_product_proj_queries = self.dot_product_projection_image(x)
                dot_product_proj_queries = permute_and_flatten(dot_product_proj_queries, B, -1, C, H, W)
                A = dot_product_proj_queries.shape[1]
                bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat(1, A, 1)
                # add bias (vision)
                # b = self.bias.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                # tensor.repeat() is supposed to cost more memory, bias = b.repeat(B, 1, H, W)
                # here we replace it with tensor.expand()
                # bias = b.repeat(B, 1, H, W)
                # dot_product_proj_queries = self.dot_product_projection_image(x) + bias
                # print(torch.norm(dot_product_proj_tokens))
                # exit()
                dot_product_logit = (
                    torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2))
                    / self.log_scale.exp()
                ) + bias
                # dot_product_logit = (torch.matmul(dot_product_proj_queries,
                #                                   dot_product_proj_tokens.transpose(-1,
                #                                                                     -2)) / self.log_scale.exp()) + self.bias0
                if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.CLAMP_DOT_PRODUCT:
                    # Clamp to avoid fp16 overflow in downstream losses.
                    dot_product_logit = torch.clamp(dot_product_logit, max=50000)
                    dot_product_logit = torch.clamp(dot_product_logit, min=-50000)
                dot_product_logits.append(dot_product_logit)
            if self.cfg.MODEL.DYHEAD.FUSE_CONFIG.USE_SHALLOW_CONTRASTIVE_LOSS:
                feat = feature
                BF, CF, HF, WF = feat.shape
                shallow_img_emb = permute_and_flatten(feat, BF, -1, CF, HF, WF)
                shallow_img_emb_feats.append(shallow_img_emb)
        # no matter the feature is from backboone or from fpn, we use shallow_img_embs all the time
        if shallow_img_emb_feats is not None and shallow_text_emb is not None:
            # shallow_img_embs = torch.cat(shallow_img_embs, dim=1)
            proj_tokens = shallow_text_emb
        return (
            logits,
            bbox_reg,
            centerness,
            t_logits,
            proj_tokens,
            contrastive_logits,
            dot_product_logits,
            mlm_logits,
            shallow_img_emb_feats,
            fused_visual_features,
        )
class VLDyHeadModule(torch.nn.Module):
def __init__(self, cfg):
super(VLDyHeadModule, self).__init__()
self.cfg = cfg
self.head = VLDyHead(cfg)
box_coder = BoxCoder(cfg)
self.loss_evaluator = make_atss_loss_evaluator(cfg, box_coder)
self.box_selector_train = make_atss_postprocessor(cfg, box_coder, is_train=True)
self.box_selector_test = make_atss_postprocessor(cfg, box_coder, is_train=False) | self.anchor_generator = make_anchor_generator_complex(cfg) | 2 | 2023-10-23 04:07:08+00:00 | 8k |
WenzhengZhang/Seq2seqCoref | data.py | [
{
"identifier": "global_align",
"path": "alignment.py",
"snippet": "def global_align(input_ids, rec_ids):\n cost = np.zeros((len(input_ids) + 1, len(\n rec_ids) + 1)) # cost of alignment between tokens[:i]\n # and output_tokens[:j]\n best = np.zeros_like(cost,\n ... | import networkx as nx
import json
import os
import random
import re
import torch
import numpy as np
from torch.utils.data import Dataset, DataLoader
from transformers import DataCollatorForSeq2Seq
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, List, Optional, Tuple, Union
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy
from alignment import global_align, affine_global_align
from utils import split_list | 4,754 | (item[0], new_id))
else:
# a normal token
# if output_ids[i] == special_ids['sep']:
# status = "ent"
if len(ment_start_stack) > 0:
# inside some entities
if output_ids[i] == special_ids['sep']:
ment_start_stack[-1][1] = "ent"
if is_tagging:
new_output_ids.append(output_ids[i])
else:
if ment_start_stack[-1][1] == 'ent':
ment_start_stack[-1][2].append(output_ids[i])
if is_tagging:
new_output_ids.append(output_ids[i])
elif ment_start_stack[-1][1] == 'name':
new_id += 1
rec_ids.append(output_ids[i])
if is_tagging:
new_output_ids.append(input_ids[new_id])
else:
raise ValueError('wrong status')
else:
# outside
new_id += 1
rec_ids.append(output_ids[i])
if is_tagging:
new_output_ids.append(input_ids[new_id])
if output_ids[i] == special_ids['mention_start']:
new_id -= 1
# thred = 1 if allow_singletons else 2
# Needleman-Wunsch text alignment algorithm
wrong_reconstruction = (rec_ids != new_input_ids)
if wrong_reconstruction:
print(f'new input ids {new_input_ids}')
print(f'reconstructed ids {rec_ids}')
print(f'out ids {output_ids}')
print('wrong reconstruction! please debug')
matching = global_align(new_input_ids, rec_ids)
# update predicted entities with the positions in the original sentence
clusters = defaultdict(list)
for ent_id, ments in unmatched_clusters.items():
for start, end in ments:
new_start = None # start in the original sequence
new_end = None # end in the original sequence
for j in range(start, end + 1):
if j in matching:
if new_start is None:
new_start = matching[j]
new_end = matching[j]
if new_start is not None:
# predict entity
clusters[ent_id].append((
subtoken_map[new_start], subtoken_map[new_end]))
token_mentions.append((new_start, new_end))
predict_clusters = [list(set(v)) for k, v in clusters.items() if
len(set(v)) >= thred]
token_mentions = list(set(token_mentions))
else:
clusters = [[(subtoken_map[m[0]], subtoken_map[m[1]]) for m in v] for v
in
unmatched_clusters.values()]
predict_clusters = [list(set(v)) for v in clusters if len(set(v)) >=
thred]
token_mentions = [(m[0], m[1]) for v in unmatched_clusters.values()
for m in v]
token_mentions = list(set(token_mentions))
if not is_tagging:
new_output_ids = output_ids
return predict_clusters, token_mentions, new_output_ids
def parse_short_target_tokens(input_ids, output_ids,
special_ids, subtoken_map, tokenizer,
align_mode, thred, split_sentence):
# support mark sentence, align sentence by sentence
rec_ids, new_id = [], -1
ment_start_stack = []
unmatched_clusters = defaultdict(list)
new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]
for i in range(len(output_ids)):
if output_ids[i] == tokenizer.pad_token_id:
break
if output_ids[i] == special_ids['mention_start']:
ment_start_stack.append([new_id + 1, 'name', []])
elif output_ids[i] == special_ids['mention_end']:
if len(ment_start_stack) > 0:
item = ment_start_stack.pop()
if item[1] == "ent":
unmatched_clusters[tuple(item[-1])].append(
(item[0], new_id))
else:
# a normal token
if len(ment_start_stack) > 0:
# inside some entities
if output_ids[i] == special_ids['sep']:
ment_start_stack[-1][1] = "ent"
else:
if ment_start_stack[-1][1] == 'ent':
ment_start_stack[-1][2].append(output_ids[i])
elif ment_start_stack[-1][1] == 'name':
new_id += 1
rec_ids.append(output_ids[i])
else:
raise ValueError('wrong status')
else:
# outside
new_id += 1
rec_ids.append(output_ids[i])
# mapping.append(new_id)
# thred = 1 if allow_singletons else 2
# Affine global text alignment algorithm
if split_sentence:
|
class JointDataset(Dataset):
    """Multi-corpus seq2seq coreference dataset.

    Loads several jsonlines corpora (one directory per corpus) and, for
    training, subsamples the larger corpora each epoch via set_samples()
    so corpora contribute comparable numbers of examples. Each item is an
    encoder input plus a target sequence whose format depends on
    train_args.seq2seq_type / action_type.
    """

    def __init__(self, tokenizer,
                 data_args, train_args, split):
        self.tokenizer = tokenizer
        self.data_args = data_args
        self.train_args = train_args
        # split: "train" / "dev" / "test"; selects the jsonlines file.
        self.split = split
        # all_samples: {corpus_name: [sample, ...]}
        # doc_labels: {doc_id: gold clusters}; id_to_name: {doc_id: corpus}
        self.all_samples, self.doc_labels, self.id_to_name = self.load_dataset()
        # For eval, flatten all corpora up front; for train, samples are
        # (re)drawn per epoch by set_samples().
        self.samples = None if self.split == 'train' else [
            s for data_samples in self.all_samples.values() for s in
            data_samples
        ]

    def __len__(self):
        # Training length reflects the per-corpus subsampling cap.
        if self.split == 'train':
            num_samples = 0
            for s in self.all_samples.values():
                num_samples += min(self.data_args.joint_num_samples, len(s))
        else:
            num_samples = len(self.samples)
        return num_samples

    def set_samples(self, epoch):
        """Resample the training pool for this epoch (seeded by epoch)."""
        # subsample larger datasets and then concat them
        sample_seed = self.train_args.seed + epoch
        min_num_samples = min(len(s) for s in self.all_samples.values())
        samples = []
        for data_name, data_samples in self.all_samples.items():
            if len(data_samples) > min_num_samples:
                subsamples = random.Random(sample_seed).sample(
                    data_samples, self.data_args.joint_num_samples)
            else:
                subsamples = data_samples
            samples += subsamples
        self.samples = samples

    def _load_single_data(self, data_dir,
                          data_name,
                          max_len,
                          thred):
        """Load one corpus's jsonlines file.

        Args:
            data_dir: corpus directory containing the split files.
            data_name: corpus name used in id_to_name.
            max_len: NOTE(review): currently unused -- the file name is
                hard-coded to '73,402'; presumably it should parameterize
                the file name. Confirm against the preprocessing script.
            thred: minimum cluster size kept in seg_clusters.

        Returns:
            (samples, doc_labels, id_to_name) for this corpus.
        """
        samples = []
        doc_labels = {}
        id_to_name = {}
        data_path = os.path.join(
            data_dir,
            f'{self.split}.t5-small.english.73,402.jsonlines')
        with open(data_path, 'r') as f:
            for line in f:
                item = json.loads(line)
                doc_key = item['doc_key']
                # doc_key is "<doc_id>_<segment_idx>"; strip the segment.
                doc_id = re.sub(r'_\d+$', '', doc_key)
                id_to_name[doc_id] = data_name
                if self.train_args.action_type == "integer":
                    target_sent = self.tokenizer.convert_tokens_to_ids(
                        item['target_sentence'])
                elif self.train_args.action_type == "non_integer":
                    if self.train_args.add_mention_end:
                        target_sent = self.tokenizer.convert_tokens_to_ids(
                            item["target_non_int_mention_end_sentence"])
                    else:
                        target_sent = self.tokenizer.convert_tokens_to_ids(
                            item["target_non_int_sentence"])
                else:
                    raise ValueError(f"wrong action type "
                                     f"{self.train_args.action_type}")
                # Choose the supervision sequence per seq2seq variant.
                if self.train_args.seq2seq_type == 'action' or \
                        self.train_args.seq2seq_type == 'input_feed':
                    if self.train_args.action_type == 'integer':
                        target_seq = self.tokenizer.convert_tokens_to_ids(
                            item['target_action'])
                    elif self.train_args.action_type == 'non_integer':
                        if self.train_args.add_mention_end:
                            target_seq = self.tokenizer.convert_tokens_to_ids(
                                item["target_non_int_mention_end_action"])
                        else:
                            target_seq = self.tokenizer.convert_tokens_to_ids(
                                item["target_non_int_action"])
                    else:
                        raise ValueError("wrong action type ("
                                         "integer/non_integer)")
                elif self.train_args.seq2seq_type == 'short_seq':
                    target_seq = self.tokenizer.convert_tokens_to_ids(
                        item['target_short_sentence'])
                elif self.train_args.seq2seq_type == 'full_seq':
                    target_seq = deepcopy(target_sent)
                elif self.train_args.seq2seq_type == 'tagging':
                    target_seq = self.tokenizer.convert_tokens_to_ids(
                        item['target_action'])
                    # set the last token as eos token
                    target_seq[-1] = self.tokenizer.eos_token_id
                else:
                    raise ValueError('wrong seq2seq type')
                sample = {'doc_key': doc_key,
                          'sentence': self.tokenizer.convert_tokens_to_ids(
                              item['sentence']),
                          'target_sentence': target_sent,
                          'target_seq': target_seq,
                          'subtoken_map': item['subtoken_map'],
                          'seg_clusters': [[tuple(m) for m in c] for c in item[
                              'seg_clusters'] if len(c) >= thred],
                          'offset': item['offset']
                          }
                doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[
                    'gold_clusters']]
                samples.append(sample)
        return samples, doc_labels, id_to_name

    def load_dataset(self):
        """Load every configured corpus (comma-separated config fields)."""
        doc_labels = {}
        id_to_name = {}
        samples = {}
        max_lens = self.data_args.joint_max_train_lens.split(
            ',') if self.split == 'train' else \
            self.data_args.joint_max_eval_lens.split(',')
        max_lens = [int(l) for l in max_lens]
        threds = self.train_args.joint_min_num_mentions.split(',')
        threds = [int(t) for t in threds]
        data_dirs = self.data_args.joint_data_dirs.split(',')
        data_names = self.train_args.joint_data_names.split(',')
        for data_dir, data_name, max_len, thred in zip(
                data_dirs, data_names, max_lens, threds):
            single_samples, single_doc_labels, single_id_to_name = \
                self._load_single_data(data_dir, data_name, max_len, thred)
            samples[data_name] = single_samples
            doc_labels.update(single_doc_labels)
            id_to_name.update(single_id_to_name)
        return samples, doc_labels, id_to_name

    def __getitem__(self, index):
        # NOTE: intentionally identical to CorefDataset.__getitem__; keep
        # the two in sync if the encoding format changes.
        sample = self.samples[index]
        input_ids = torch.tensor(sample['sentence'], dtype=torch.long)
        if self.train_args.seq2seq_type == 'action' or \
                self.train_args.seq2seq_type == 'input_feed':
            # 'action'/'input_feed' need both the full target sentence
            # (decoder_labels) and the action sequence (labels).
            label_ids = torch.tensor(sample['target_sentence'],
                                     dtype=torch.long)
            target_ids = torch.tensor(sample['target_seq'], dtype=torch.long)
            input_len, tgt_len = input_ids.size(0), label_ids.size(0)
            attention_mask = torch.tensor([1] * input_len, dtype=torch.long)
            src_encoding = {'input_ids': input_ids,
                            'attention_mask': attention_mask,
                            'decoder_labels': label_ids,
                            'labels': target_ids
                            }
        else:
            label_ids = torch.tensor(sample['target_seq'],
                                     dtype=torch.long)
            input_len, tgt_len = input_ids.size(0), label_ids.size(0)
            attention_mask = torch.tensor([1] * input_len, dtype=torch.long)
            src_encoding = {'input_ids': input_ids,
                            'attention_mask': attention_mask,
                            'labels': label_ids,
                            }
        return src_encoding
class CorefDataset(Dataset):
    """Single-corpus seq2seq coreference dataset.

    Loads one jsonlines split from data_args.data_dir and converts each
    document segment into tokenized encoder inputs plus the target
    sequence selected by train_args.seq2seq_type / action_type.
    """

    def __init__(self, tokenizer,
                 data_args, train_args, split):
        self.tokenizer = tokenizer
        self.data_args = data_args
        self.train_args = train_args
        # split: "train" / "dev" / "test"; selects the jsonlines file.
        self.split = split
        # self.task_prefix = self.data_args.task_prefix
        # convert tokens to ids for each sample
        self.samples, self.doc_labels = self.load_dataset()

    def __len__(self):
        return len(self.samples)

    def load_dataset(self):
        """Read the split's jsonlines file into (samples, doc_labels)."""
        # NOTE(review): max_len is computed but unused below -- the file
        # name is hard-coded to '73,402'; presumably it should appear in
        # the file name. Confirm against the preprocessing script.
        max_len = self.data_args.max_train_len if self.split == 'train' else \
            self.data_args.max_eval_len
        data_path = os.path.join(
            self.data_args.data_dir,
            f'{self.split}.t5-small.english.73,402.jsonlines')
        samples = []
        doc_labels = {}
        thred = self.train_args.min_num_mentions
        with open(data_path, 'r') as f:
            for line in f:
                item = json.loads(line)
                doc_key = item['doc_key']
                # doc_key is "<doc_id>_<segment_idx>"; strip the segment.
                doc_id = re.sub(r'_\d+$', '', doc_key)
                if self.train_args.action_type == "integer":
                    target_sent = self.tokenizer.convert_tokens_to_ids(
                        item['target_sentence'])
                elif self.train_args.action_type == "non_integer":
                    if self.train_args.add_mention_end:
                        target_sent = self.tokenizer.convert_tokens_to_ids(
                            item["target_non_int_mention_end_sentence"])
                    else:
                        target_sent = self.tokenizer.convert_tokens_to_ids(
                            item["target_non_int_sentence"])
                else:
                    raise ValueError(f"wrong action type "
                                     f"{self.train_args.action_type}")
                # Choose the supervision sequence per seq2seq variant.
                if self.train_args.seq2seq_type == 'action' or \
                        self.train_args.seq2seq_type == 'input_feed':
                    if self.train_args.action_type == 'integer':
                        target_seq = self.tokenizer.convert_tokens_to_ids(
                            item['target_action'])
                    elif self.train_args.action_type == 'non_integer':
                        if self.train_args.add_mention_end:
                            target_seq = self.tokenizer.convert_tokens_to_ids(
                                item["target_non_int_mention_end_action"])
                        else:
                            target_seq = self.tokenizer.convert_tokens_to_ids(
                                item["target_non_int_action"])
                    else:
                        raise ValueError("wrong action type ("
                                         "integer/non_integer)")
                elif self.train_args.seq2seq_type == 'short_seq':
                    target_seq = self.tokenizer.convert_tokens_to_ids(
                        item['target_short_sentence'])
                elif self.train_args.seq2seq_type == 'full_seq':
                    target_seq = deepcopy(target_sent)
                elif self.train_args.seq2seq_type == 'tagging':
                    target_seq = self.tokenizer.convert_tokens_to_ids(
                        item['target_action'])
                    # set the last token as eos token
                    target_seq[-1] = self.tokenizer.eos_token_id
                else:
                    raise ValueError('wrong seq2seq type')
                sample = {'doc_key': doc_key,
                          'sentence': self.tokenizer.convert_tokens_to_ids(
                              item['sentence']),
                          'target_sentence': target_sent,
                          'target_seq': target_seq,
                          'subtoken_map': item['subtoken_map'],
                          'seg_clusters': [[tuple(m) for m in c] for c in item[
                              'seg_clusters'] if len(c) >= thred],
                          'offset': item['offset']
                          }
                doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[
                    'gold_clusters']]
                samples.append(sample)
        return samples, doc_labels

    def __getitem__(self, index):
        # NOTE: intentionally identical to JointDataset.__getitem__; keep
        # the two in sync if the encoding format changes.
        sample = self.samples[index]
        input_ids = torch.tensor(sample['sentence'], dtype=torch.long)
        if self.train_args.seq2seq_type == 'action' or \
                self.train_args.seq2seq_type == 'input_feed':
            # 'action'/'input_feed' need both the full target sentence
            # (decoder_labels) and the action sequence (labels).
            label_ids = torch.tensor(sample['target_sentence'],
                                     dtype=torch.long)
            target_ids = torch.tensor(sample['target_seq'], dtype=torch.long)
            input_len, tgt_len = input_ids.size(0), label_ids.size(0)
            attention_mask = torch.tensor([1] * input_len, dtype=torch.long)
            src_encoding = {'input_ids': input_ids,
                            'attention_mask': attention_mask,
                            'decoder_labels': label_ids,
                            'labels': target_ids
                            }
        else:
            label_ids = torch.tensor(sample['target_seq'],
                                     dtype=torch.long)
            input_len, tgt_len = input_ids.size(0), label_ids.size(0)
            attention_mask = torch.tensor([1] * input_len, dtype=torch.long)
            src_encoding = {'input_ids': input_ids,
                            'attention_mask': attention_mask,
                            'labels': label_ids,
                            }
        return src_encoding
def get_document_predicts(doc_preds: List[List]) -> List[
List[Tuple[int, int]]]:
"""
Aggregate predictions for each chunk into document-level predictions.
"""
if len(doc_preds) == 0:
return []
graph = nx.compose_all([nx.complete_graph(p) for p in doc_preds])
processed_groups = []
for component in nx.connected_components(graph):
processed_group = []
for start, end in sorted(component, key=lambda x: (x[0], -x[1])):
# add this entity if it does not overlap with the previous one
condition = not any(
[s < start < e < end for (s, e) in processed_group])
# if len(processed_group) == 0 or start >= processed_group[-1][1]:
# processed_group.append((start, end))
if len(processed_group) == 0 or condition:
processed_group.append((start, end))
processed_groups.append(processed_group)
return [[(start, end) for start, end in group] for group in
processed_groups]
# adapted from https://github.com/lyutyuh/ASP/blob/12b80a7cacc0edf33b77b507102f583380e7e1f1/data/t5minimize_coref.py#L259
def normalize_word(word, use_br_dict=False):
br_dict = {"-LRB-": "(", "-RRB-": ")", "-LSB-": "[", "-RSB-": "]"}
# br_dict = {"(": "-LRB-", ")": "-RRB-", "[": "-LSB-", ']': "-RSB-"}
# br_dict = {"(": "[", ")": "]", "-LRB-": "[", "-RRB-": "]",
# "-LSB-": "[", "-RSB-": "]"}
if use_br_dict and word in br_dict:
word = br_dict[word]
return word
elif word == "/." or word == "/?":
return word[1:]
elif word == "''" or word == "``": # <unk> otherwise
return "\""
elif word == "`": # <unk> otherwise
return "\'"
else:
return word.replace('{', '(').replace('}', ')')
def parse_int_output_tokens(input_ids, output_ids,
special_ids, subtoken_map, tokenizer,
thred, is_tagging):
rec_ids, new_id = [], -1
ment_start_stack = []
unmatched_clusters = defaultdict(list)
new_output_ids = []
if is_tagging:
new_input_ids = [special_ids['copy'] for t in input_ids if
t != tokenizer.pad_token_id and t != special_ids[
'eos']]
new_input_ids.append(special_ids['eos'])
else:
new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]
token_mentions = []
for i in range(len(output_ids)):
if output_ids[i] == tokenizer.pad_token_id:
break
if output_ids[i] == special_ids['mention_start']:
new_id += 1
ment_start_stack.append([new_id, 'name', []])
if is_tagging:
new_output_ids.append(output_ids[i])
elif output_ids[i] == special_ids['mention_end']:
new_id += 0
if is_tagging:
new_output_ids.append(output_ids[i])
if len(ment_start_stack) > 0:
item = ment_start_stack.pop()
if item[1] == "ent":
unmatched_clusters[tuple(item[-1])].append(
(item[0], new_id))
else:
# a normal token
# if output_ids[i] == special_ids['sep']:
# status = "ent"
if len(ment_start_stack) > 0:
# inside some entities
if output_ids[i] == special_ids['sep']:
ment_start_stack[-1][1] = "ent"
if is_tagging:
new_output_ids.append(output_ids[i])
else:
if ment_start_stack[-1][1] == 'ent':
ment_start_stack[-1][2].append(output_ids[i])
if is_tagging:
new_output_ids.append(output_ids[i])
elif ment_start_stack[-1][1] == 'name':
new_id += 1
rec_ids.append(output_ids[i])
if is_tagging:
new_output_ids.append(input_ids[new_id])
else:
raise ValueError('wrong status')
else:
# outside
new_id += 1
rec_ids.append(output_ids[i])
if is_tagging:
new_output_ids.append(input_ids[new_id])
if output_ids[i] == special_ids['mention_start']:
new_id -= 1
# thred = 1 if allow_singletons else 2
# Needleman-Wunsch text alignment algorithm
wrong_reconstruction = (rec_ids != new_input_ids)
if wrong_reconstruction:
print(f'new input ids {new_input_ids}')
print(f'reconstructed ids {rec_ids}')
print(f'out ids {output_ids}')
print('wrong reconstruction! please debug')
matching = global_align(new_input_ids, rec_ids)
# update predicted entities with the positions in the original sentence
clusters = defaultdict(list)
for ent_id, ments in unmatched_clusters.items():
for start, end in ments:
new_start = None # start in the original sequence
new_end = None # end in the original sequence
for j in range(start, end + 1):
if j in matching:
if new_start is None:
new_start = matching[j]
new_end = matching[j]
if new_start is not None:
# predict entity
clusters[ent_id].append((
subtoken_map[new_start], subtoken_map[new_end]))
token_mentions.append((new_start, new_end))
predict_clusters = [list(set(v)) for k, v in clusters.items() if
len(set(v)) >= thred]
token_mentions = list(set(token_mentions))
else:
clusters = [[(subtoken_map[m[0]], subtoken_map[m[1]]) for m in v] for v
in
unmatched_clusters.values()]
predict_clusters = [list(set(v)) for v in clusters if len(set(v)) >=
thred]
token_mentions = [(m[0], m[1]) for v in unmatched_clusters.values()
for m in v]
token_mentions = list(set(token_mentions))
if not is_tagging:
new_output_ids = output_ids
return predict_clusters, token_mentions, new_output_ids
def parse_short_target_tokens(input_ids, output_ids,
special_ids, subtoken_map, tokenizer,
align_mode, thred, split_sentence):
# support mark sentence, align sentence by sentence
rec_ids, new_id = [], -1
ment_start_stack = []
unmatched_clusters = defaultdict(list)
new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]
for i in range(len(output_ids)):
if output_ids[i] == tokenizer.pad_token_id:
break
if output_ids[i] == special_ids['mention_start']:
ment_start_stack.append([new_id + 1, 'name', []])
elif output_ids[i] == special_ids['mention_end']:
if len(ment_start_stack) > 0:
item = ment_start_stack.pop()
if item[1] == "ent":
unmatched_clusters[tuple(item[-1])].append(
(item[0], new_id))
else:
# a normal token
if len(ment_start_stack) > 0:
# inside some entities
if output_ids[i] == special_ids['sep']:
ment_start_stack[-1][1] = "ent"
else:
if ment_start_stack[-1][1] == 'ent':
ment_start_stack[-1][2].append(output_ids[i])
elif ment_start_stack[-1][1] == 'name':
new_id += 1
rec_ids.append(output_ids[i])
else:
raise ValueError('wrong status')
else:
# outside
new_id += 1
rec_ids.append(output_ids[i])
# mapping.append(new_id)
# thred = 1 if allow_singletons else 2
# Affine global text alignment algorithm
if split_sentence: | input_sents = split_list( | 2 | 2023-10-17 17:39:16+00:00 | 8k |
oven-lab/tuya_cloud_map_extractor | custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/main.py | [
{
"identifier": "decode_v0",
"path": "custom_components/tuya_cloud_map_extractor/tuya_vacuum_map_extractor/v0.py",
"snippet": "def decode_v0(data: str, header: dict):\n encodeDataArray = bytes(_hexStringToNumber(data[48:]))\n decodeDataArray = uncompress(encodeDataArray)\n mapArea = header[\"wi... | import base64
import requests
import math
import json
import logging
from requests.exceptions import JSONDecodeError
from datetime import datetime
from PIL import Image, ImageDraw
from .v0 import decode_v0, to_array_v0
from .v1 import decode_v1, to_array_v1, decode_path_v1, _format_path_point
from .custom0 import decode_custom0, to_array_custom0, decode_path_custom0, map_to_image
from .tuya import get_download_link
from .const import NotSupportedError
from .common import decode_header | 4,349 |
width = header["width"]
height = header["height"]
if isinstance(header["version"], list):
protoVer = str(header["version"][0])
else:
protoVer = header["version"]
pixellist = []
for i in raw_map:
pixellist.append(i)
if protoVer == "custom0":
array = to_array_custom0(pixellist, width, height, colors)
elif protoVer == "0":
array = to_array_v0(pixellist, width, height, colors)
elif protoVer == "1":
rooms = header["roominfo"]
array = to_array_v1(pixellist, width, height, rooms, colors)
image = Image.fromarray(array)
return image
def get_map(
server: str, client_id: str, secret_key: str, device_id: str, colors={}, settings={}, urls={}
) -> Image:
"""Downloads and parses vacuum map from tuya cloud."""
render_path = settings["path_enabled"]
last = settings["last"]
if urls != {}:
time = datetime.strptime(urls["time"], "%H:%M:%S")
now = datetime.now().strftime("%H:%M:%S")
now = datetime.strptime(now, "%H:%M:%S")
delta = now-time
minutes_delta = math.ceil(delta.total_seconds() / 60)
if minutes_delta < 59:
link = {}
link["result"] = urls["links"]
else:
link = get_download_link(server, client_id, secret_key, device_id)
else:
link = get_download_link(server, client_id, secret_key, device_id)
try:
map_link = link["result"][0]["map_url"]
response = download(map_link)
except Exception as e:
_LOGGER.error("Encountered an error, please include the following data in your github issue: " + str(base64.b64encode(json.dumps(link).encode())))
raise e
if response.status_code != 200:
_LOGGER.warning("Got " + str(response.status_code) + " from server while downloading map.")
_LOGGER.debug(
"Response: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
+ str(base64.b64encode(bytes(str(link), "utf-8")))
)
try:
header, mapDataArr = parse_map(response)
image = render_layout(raw_map=mapDataArr, header=header, colors=colors)
except Exception as e:
_LOGGER.error(
"Unsupported data type. Include the following data in a github issue to request the data format to be added: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
+ str(base64.b64encode(bytes(str(link), "utf-8")))
+ " Thank you!"
)
raise e
if urls == {}:
header["urls"] = {
"links": link["result"],
"time": datetime.now().strftime("%H:%M:%S"),
}
else:
header["urls"] = urls
if render_path:
_LOGGER.debug("Rendering path")
try:
path_link = link["result"][1]["map_url"]
except:
_LOGGER.error("Your vacuum doesn't return a path")
return flip(header, image, settings)
if "path_color" not in colors:
colors["path_color"] = [0, 255, 0]
scale = int(1080/image.size[0])
image = image.resize((image.size[0]*scale, image.size[1]*scale), resample=Image.BOX)
response = download(path_link)
if response.status_code != 200:
_LOGGER.warning("Got " + str(response.status_code) + " from server while downloading path.")
raise FileNotFoundError
_LOGGER.debug(
"Response path: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
)
try:
path = parse_path(response, scale=scale, header=header)
except Exception as e:
_LOGGER.error("Failed to parse path: " + str(base64.b64encode(response.content)))
raise e
draw = ImageDraw.Draw(image, 'RGBA')
draw.line(path, fill=tuple(colors["path_color"]), width=2)
x, y = header["pileX"], header["pileY"]
if header["version"] in [[0], [1]]:
| """Downloads and renders vacuum map from tuya servers."""
# import lz4.block
_LOGGER = logging.getLogger(__name__)
def download(url: str) -> requests.models.Response:
"""Downloads map and converts it to a dictionary and bytes object."""
response = requests.get(url=url, timeout=2.5)
return response
def parse_map(response: requests.models.Response):
try:
data = response.json()
header, mapDataArr = decode_custom0(data)
except JSONDecodeError:
data = response.content.hex()
header = decode_header(data[0:48])
if header["version"] == [0]:
mapDataArr = decode_v0(data, header)
elif header["version"] == [1]:
mapDataArr = decode_v1(data, header)
else:
raise NotSupportedError("Map version " + str(header["version"]) +" is not supported.")
return header, mapDataArr
def parse_path(response: requests.models.Response, scale=2.0, header={}):
try:
data = response.json()
path_data = decode_path_custom0(data, header)
except JSONDecodeError:
data = response.content.hex()
path_data = decode_path_v1(data)
coords = []
for coord in path_data:
for i in coord:
coords.append(i*scale)
return coords
def flip(headers: dict, image: Image.Image, settings: dict):
rotate = settings["rotate"]
flip_vertical = settings["flip_vertical"]
flip_horizontal = settings["flip_horizontal"]
if rotate == 90:
image = image.transpose(Image.ROTATE_90)
elif rotate == 180:
image = image.transpose(Image.ROTATE_180)
elif rotate == -90:
image = image.transpose(Image.ROTATE_270)
if flip_vertical:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
if flip_horizontal:
image = image.transpose(Image.FLIP_TOP_BOTTOM)
return headers, image
def render_layout(raw_map: bytes, header: dict, colors: dict) -> Image.Image:
"""Renders the layout map."""
width = header["width"]
height = header["height"]
if isinstance(header["version"], list):
protoVer = str(header["version"][0])
else:
protoVer = header["version"]
pixellist = []
for i in raw_map:
pixellist.append(i)
if protoVer == "custom0":
array = to_array_custom0(pixellist, width, height, colors)
elif protoVer == "0":
array = to_array_v0(pixellist, width, height, colors)
elif protoVer == "1":
rooms = header["roominfo"]
array = to_array_v1(pixellist, width, height, rooms, colors)
image = Image.fromarray(array)
return image
def get_map(
server: str, client_id: str, secret_key: str, device_id: str, colors={}, settings={}, urls={}
) -> Image:
"""Downloads and parses vacuum map from tuya cloud."""
render_path = settings["path_enabled"]
last = settings["last"]
if urls != {}:
time = datetime.strptime(urls["time"], "%H:%M:%S")
now = datetime.now().strftime("%H:%M:%S")
now = datetime.strptime(now, "%H:%M:%S")
delta = now-time
minutes_delta = math.ceil(delta.total_seconds() / 60)
if minutes_delta < 59:
link = {}
link["result"] = urls["links"]
else:
link = get_download_link(server, client_id, secret_key, device_id)
else:
link = get_download_link(server, client_id, secret_key, device_id)
try:
map_link = link["result"][0]["map_url"]
response = download(map_link)
except Exception as e:
_LOGGER.error("Encountered an error, please include the following data in your github issue: " + str(base64.b64encode(json.dumps(link).encode())))
raise e
if response.status_code != 200:
_LOGGER.warning("Got " + str(response.status_code) + " from server while downloading map.")
_LOGGER.debug(
"Response: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
+ str(base64.b64encode(bytes(str(link), "utf-8")))
)
try:
header, mapDataArr = parse_map(response)
image = render_layout(raw_map=mapDataArr, header=header, colors=colors)
except Exception as e:
_LOGGER.error(
"Unsupported data type. Include the following data in a github issue to request the data format to be added: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
+ str(base64.b64encode(bytes(str(link), "utf-8")))
+ " Thank you!"
)
raise e
if urls == {}:
header["urls"] = {
"links": link["result"],
"time": datetime.now().strftime("%H:%M:%S"),
}
else:
header["urls"] = urls
if render_path:
_LOGGER.debug("Rendering path")
try:
path_link = link["result"][1]["map_url"]
except:
_LOGGER.error("Your vacuum doesn't return a path")
return flip(header, image, settings)
if "path_color" not in colors:
colors["path_color"] = [0, 255, 0]
scale = int(1080/image.size[0])
image = image.resize((image.size[0]*scale, image.size[1]*scale), resample=Image.BOX)
response = download(path_link)
if response.status_code != 200:
_LOGGER.warning("Got " + str(response.status_code) + " from server while downloading path.")
raise FileNotFoundError
_LOGGER.debug(
"Response path: "
+ str(response.status_code)
+ str(base64.b64encode(response.content))
)
try:
path = parse_path(response, scale=scale, header=header)
except Exception as e:
_LOGGER.error("Failed to parse path: " + str(base64.b64encode(response.content)))
raise e
draw = ImageDraw.Draw(image, 'RGBA')
draw.line(path, fill=tuple(colors["path_color"]), width=2)
x, y = header["pileX"], header["pileY"]
if header["version"] in [[0], [1]]: | point = _format_path_point({'x': x, 'y': y}, False) | 5 | 2023-10-22 10:48:25+00:00 | 8k |
lwaekfjlk/TRAMS | utils/src.py | [
{
"identifier": "TransfoXLLMHeadModel",
"path": "utils/modeling_transfo_xl.py",
"snippet": "_CHECKPOINT_FOR_DOC = \"transfo-xl-wt103\"\n_CONFIG_FOR_DOC = \"TransfoXLConfig\"\n_TOKENIZER_FOR_DOC = \"TransfoXLTokenizer\"\nTRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"transfo-xl-wt103\",\n # See a... | import os
import logging
import wandb
import torch
import sys
from torch.nn.parallel import DistributedDataParallel
from torch.optim import Adam
from utils.modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLConfig
from torch.optim.lr_scheduler import ExponentialLR, LambdaLR
from transformers import get_linear_schedule_with_warmup, get_cosine_schedule_with_warmup
from data_utils import get_lm_corpus
from earlystopping import EarlyStopper | 4,086 |
def judge_earlystopping(self, metric, model, optimizer, metric_direction='small'):
if self.args.local_rank in [-1, 0]:
self.earlystopper(metric, model, optimizer, metric_direction)
return self.earlystopper.early_stop
else:
return
def get_config(self):
# adaptive softmax / embedding
cutoffs, tie_projs = [], [False]
if self.args.adaptive:
assert self.args.dataset in ['wt103']
if self.args.dataset == 'wt103':
cutoffs = [20000, 40000, 200000]
tie_projs += [True] * len(cutoffs)
config = TransfoXLConfig(
vocab_size=self.args.vocab_size,
d_model=self.args.d_model,
d_embed=self.args.d_model,
n_head=self.args.n_head,
d_head=self.args.d_head,
d_inner=self.args.d_inner,
div_val=self.args.div_val,
pre_lnorm=self.args.pre_lnorm,
n_layer=self.args.n_layer,
tgt_len=self.args.tgt_len,
mem_len=self.args.mem_len,
ext_len=self.args.ext_len,
clamp_len=self.args.clamp_len,
same_length=self.args.same_length,
attn_type=self.args.attn_type,
sample_softmax=self.args.sample_softmax,
adaptive=self.args.adaptive,
dropout=self.args.dropout,
dropatt=self.args.dropatt,
untie_r=self.args.untie_r,
init_range=self.args.init_range,
proj_init_std=self.args.proj_init_std,
init_std=self.args.init_std,
layer_norm_epsilon=self.args.layer_norm_epsilon,
eos_token_id=self.vocab.get_idx('<eos>'),
cutoffs=cutoffs,
tie_projs=tie_projs,
)
return config
def get_model(self, use_checkpoint=False):
config = self.get_config()
if use_checkpoint:
model = TransfoXLLMHeadModel(
config=config,
args=self.args
).to(self.device)
model.load_state_dict(torch.load(self.args.pretrained_model_name), strict=False)
else:
model = TransfoXLLMHeadModel(config=config, args=self.args).to(self.device)
if self.args.distributed:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(self.device)
model = DistributedDataParallel(model, device_ids=[self.args.local_rank], output_device=self.args.local_rank)
return model
def load_model_ft(self, name):
config = self.get_config()
model = TransfoXLLMHeadModel(
config=config,
args=self.args
).to(self.device)
# TODO (haofeiyu): current text8 and enwik8 has problems with adaptive
# 2022/11/25 actually train text8 and enwik8 with adaptive
model.load_state_dict(torch.load(name), strict=False)
return model
def get_scheduler(self):
if self.args.scheduler == "noam":
def noam_lambda(step):
step = max(step, 1)
coef = self.args.model_size ** (-0.5) * min(
step ** (-0.5),
step * self.args.warmup_steps ** (-1.5)
)
return coef
self.log(
'====used GPU number: {}====='.format(torch.cuda.device_count())
)
self.args.warmup_steps = min(
len(self.train_iter)//self.args.grad_acc_steps+1,
self.args.warmup_steps
)
scheduler = LambdaLR(
self.optimizer,
lr_lambda=noam_lambda
)
elif self.args.scheduler == "linear":
scheduler = get_linear_schedule_with_warmup(
optimizer=self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=self.args.max_training_steps,
)
elif self.args.scheduler == "cosine":
scheduler = get_cosine_schedule_with_warmup(
optimizer=self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=self.args.max_training_steps,
)
else:
scheduler = ExponentialLR(self.optimizer, gamma=0.9)
return scheduler
def prepare_data(self):
self.log('Preparing data...')
|
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class Trainer(object):
def __init__(self, args):
super().__init__()
self.args = args
self.set_tool()
self.set_dist()
self.set_seed()
self.train_iter, self.valid_iter, self.test_iter = self.prepare_data()
self.model = self.get_model(use_checkpoint=self.args.use_checkpoint)
self.optimizer = Adam(params=self.model.parameters(), lr=self.args.lr)
self.scheduler = self.get_scheduler()
self.earlystopper = EarlyStopper(args, self.logger)
def avg_rank(self, scalar):
if self.args.local_rank == -1:
return scalar
scalar_t = torch.tensor(
scalar,
dtype=torch.float,
device=self.device
) / torch.distributed.get_world_size()
torch.distributed.all_reduce(
scalar_t,
op=torch.distributed.ReduceOp.SUM
)
return scalar_t.item()
def set_tool(self):
if self.args.local_rank in [-1, 0]:
os.environ['WANDB_API_KEY'] = '972035264241fb0f6cc3cab51a5d82f47ca713db'
#wandb.init(project="LTDecoder", name=self.args.timestamp, config=self.args, dir='./tmp')
wandb.init(mode='disabled')
self.logger = logging.getLogger(__file__)
def set_dist(self):
self.args.distributed = self.args.local_rank != -1
logging.basicConfig(
level=logging.INFO
if self.args.local_rank in [-1, 0]
else logging.WARN
)
if self.args.distributed:
self.device = torch.device("cuda", self.args.local_rank)
torch.distributed.init_process_group(
backend="nccl",
init_method="env://"
)
else:
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu'
)
def set_seed(self):
if self.args.distributed:
rank = torch.distributed.get_rank()
torch.manual_seed(self.args.seed_id + rank_id)
torch.cuda.manual_seed(self.args.seed_id + rank_id)
torch.cuda.manual_seed_all(self.args.seed_id + rank_id)
else:
torch.manual_seed(self.args.seed_id)
torch.cuda.manual_seed(self.args.seed_id)
torch.cuda.manual_seed_all(self.args.seed_id)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def log(self, str):
if self.args.local_rank in [-1, 0]:
self.logger.info(str)
def wandb_log(self, dict):
if self.args.local_rank in [-1, 0]:
wandb.log(dict)
def judge_earlystopping(self, metric, model, optimizer, metric_direction='small'):
if self.args.local_rank in [-1, 0]:
self.earlystopper(metric, model, optimizer, metric_direction)
return self.earlystopper.early_stop
else:
return
def get_config(self):
# adaptive softmax / embedding
cutoffs, tie_projs = [], [False]
if self.args.adaptive:
assert self.args.dataset in ['wt103']
if self.args.dataset == 'wt103':
cutoffs = [20000, 40000, 200000]
tie_projs += [True] * len(cutoffs)
config = TransfoXLConfig(
vocab_size=self.args.vocab_size,
d_model=self.args.d_model,
d_embed=self.args.d_model,
n_head=self.args.n_head,
d_head=self.args.d_head,
d_inner=self.args.d_inner,
div_val=self.args.div_val,
pre_lnorm=self.args.pre_lnorm,
n_layer=self.args.n_layer,
tgt_len=self.args.tgt_len,
mem_len=self.args.mem_len,
ext_len=self.args.ext_len,
clamp_len=self.args.clamp_len,
same_length=self.args.same_length,
attn_type=self.args.attn_type,
sample_softmax=self.args.sample_softmax,
adaptive=self.args.adaptive,
dropout=self.args.dropout,
dropatt=self.args.dropatt,
untie_r=self.args.untie_r,
init_range=self.args.init_range,
proj_init_std=self.args.proj_init_std,
init_std=self.args.init_std,
layer_norm_epsilon=self.args.layer_norm_epsilon,
eos_token_id=self.vocab.get_idx('<eos>'),
cutoffs=cutoffs,
tie_projs=tie_projs,
)
return config
def get_model(self, use_checkpoint=False):
config = self.get_config()
if use_checkpoint:
model = TransfoXLLMHeadModel(
config=config,
args=self.args
).to(self.device)
model.load_state_dict(torch.load(self.args.pretrained_model_name), strict=False)
else:
model = TransfoXLLMHeadModel(config=config, args=self.args).to(self.device)
if self.args.distributed:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(self.device)
model = DistributedDataParallel(model, device_ids=[self.args.local_rank], output_device=self.args.local_rank)
return model
def load_model_ft(self, name):
config = self.get_config()
model = TransfoXLLMHeadModel(
config=config,
args=self.args
).to(self.device)
# TODO (haofeiyu): current text8 and enwik8 has problems with adaptive
# 2022/11/25 actually train text8 and enwik8 with adaptive
model.load_state_dict(torch.load(name), strict=False)
return model
def get_scheduler(self):
if self.args.scheduler == "noam":
def noam_lambda(step):
step = max(step, 1)
coef = self.args.model_size ** (-0.5) * min(
step ** (-0.5),
step * self.args.warmup_steps ** (-1.5)
)
return coef
self.log(
'====used GPU number: {}====='.format(torch.cuda.device_count())
)
self.args.warmup_steps = min(
len(self.train_iter)//self.args.grad_acc_steps+1,
self.args.warmup_steps
)
scheduler = LambdaLR(
self.optimizer,
lr_lambda=noam_lambda
)
elif self.args.scheduler == "linear":
scheduler = get_linear_schedule_with_warmup(
optimizer=self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=self.args.max_training_steps,
)
elif self.args.scheduler == "cosine":
scheduler = get_cosine_schedule_with_warmup(
optimizer=self.optimizer,
num_warmup_steps=self.args.warmup_steps,
num_training_steps=self.args.max_training_steps,
)
else:
scheduler = ExponentialLR(self.optimizer, gamma=0.9)
return scheduler
def prepare_data(self):
self.log('Preparing data...') | self.corpus = get_lm_corpus(self.args.dataset_dir, self.args.dataset) | 1 | 2023-10-19 00:49:29+00:00 | 8k |
npgrosser/autowired | autowired/_container.py | [
{
"identifier": "component_scan",
"path": "autowired/_component_scan.py",
"snippet": "def component_scan(root_module: ModuleType) -> Iterable[ClassComponentInfo]:\n scanner = ClassScanner(root_module)\n component_infos = (get_component_info(cls) for cls in scanner.get_classes())\n return (c for... | import dataclasses
import inspect
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from types import FunctionType, ModuleType
from typing import (
Type,
Callable,
Any,
List,
Optional,
Union,
Generic,
Dict,
TypeVar,
)
from ._component_scan import component_scan
from ._exceptions import (
MissingTypeAnnotation,
AmbiguousDependencyException,
IllegalAutoWireType,
InstantiationError,
UnresolvableDependencyException,
AutowiredException,
)
from ._logging import logger
from ._typing_utils import is_subtype, get_sequence_type | 3,636 | """
Remove a provider from the container.
:param provider: Provider name or provider instance
"""
def predicate(p: Provider) -> bool:
if isinstance(provider, Provider):
return p == provider
else:
return p.get_name() == provider
remove_index = None
for i, p in enumerate(self._providers):
if predicate(p):
remove_index = i
break
if remove_index is not None:
self._providers.pop(remove_index)
def resolve(self, dependency: Union[Dependency, Type[_T]]) -> _T:
"""
Resolves a dependency from the container.
If no existing provider satisfies the dependency specification,
the container tries to auto-wire the object as defined by `self.autowire(...)`
and stores the result instance as a new singleton provider.
The same is true for the dependencies of the object (recursively).
If multiple matching providers are found,
the name of the dependency is compared to the provider name to try to resolve the ambiguity.
:param dependency: Dependency specification or target type
:return: the resolved dependency
:raises UnresolvableDependencyException: if the dependency cannot be resolved
:raises AmbiguousDependencyException: if multiple matching providers are found and there is no name match
"""
if not isinstance(dependency, Dependency):
logger.trace(f"Resolving type {dependency.__name__} for container {self}")
dependency = Dependency(
_camel_to_snake(dependency.__name__), dependency, True
)
logger.trace(f"Resolving {dependency} for container {self}")
existing = self.get_provider(dependency)
if existing:
logger.trace(f"Found existing {existing}")
return existing.get_instance(dependency, self)
logger.trace(f"Existing not found, auto-wiring {dependency}")
# region list injection special case
# check if the dependency type is a list
sequence_type, element_type = get_sequence_type(dependency.type)
if (
element_type is not None
and sequence_type is not None
and not _is_illegal_type(element_type)
):
element_dependency = Dependency(dependency.name, element_type, True)
elements = []
for provider in self.get_providers(element_dependency):
elements.append(provider.get_instance(element_dependency, self))
if len(elements) > 0:
return sequence_type(elements)
# endregion
result = self.autowire(dependency.type)
self.add(
Provider.from_supplier(lambda: result, dependency.type, dependency.name)
)
logger.trace(f"Successfully autowired {dependency} to {result}")
return result
def autowire(
self,
t: Type[_T],
**explicit_kw_args,
) -> _T:
"""
Auto-wires an object of the given type. Meaning that all dependencies of the object are resolved
as defined by `self.resolve(...)` and the object is initialized with the resolved dependencies.
In contrast to `self.resolve(...)`, this function does not store the result as a singleton provider.
:param t:
:param explicit_kw_args:
:return: The auto-wired object
:raises AutowiredException: if the object cannot be auto-wired
"""
logger.trace(f"Auto-wiring {t} with {len(explicit_kw_args)} explicit args")
if _is_illegal_type(t):
raise IllegalAutoWireType(f"Cannot auto-wire object of type {t}")
dependencies = _get_dependencies_for_type(t)
resolved_kw_args = dict(explicit_kw_args) if explicit_kw_args else {}
for dep in dependencies:
if dep.name in resolved_kw_args:
continue
existing = self.get_provider(dep)
if existing:
logger.trace(f"Found existing {existing} provider for {dep}")
resolved_kw_args[dep.name] = existing.get_instance(dep, self)
elif dep.default_factory is not None:
logger.trace(f"Using default factory for {dep}")
resolved_kw_args[dep.name] = dep.default_factory()
else:
# try to resolve dependency
try:
auto: Any = self.resolve(dep)
resolved_kw_args[dep.name] = auto
except AutowiredException as e:
if dep.required:
|
_T = TypeVar("_T")
@dataclass(frozen=True)
class Dependency(Generic[_T]):
    """
    A dependency specification.

    Describes what a consumer needs: a target ``type`` plus a ``name`` used
    to disambiguate between multiple providers that satisfy the same type.
    Instances are immutable (frozen dataclass) and therefore hashable.
    """
    # Name the dependency was declared under (e.g. the parameter name);
    # compared against provider names to resolve ambiguous matches.
    name: str
    # Required component type; a provider satisfies it via subtype check.
    type: Type[_T]
    # Whether a resolution failure is fatal for the dependent object
    # (the container only raises for required dependencies).
    required: bool = True
    # Optional zero-argument fallback used when no provider matches.
    default_factory: Optional[Callable[[], _T]] = None
class Provider(ABC, Generic[_T]):
    """Source of instances able to satisfy ``Dependency`` specifications."""

    @abstractmethod
    def get_instance(
        self, dependency: Dependency, container: "Container"
    ) -> _T:  # pragma: no cover
        """
        Return an instance that satisfies ``dependency``.

        :param dependency: The dependency specification to fulfil.
        :param container: The container currently performing the resolution.
        :return: An instance matching the specification.
        """
        ...

    @abstractmethod
    def get_name(self) -> str:  # pragma: no cover
        """
        Return this provider's name.

        When several providers satisfy the same dependency specification,
        the container compares the dependency name against provider names
        to break the tie.

        :return: The provider name.
        """
        ...

    @abstractmethod
    def satisfies(self, dependency: Dependency) -> bool:  # pragma: no cover
        """
        Tell whether this provider can fulfil ``dependency``.

        :param dependency: The dependency specification to check.
        :return: True if this provider satisfies the specification.
        """
        ...

    @staticmethod
    def from_instance(instance: _T, name: Optional[str] = None) -> "Provider[_T]":
        """
        Wrap an existing object as a singleton provider.

        :param instance: Object returned by every ``get_instance`` call.
        :param name: Provider name; defaults to the snake_case type name.
        :return: The newly created provider
        """
        instance_type = type(instance)
        provider_name = (
            name if name is not None else _camel_to_snake(instance_type.__name__)
        )
        return _SimpleProvider(provider_name, instance_type, lambda: instance)

    # noinspection PyShadowingBuiltins
    @staticmethod
    def from_supplier(
        supplier: Callable[[], _T],
        type: Optional[Type[_T]] = None,
        name: Optional[str] = None,
    ) -> "Provider[_T]":
        """
        Build a provider around a zero-argument factory function.

        :param supplier: Called on every ``get_instance`` invocation.
        :param type: Component type. When omitted it is inferred from the
            supplier's return annotation, or — when ``supplier`` is itself a
            class — the class is used directly.
        :param name: Provider name; defaults to the snake_case type name.
        :return: The newly created provider
        :raises MissingTypeAnnotation: if the type cannot be inferred.
        """
        component_type = type
        if component_type is None:
            if inspect.isclass(supplier):
                # A class is its own zero-argument factory and its own type.
                component_type = supplier
            else:
                component_type = inspect.signature(supplier).return_annotation
            if component_type == inspect.Signature.empty:
                raise MissingTypeAnnotation(
                    f"Failed to determine type of {supplier.__name__}. "
                )
        provider_name = name
        if provider_name is None:
            provider_name = _camel_to_snake(component_type.__name__)
        return _SimpleProvider(provider_name, component_type, supplier)

    @staticmethod
    def from_class(cls, container: "Container", transient: bool) -> "Provider[_T]":
        """
        Build a provider that auto-wires ``cls`` through ``container``.

        :param cls: The class to auto-wire.
        :param container: Container used to resolve constructor dependencies.
        :param transient: If True a fresh instance is built on every request;
            otherwise the first instance is cached (singleton behavior).
        :return: The newly created provider
        """

        def build():
            return container.autowire(cls)

        factory = build if transient else _cached(build)
        return _SimpleProvider(_camel_to_snake(cls.__name__), cls, factory)
def _cached(supplier: Callable[[], _T]) -> Callable[[], _T]:
cached = False
result = None
def wrapper():
nonlocal cached
nonlocal result
if not cached:
result = supplier()
cached = True
return result
return wrapper
@dataclass(frozen=True)
class _SimpleProvider(Provider[_T]):
    """Provider backed by a plain zero-argument getter callable."""

    # Name used for ambiguity resolution (see Provider.get_name).
    name: str
    # Type this provider claims to supply; compared via is_subtype below.
    type: Type[_T]
    # Excluded from repr: lambdas/closures print noisily and may capture large objects.
    getter: Callable[[], _T] = dataclasses.field(repr=False)

    def get_instance(self, dependency: Dependency, container: "Container") -> _T:
        # The dependency/container arguments are ignored; the getter is authoritative.
        return self.getter()

    def get_name(self) -> str:
        return self.name

    def satisfies(self, dependency: Dependency) -> bool:
        # is_subtype is a project helper; presumably a subclass-aware type
        # compatibility check — TODO confirm its semantics for generics.
        return is_subtype(self.type, dependency.type)
_illegal_autowiredType_modules = ["builtins", "typing", "dataclasses", "abc", "object"]
def _is_illegal_type(t: Type[_T]) -> bool:
return t.__module__.split(".")[0] in _illegal_autowiredType_modules
class Container:
"""
A container for resolving and storing dependencies.
"""
_providers: List[Provider]
def __init__(self):
    """Create a container with no registered providers."""
    self._providers = list()
def get_providers(self, dependency: Optional[Dependency] = None) -> List[Provider]:
    """
    Returns all providers that match the given dependency specification.
    :param dependency: Optional dependency specification, if None, all providers are returned
    :return: matching providers (always a new list, safe to mutate)
    """
    if dependency is not None:
        return [candidate for candidate in self._providers if candidate.satisfies(dependency)]
    # No filter requested: hand back a defensive copy of the registry.
    return list(self._providers)
def get_provider(self, dependency: Dependency) -> Optional[Provider]:
    """
    Returns an existing provider that matches the given dependency specification.
    :param dependency: the specification to satisfy
    :return: the single matching provider, or None when nothing matches
    :raises AmbiguousDependencyException: If multiple matching providers are found and there is no name match
    """
    candidates = self.get_providers(dependency)
    if not candidates:
        return None
    if len(candidates) == 1:
        return candidates[0]
    # Several providers satisfy the type; try to disambiguate by name.
    by_name = _group_by(lambda obj: obj.name, candidates)
    named = by_name.get(dependency.name)
    if named is not None and len(named) == 1:
        return named[0]
    raise AmbiguousDependencyException(
        f"Failed to resolve dependency {dependency.name}"
        f" of type {dependency.type.__name__}."
        f" Multiple candidates found: {candidates}"
    )
def add(self, provider_or_instance: Union[Provider, Any], /):
    """
    Adds a provider or instance (as singleton provider) to the container.
    :param provider_or_instance: If not a provider, a singleton provider is created from the instance.
        The name of the provider is derived from the type name of the instance.
    """
    if isinstance(provider_or_instance, Provider):
        provider = provider_or_instance
    else:
        provider = Provider.from_instance(provider_or_instance)
    self._providers.append(provider)
def remove(self, provider: Union[str, Provider, Type[_T]], /):
    """
    Remove the first matching provider from the container.

    :param provider: Provider instance, provider name, or provided class.
        Bug fix: a class argument used to be compared against the provider's
        *string* name and therefore never matched; classes are now matched via
        the same snake-case naming scheme used when providers are created from
        classes (Provider.from_class / Provider.from_instance).
    """

    def predicate(p: Provider) -> bool:
        if isinstance(provider, Provider):
            # Identity/equality match on the provider object itself.
            return p == provider
        if isinstance(provider, type):
            # Match classes by the derived snake-case provider name.
            return p.get_name() == _camel_to_snake(provider.__name__)
        return p.get_name() == provider

    remove_index = None
    for i, p in enumerate(self._providers):
        if predicate(p):
            remove_index = i
            break
    if remove_index is not None:
        self._providers.pop(remove_index)
def resolve(self, dependency: Union[Dependency, Type[_T]]) -> _T:
    """
    Resolves a dependency from the container.
    If no existing provider satisfies the dependency specification,
    the container tries to auto-wire the object as defined by `self.autowire(...)`
    and stores the result instance as a new singleton provider.
    The same is true for the dependencies of the object (recursively).
    If multiple matching providers are found,
    the name of the dependency is compared to the provider name to try to resolve the ambiguity.
    :param dependency: Dependency specification or target type
    :return: the resolved dependency
    :raises UnresolvableDependencyException: if the dependency cannot be resolved
    :raises AmbiguousDependencyException: if multiple matching providers are found and there is no name match
    """
    if not isinstance(dependency, Dependency):
        # A bare type was passed; build a spec named after the type (snake case).
        logger.trace(f"Resolving type {dependency.__name__} for container {self}")
        dependency = Dependency(
            _camel_to_snake(dependency.__name__), dependency, True
        )
    logger.trace(f"Resolving {dependency} for container {self}")
    existing = self.get_provider(dependency)
    if existing:
        logger.trace(f"Found existing {existing}")
        return existing.get_instance(dependency, self)
    logger.trace(f"Existing not found, auto-wiring {dependency}")

    # region list injection special case
    # check if the dependency type is a list
    sequence_type, element_type = get_sequence_type(dependency.type)
    if (
        element_type is not None
        and sequence_type is not None
        and not _is_illegal_type(element_type)
    ):
        # Collect one instance from every provider of the element type and
        # inject them together as the requested sequence type.
        element_dependency = Dependency(dependency.name, element_type, True)
        elements = []
        for provider in self.get_providers(element_dependency):
            elements.append(provider.get_instance(element_dependency, self))
        # Only short-circuit when at least one element was found; otherwise
        # fall through to regular auto-wiring.
        if len(elements) > 0:
            return sequence_type(elements)
    # endregion

    result = self.autowire(dependency.type)
    # Cache the instance as a named singleton for future resolutions.
    self.add(
        Provider.from_supplier(lambda: result, dependency.type, dependency.name)
    )
    logger.trace(f"Successfully autowired {dependency} to {result}")
    return result
def autowire(
self,
t: Type[_T],
**explicit_kw_args,
) -> _T:
"""
Auto-wires an object of the given type. Meaning that all dependencies of the object are resolved
as defined by `self.resolve(...)` and the object is initialized with the resolved dependencies.
In contrast to `self.resolve(...)`, this function does not store the result as a singleton provider.
:param t:
:param explicit_kw_args:
:return: The auto-wired object
:raises AutowiredException: if the object cannot be auto-wired
"""
logger.trace(f"Auto-wiring {t} with {len(explicit_kw_args)} explicit args")
if _is_illegal_type(t):
raise IllegalAutoWireType(f"Cannot auto-wire object of type {t}")
dependencies = _get_dependencies_for_type(t)
resolved_kw_args = dict(explicit_kw_args) if explicit_kw_args else {}
for dep in dependencies:
if dep.name in resolved_kw_args:
continue
existing = self.get_provider(dep)
if existing:
logger.trace(f"Found existing {existing} provider for {dep}")
resolved_kw_args[dep.name] = existing.get_instance(dep, self)
elif dep.default_factory is not None:
logger.trace(f"Using default factory for {dep}")
resolved_kw_args[dep.name] = dep.default_factory()
else:
# try to resolve dependency
try:
auto: Any = self.resolve(dep)
resolved_kw_args[dep.name] = auto
except AutowiredException as e:
if dep.required: | raise UnresolvableDependencyException( | 5 | 2023-10-16 09:22:20+00:00 | 8k |
chenxn2020/GOSE | GOSEfinetune/models/layoutlmv2/modeling_layoutlmv2 copy.py | [
{
"identifier": "ReOutput",
"path": "GOSEfinetune/utils.py",
"snippet": "class ReOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = No... | import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import detectron2
import os
import json
from torch import nn
from torch.nn import CrossEntropyLoss
from detectron2.modeling import META_ARCH_REGISTRY
from transformers import PreTrainedModel
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
TokenClassifierOutput,
)
from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput
from transformers.utils import logging
from ...modules.decoders.re import REDecoder
from ...utils import ReOutput
from .configuration_layoutlmv2 import LayoutLMv2Config
from .detectron2_config import add_layoutlmv2_config
from ...modules.decoders.gare import GARE
from IPython import embed;embed() | 5,002 | rel_pos_y_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
rel_pos_x = rel_pos_x.contiguous()
rel_pos_y = rel_pos_y.contiguous()
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
def forward(
    self,
    hidden_states,
    attention_mask=None,
    head_mask=None,
    encoder_hidden_states=None,
    encoder_attention_mask=None,
    past_key_values=None,
    use_cache=None,
    output_attentions=False,
    output_hidden_states=False,
    return_dict=True,
    bbox=None,
    position_ids=None,
):
    """Run the stack of LayoutLMv2 layers.

    The relative 1D (sequence) and 2D (layout) attention biases are computed
    once here and shared by every layer.

    Returns:
        BaseModelOutputWithPastAndCrossAttentions, or a tuple of its non-None
        fields when return_dict=False.
    """
    all_hidden_states = () if output_hidden_states else None
    all_self_attentions = () if output_attentions else None
    all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

    next_decoder_cache = () if use_cache else None

    # Position biases are layer-independent, so compute them a single time.
    rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
    rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None

    for i, layer_module in enumerate(self.layer):
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        layer_head_mask = head_mask[i] if head_mask is not None else None
        past_key_value = past_key_values[i] if past_key_values is not None else None

        if getattr(self.config, "gradient_checkpointing", False) and self.training:

            if use_cache:
                # Fix: logger.warn is a deprecated alias of logger.warning.
                logger.warning(
                    "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                    "`use_cache=False`..."
                )
                use_cache = False

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    # Re-attach non-tensor arguments that checkpointing cannot pass through.
                    return module(*inputs, past_key_value, output_attentions)

                return custom_forward

            layer_outputs = torch.utils.checkpoint.checkpoint(
                create_custom_forward(layer_module),
                hidden_states,
                attention_mask,
                layer_head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                rel_pos=rel_pos,
                rel_2d_pos=rel_2d_pos,
            )
        else:
            layer_outputs = layer_module(
                hidden_states,
                attention_mask,
                layer_head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                past_key_value,
                output_attentions,
                rel_pos=rel_pos,
                rel_2d_pos=rel_2d_pos,
            )

        hidden_states = layer_outputs[0]
        if use_cache:
            next_decoder_cache += (layer_outputs[-1],)
        if output_attentions:
            all_self_attentions = all_self_attentions + (layer_outputs[1],)
            if self.config.add_cross_attention:
                all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

    if output_hidden_states:
        all_hidden_states = all_hidden_states + (hidden_states,)

    if not return_dict:
        return tuple(
            v
            for v in [
                hidden_states,
                next_decoder_cache,
                all_hidden_states,
                all_self_attentions,
                all_cross_attentions,
            ]
            if v is not None
        )
    return BaseModelOutputWithPastAndCrossAttentions(
        last_hidden_state=hidden_states,
        past_key_values=next_decoder_cache,
        hidden_states=all_hidden_states,
        attentions=all_self_attentions,
        cross_attentions=all_cross_attentions,
    )
class LayoutLMv2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
| # coding=utf-8
logger = logging.get_logger(__name__)

# Pretrained checkpoint identifiers this modeling file is known to support.
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "layoutlmv2-base-uncased",
    "layoutlmv2-large-uncased",
]

# LayoutLMv2 uses the stock torch LayerNorm; the alias keeps the historical name.
LayoutLMv2LayerNorm = torch.nn.LayerNorm
class LayoutLMv2Embeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super(LayoutLMv2Embeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        # Separate tables for x/y coordinates (size coordinate_size) and for
        # box height/width (size shape_size).
        self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
        self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = LayoutLMv2LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # Static position ids kept as a buffer so they follow the module's device.
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))

    def _cal_spatial_position_embeddings(self, bbox):
        """Embed bounding boxes given as (x0, y0, x1, y1); values must index
        into the 0-1000 embedding tables.

        Returns the concatenation of left/upper/right/lower coordinate
        embeddings plus height and width embeddings along the last axis.
        """
        try:
            left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
            upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
            right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
            lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
        except IndexError as e:
            # Out-of-range coordinates surface as embedding index errors.
            raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e

        h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
        w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])

        spatial_position_embeddings = torch.cat(
            [
                left_position_embeddings,
                upper_position_embeddings,
                right_position_embeddings,
                lower_position_embeddings,
                h_position_embeddings,
                w_position_embeddings,
            ],
            dim=-1,
        )

        return spatial_position_embeddings
class LayoutLMv2SelfAttention(nn.Module):
    """Multi-head self-attention with optional fused QKV projection and
    additive 1D/2D relative position biases."""

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.fast_qkv = config.fast_qkv
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias

        if config.fast_qkv:
            # Single fused projection for Q, K, V; Q and V carry explicit bias
            # parameters while K is bias-free.
            self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
            self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
            self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
        else:
            self.query = nn.Linear(config.hidden_size, self.all_head_size)
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (B, L, H*D) -> (B, H, L, D)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def compute_qkv(self, hidden_states):
        """Project hidden states to query/key/value via the fused or separate path."""
        if self.fast_qkv:
            qkv = self.qkv_linear(hidden_states)
            q, k, v = torch.chunk(qkv, 3, dim=-1)
            if q.ndimension() == self.q_bias.ndimension():
                q = q + self.q_bias
                v = v + self.v_bias
            else:
                # Reshape the (1, 1, D) biases so they broadcast against q/v of
                # a different rank.
                _sz = (1,) * (q.ndimension() - 1) + (-1,)
                q = q + self.q_bias.view(*_sz)
                v = v + self.v_bias.view(*_sz)
        else:
            q = self.query(hidden_states)
            k = self.key(hidden_states)
            v = self.value(hidden_states)
        return q, k, v

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        # NOTE(review): head_mask / encoder_* / past_key_value are accepted for
        # interface parity but are not used inside this forward — confirm the
        # caller handles them (pruning is applied structurally in prune_heads).
        q, k, v = self.compute_qkv(hidden_states)

        # (B, L, H*D) -> (B, H, L, D)
        query_layer = self.transpose_for_scores(q)
        key_layer = self.transpose_for_scores(k)
        value_layer = self.transpose_for_scores(v)

        query_layer = query_layer / math.sqrt(self.attention_head_size)
        # [BSZ, NAT, L, L]
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        # Additive relative position biases, precomputed once per encoder pass.
        if self.has_relative_attention_bias:
            attention_scores += rel_pos
        if self.has_spatial_attention_bias:
            attention_scores += rel_2d_pos
        # attention_mask marks positions to hide: True -> -inf before softmax.
        attention_scores = attention_scores.float().masked_fill_(attention_mask.to(torch.bool), float("-inf"))
        attention_probs = F.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        # (B, H, L, D) -> (B, L, H*D)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class LayoutLMv2Attention(nn.Module):
    """Self-attention plus output projection, with support for head pruning."""

    def __init__(self, config):
        super().__init__()
        self.self = LayoutLMv2SelfAttention(config)
        self.output = LayoutLMv2SelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Structurally remove the given attention heads (by index) in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        # NOTE(review): assumes the separate query/key/value projections exist,
        # i.e. config.fast_qkv is False — the fused qkv_linear path is not pruned.
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        # Residual projection of the attention context back to hidden size.
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class LayoutLMv2Layer(nn.Module):
    """One transformer block: self-attention, optional cross-attention
    (decoder mode), and a chunked feed-forward sublayer."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Sequence axis along which the feed-forward is chunked.
        self.seq_len_dim = 1
        self.attention = LayoutLMv2Attention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = LayoutLMv2Attention(config)
        self.intermediate = LayoutLMv2Intermediate(config)
        self.output = LayoutLMv2Output(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            assert hasattr(
                self, "crossattention"
            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # Chunk the feed-forward over the sequence axis to bound peak memory.
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        """Feed-forward applied to one chunk of the sequence."""
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """Map signed relative positions to bucket indices (T5-style).

    Small offsets get one bucket each; larger offsets share logarithmically
    sized buckets up to ``max_distance``. With ``bidirectional=True`` the upper
    half of the bucket range encodes positive offsets and the lower half
    negative ones.
    """
    bucket = 0
    if bidirectional:
        # Split the range: positive offsets land in the upper half.
        num_buckets //= 2
        bucket += (relative_position > 0).long() * num_buckets
        distance = torch.abs(relative_position)
    else:
        # Unidirectional: only look backwards; forward offsets collapse to 0.
        distance = torch.max(-relative_position, torch.zeros_like(relative_position))
    # distance is now in the range [0, inf)

    # Half of the buckets cover exact increments in position.
    max_exact = num_buckets // 2
    is_small = distance < max_exact

    # The remaining buckets grow logarithmically up to max_distance.
    log_bucket = max_exact + (
        torch.log(distance.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    log_bucket = torch.min(log_bucket, torch.full_like(log_bucket, num_buckets - 1))

    bucket += torch.where(is_small, distance, log_bucket)
    return bucket
class LayoutLMv2Encoder(nn.Module):
    """Stack of LayoutLMv2 layers sharing precomputed 1D/2D relative position biases."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])

        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias

        if self.has_relative_attention_bias:
            self.rel_pos_bins = config.rel_pos_bins
            self.max_rel_pos = config.max_rel_pos
            self.rel_pos_onehot_size = config.rel_pos_bins
            # Maps one-hot bucket ids to per-head bias values.
            self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)

        if self.has_spatial_attention_bias:
            self.max_rel_2d_pos = config.max_rel_2d_pos
            self.rel_2d_pos_bins = config.rel_2d_pos_bins
            self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins
            self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
            self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)

    def _cal_1d_pos_emb(self, hidden_states, position_ids):
        """Per-head additive bias for every pair of sequence positions."""
        rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
        rel_pos = relative_position_bucket(
            rel_pos_mat,
            num_buckets=self.rel_pos_bins,
            max_distance=self.max_rel_pos,
        )
        rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)
        # (B, L, L, H) -> (B, H, L, L) to match attention scores.
        rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
        rel_pos = rel_pos.contiguous()
        return rel_pos

    def _cal_2d_pos_emb(self, hidden_states, bbox):
        """Per-head additive bias from relative x (left edge) and y (bottom edge) box positions."""
        position_coord_x = bbox[:, :, 0]
        position_coord_y = bbox[:, :, 3]
        rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
        rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
        rel_pos_x = relative_position_bucket(
            rel_pos_x_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_y = relative_position_bucket(
            rel_pos_y_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
        rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
        rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
        rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
        rel_pos_x = rel_pos_x.contiguous()
        rel_pos_y = rel_pos_y.contiguous()
        rel_2d_pos = rel_pos_x + rel_pos_y
        return rel_2d_pos

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        bbox=None,
        position_ids=None,
    ):
        """Run all layers; see LayoutLMv2Layer.forward for per-layer outputs."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        # Biases are identical for every layer, so compute them once here.
        rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
        rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:

                if use_cache:
                    # Fix: logger.warn is a deprecated alias of logger.warning.
                    logger.warning(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    rel_pos=rel_pos,
                    rel_2d_pos=rel_2d_pos,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    rel_pos=rel_pos,
                    rel_2d_pos=rel_2d_pos,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class LayoutLMv2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
| config_class = LayoutLMv2Config | 1 | 2023-10-19 14:36:32+00:00 | 8k |
mklissa/dceo | dopamine/jax/agents/rainbow/rainbow_dceo.py | [
{
"identifier": "losses",
"path": "dopamine/jax/losses.py",
"snippet": "def huber_loss(targets: jnp.array,\n predictions: jnp.array,\n delta: float = 1.0) -> jnp.ndarray:\ndef mse_loss(targets: jnp.array, predictions: jnp.array) -> jnp.ndarray:\ndef softmax_cross_entropy_loss... | import functools
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import optax
import tensorflow as tf
from dopamine.jax import losses
from dopamine.jax import networks
from dopamine.jax.agents.dqn import dqn_agent
from dopamine.metrics import statistics_instance
from dopamine.replay_memory import prioritized_replay_buffer | 4,055 | return self.action
def step(self, reward, observation):
    """Record the latest transition, train, and choose the next action.

    Args:
      reward: reward received since the previous action.
      observation: most recent observation from the environment.

    Returns:
      numpy int, the action selected for the new state.
    """
    self._last_observation = self._observation
    self._record_observation(observation)

    if not self.eval_mode:
        # Non-terminal transition; _train_step is a no-op until the replay
        # buffer holds min_replay_history transitions.
        self._store_transition(self._last_observation, self.action, reward, False)
        self._train_step()

    # select_action threads the RNG key through and applies the epsilon
    # schedule (epsilon_fn / epsilon_decay_period) during training.
    (self._rng,
     self.action) = select_action(
        self.network_def,
        self.online_params,
        self.preprocess_fn(self.state),
        self._rng,
        self.num_actions,
        self.eval_mode,
        self.epsilon_eval,
        self.epsilon_train,
        self.epsilon_decay_period,
        self.training_steps,
        self.min_replay_history,
        self.epsilon_fn,
        self._support)
    self.action = onp.asarray(self.action)
    return self.action
def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_params to target_network_params if training
steps is a multiple of target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
states = self.preprocess_fn(self.replay_elements['state'])
next_states = self.preprocess_fn(self.replay_elements['next_state'])
self.rep_optimizer_state, self.rep_params, loss = train_rep(
self.rep_network_def,
self.rep_params,
self.optimizer,
self.rep_optimizer_state,
states,
next_states,)
for o in np.random.choice(self._num_options, 3, replace=False):
option = self.options[o]
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
option.optimizer_state, self.online_params, loss, mean_loss = train(
self.network_def,
option.online_params,
option.target_network_params,
self.optimizer,
option.optimizer_state,
self.preprocess_fn(self.replay_elements['state']),
self.replay_elements['action'],
self.preprocess_fn(self.replay_elements['next_state']),
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._support,
self.cumulative_gamma)
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self.optimizer_state, self.online_params, loss, mean_loss = train(
self.network_def,
self.online_params,
self.target_network_params,
self.optimizer,
self.optimizer_state,
self.preprocess_fn(self.replay_elements['state']),
self.replay_elements['action'],
self.preprocess_fn(self.replay_elements['next_state']),
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._support,
self.cumulative_gamma)
if self._replay_scheme == 'prioritized':
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
with self.summary_writer.as_default():
tf.summary.scalar('CrossEntropyLoss', mean_loss,
step=self.training_steps)
self.summary_writer.flush()
if hasattr(self, 'collector_dispatcher'):
self.collector_dispatcher.write(
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Weights (lap_dim, lap_dim-1, ..., 1) with a trailing zero so that
# coeff_vector[dim - 1] - coeff_vector[dim] is well defined for dim == lap_dim.
# NOTE(review): mixes jnp.arange with np.concatenate, so the result is a NumPy
# array -- confirm this is intended rather than jnp.concatenate.
coeff_vector = jnp.arange(lap_dim, 0, -1)
coeff_vector = np.concatenate((coeff_vector, np.zeros(1)))
def neg_loss_fn(phi_u, phi_v):
    """Repulsive part of the objective for one pair of embeddings.

    Accumulates, for dim = lap_dim down to 1, a term over the leading
    `dim` components of each embedding, weighted by the difference of
    consecutive entries of the enclosing scope's `coeff_vector`.
    """
    def term(dim):
        u = phi_u[:dim]
        v = phi_v[:dim]
        weight = coeff_vector[dim - 1] - coeff_vector[dim]
        inner = jnp.dot(u, v)
        log_u = jnp.log(1 + jnp.sqrt(jnp.dot(u, u)))
        log_v = jnp.log(1 + jnp.sqrt(jnp.dot(v, v)))
        return weight * (inner ** 2 - log_u - log_v)
    # Same descending summation order as the original accumulation loop.
    return sum(term(dim) for dim in range(lap_dim, 0, -1))
neg_loss_vmap = jax.vmap(neg_loss_fn)
def _update_lap(
    rng_key, opt_state, params, transitions):#, transitions_u, transitions_v):
    """Computes learning update from batch of replay transitions.

    NOTE(review): reads `lap_network`, `neg_loss_vmap`, `rep_optimizer`,
    `grad_error_bound` and `self._batch_size` from the enclosing scope --
    presumably this is defined inside a method; verify those names are in
    scope where this function is created.

    Returns:
        (rng_key, new_opt_state, new_params, pos_loss, neg_loss)
    """
    rng_key, update_key = jax.random.split(rng_key)
    def lap_loss_fn(params, update_key):
        """Calculates loss given network parameters and transitions."""
        # The batch is split into four equally sized groups along axis 0:
        # phi_tm1, phi_t (consecutive states) and phi_u, phi_v (sampled pairs).
        phis = lap_network.apply(params, update_key,
                                 transitions).q_values
        phis = jnp.split(phis, 4, axis=0)
        phi_tm1 = phis[0]
        phi_t = phis[1]
        phi_u = phis[2]
        phi_v = phis[3]
        # Squared-difference term between consecutive-state embeddings,
        # weighted by the first lap_dim entries of coeff_vector.
        pos_loss = ((phi_tm1 - phi_t)**2).dot(coeff_vector[:lap_dim])
        # Repulsive term computed per pair via the vmapped neg_loss_fn.
        neg_loss = neg_loss_vmap(phi_u, phi_v)
        loss = pos_loss + neg_loss
        # Clip gradients flowing through the per-example loss.
        loss = rlax.clip_gradient(loss, -grad_error_bound, grad_error_bound)
        chex.assert_shape(loss, (self._batch_size,))
        loss = jnp.mean(loss)
        return loss, (jnp.mean(pos_loss), jnp.mean(neg_loss))
    grads, (pos_loss, neg_loss) = jax.grad(
        lap_loss_fn, has_aux=True)(params, update_key)
    updates, new_opt_state = rep_optimizer.update(grads, opt_state)
    new_params = optax.apply_updates(params, updates)
    return rng_key, new_opt_state, new_params, pos_loss, neg_loss
@functools.partial(jax.jit, static_argnums=(0, 3, 12))
def train_rep(network_def, rep_params, optimizer, optimizer_state,
              states, next_states):
    """Run a training step.

    NOTE(review): this looks like an incomplete copy of `train` below.
    Several names used in the body (`target_params`, `actions`, `rewards`,
    `terminals`, `loss_weights`, `support`, `cumulative_gamma`,
    `target_distribution`, `losses`) are not parameters of this function and
    are unresolved at module level. `static_argnums=(0, 3, 12)` references
    argument index 12 although the signature has only 6 parameters, and the
    call site unpacks three values while four are returned. Confirm the
    intended signature before use.
    """
    def loss_fn(params, target, loss_multipliers):
        def q_online(state):
            return network_def.apply(params, state, support)
        logits = jax.vmap(q_online)(states).logits
        # Fetch the logits for its selected action. We use vmap to perform this
        # indexing across the batch.
        chosen_action_logits = jax.vmap(lambda x, y: x[y])(logits, actions)
        loss = jax.vmap(losses.softmax_cross_entropy_loss_with_logits)(
            target,
            chosen_action_logits)
        mean_loss = jnp.mean(loss_multipliers * loss)
        return mean_loss, loss
    def q_target(state):
        return network_def.apply(target_params, state, support)
    grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
    target = target_distribution(q_target,
                                 next_states,
                                 rewards,
                                 terminals,
                                 support,
                                 cumulative_gamma)
    # Get the unweighted loss without taking its mean for updating priorities.
    (mean_loss, loss), grad = grad_fn(rep_params, target, loss_weights)
    updates, optimizer_state = optimizer.update(grad, optimizer_state,
                                                params=rep_params)
    rep_params = optax.apply_updates(rep_params, updates)
    return optimizer_state, rep_params, loss, mean_loss
@functools.partial(jax.jit, static_argnums=(0, 3, 12))
def train(network_def, online_params, target_params, optimizer, optimizer_state,
          states, actions, next_states, rewards, terminals, loss_weights,
          support, cumulative_gamma):
    """Run a training step.

    Computes the projected categorical (C51-style) target, takes a gradient
    step on the weighted cross-entropy loss, and returns the updated
    optimizer state and parameters along with the per-example and mean loss.
    """
    def q_target(state):
        return network_def.apply(target_params, state, support)

    # Bellman target built from the (frozen) target network.
    bellman_target = target_distribution(q_target,
                                         next_states,
                                         rewards,
                                         terminals,
                                         support,
                                         cumulative_gamma)

    def loss_fn(params, target, loss_multipliers):
        def q_online(state):
            return network_def.apply(params, state, support)
        batch_logits = jax.vmap(q_online)(states).logits
        # Pick out each example's logits for the action actually taken.
        taken_logits = jax.vmap(lambda per_logits, a: per_logits[a])(
            batch_logits, actions)
        per_example = jax.vmap(losses.softmax_cross_entropy_loss_with_logits)(
            target, taken_logits)
        return jnp.mean(loss_multipliers * per_example), per_example

    # Keep the unweighted per-example loss for priority updates.
    (mean_loss, loss), grad = jax.value_and_grad(loss_fn, has_aux=True)(
        online_params, bellman_target, loss_weights)
    updates, optimizer_state = optimizer.update(grad, optimizer_state,
                                                params=online_params)
    online_params = optax.apply_updates(online_params, updates)
    return optimizer_state, online_params, loss, mean_loss
@functools.partial(jax.vmap, in_axes=(None, 0, 0, 0, None, None))
def target_distribution(target_network, next_states, rewards, terminals,
                        support, cumulative_gamma):
    """Builds the projected target distribution for one transition.

    Vmapped over the batch (the network, support and discount are shared).
    """
    # Terminal transitions contribute no discounted future value.
    continue_mask = 1. - terminals.astype(jnp.float32)
    discount = cumulative_gamma * continue_mask
    shifted_support = rewards + discount * support
    outputs = target_network(next_states)
    greedy_action = jnp.argmax(jnp.squeeze(outputs.q_values))
    greedy_probs = jnp.squeeze(outputs.probabilities)[greedy_action]
    projected = project_distribution(shifted_support, greedy_probs, support)
    return jax.lax.stop_gradient(projected)
@functools.partial(jax.jit, static_argnums=(0, 4, 5, 6, 7, 8, 10, 11))
def select_action(network_def, params, state, rng, num_actions, eval_mode,
                  epsilon_eval, epsilon_train, epsilon_decay_period,
                  training_steps, min_replay_history, epsilon_fn, support):
    """Epsilon-greedy action selection.

    Returns:
        (new_rng, action): the advanced RNG key and the chosen action --
        uniform-random with probability epsilon, greedy otherwise.
    """
    train_epsilon = epsilon_fn(epsilon_decay_period,
                               training_steps,
                               min_replay_history,
                               epsilon_train)
    epsilon = jnp.where(eval_mode, epsilon_eval, train_epsilon)
    rng, explore_key, action_key = jax.random.split(rng, num=3)
    explore = jax.random.uniform(explore_key) <= epsilon
    random_action = jax.random.randint(action_key, (), 0, num_actions)
    greedy_action = jnp.argmax(
        network_def.apply(params, state, support).q_values)
    return rng, jnp.where(explore, random_action, greedy_action)
@gin.configurable
class JaxRainbowAgent(dqn_agent.JaxDQNAgent):
"""A compact implementation of a simplified Rainbow agent."""
def __init__(self,
             num_actions,
             observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,
             observation_dtype=dqn_agent.NATURE_DQN_DTYPE,
             stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,
             network=networks.RainbowNetwork,
             rep_network=networks.NatureDQNNetwork,
             num_atoms=51,
             vmin=None,
             vmax=10.,
             gamma=0.99,
             update_horizon=1,
             min_replay_history=20000,
             update_period=4,
             target_update_period=8000,
             epsilon_fn=dqn_agent.linearly_decaying_epsilon,
             epsilon_train=0.01,
             epsilon_eval=0.001,
             epsilon_decay_period=250000,
             replay_scheme='prioritized',
             optimizer='adam',
             seed=None,
             summary_writer=None,
             summary_writing_frequency=500,
             allow_partial_reload=False,
             num_options=0,
             option_prob=0.0,
             rep_dim=10,
             preprocess_fn=None):
    """Initializes the Rainbow agent with options and a representation net.

    Args:
        num_actions: int, number of actions the agent can take.
        num_atoms: int, number of atoms in the categorical value distribution.
        vmin: float or None, minimum of the support; defaults to -vmax.
        vmax: float, maximum of the support.
        num_options: int, number of option sub-policies to maintain.
        option_prob: float, probability of acting with an option.
        rep_dim: int, output dimensionality of the representation network.
        preprocess_fn: optional callable applied to observations before the
            representation network; None selects the identity preprocessing.
            (Previously this name was read without being defined, raising
            NameError; it is now an explicit keyword argument.)
        Remaining arguments are forwarded to dqn_agent.JaxDQNAgent.
    """
    # We need this because some tools convert round floats into ints.
    vmax = float(vmax)
    self._num_atoms = num_atoms
    # If vmin is not specified, set it to -vmax similar to C51. Use an
    # explicit None test so an intentional vmin of 0.0 is respected
    # (the previous truthiness test silently replaced 0.0 with -vmax).
    vmin = vmin if vmin is not None else -vmax
    self._support = jnp.linspace(vmin, vmax, num_atoms)
    self._replay_scheme = replay_scheme
    self.num_options = num_options
    # Alias kept in sync: _train_step reads `self._num_options` while
    # _build_networks_and_optimizer reads `self.num_options`; without this
    # alias _train_step raised AttributeError.
    self._num_options = num_options
    self.option_prob = option_prob
    self.rep_dim = rep_dim
    if preprocess_fn is None:
        self.rep_network_def = rep_network(num_actions=rep_dim)
        self.rep_preprocess_fn = networks.identity_preprocess_fn
    else:
        self.rep_network_def = rep_network(num_actions=rep_dim,
                                           inputs_preprocessed=True)
        self.rep_preprocess_fn = preprocess_fn
    super(JaxRainbowAgent, self).__init__(
        num_actions=num_actions,
        observation_shape=observation_shape,
        observation_dtype=observation_dtype,
        stack_size=stack_size,
        network=functools.partial(network,
                                  num_atoms=num_atoms),
        gamma=gamma,
        update_horizon=update_horizon,
        min_replay_history=min_replay_history,
        update_period=update_period,
        target_update_period=target_update_period,
        epsilon_fn=epsilon_fn,
        epsilon_train=epsilon_train,
        epsilon_eval=epsilon_eval,
        epsilon_decay_period=epsilon_decay_period,
        optimizer=optimizer,
        seed=seed,
        summary_writer=summary_writer,
        summary_writing_frequency=summary_writing_frequency,
        allow_partial_reload=allow_partial_reload)
def _build_networks_and_optimizer(self):
    """Initializes the main/target networks, the per-option networks, the
    representation network, and all associated optimizer states."""
    self._rng, rng = jax.random.split(self._rng)
    self.online_params = self.network_def.init(rng, x=self.state,
                                               support=self._support)
    self.optimizer = dqn_agent.create_optimizer(self._optimizer_name)
    self.optimizer_state = self.optimizer.init(self.online_params)
    # Target network starts as a copy (reference) of the online parameters.
    self.target_network_params = self.online_params
    self.options = []
    for _ in range(self.num_options):
        self._rng, rng = jax.random.split(self._rng)
        online_params = self.network_def.init(rng, x=self.state,
                                              support=self._support)
        # Bug fix: initialize the option's optimizer state from the option's
        # own parameters (previously used the shared `self.online_params`).
        optimizer_state = self.optimizer.init(online_params)
        target_network_params = online_params
        self.options.append(Option(
            online_params=online_params,
            target_network_params=target_network_params,
            optimizer_state=optimizer_state))
    self._rng, rng = jax.random.split(self._rng)
    self.rep_params = self.rep_network_def.init(rng, x=self.state,)
    self.rep_optimizer_state = self.optimizer.init(self.rep_params)
def _build_replay_buffer(self):
    """Creates the replay buffer used by the agent.

    Both replay schemes share the same prioritized data structure; the
    'uniform' scheme simply assigns every transition the same priority.
    """
    if self._replay_scheme not in ('uniform', 'prioritized'):
        raise ValueError('Invalid replay scheme: {}'.format(self._replay_scheme))
    return prioritized_replay_buffer.OutOfGraphPrioritizedReplayBuffer(
        observation_shape=self.observation_shape,
        stack_size=self.stack_size,
        update_horizon=self.update_horizon,
        gamma=self.gamma,
        observation_dtype=self.observation_dtype)
# TODO(psc): Refactor this so we have a class _select_action that calls
# select_action with the right parameters. This will allow us to avoid
# overriding begin_episode.
def begin_episode(self, observation):
    """Returns the agent's first action for this episode.

    Args:
      observation: numpy array, the environment's initial observation.

    Returns:
      int, the selected action.
    """
    self._reset_state()
    self._record_observation(observation)
    # Only learn while not evaluating.
    if not self.eval_mode:
        self._train_step()
    self._rng, chosen = select_action(
        self.network_def,
        self.online_params,
        self.preprocess_fn(self.state),
        self._rng,
        self.num_actions,
        self.eval_mode,
        self.epsilon_eval,
        self.epsilon_train,
        self.epsilon_decay_period,
        self.training_steps,
        self.min_replay_history,
        self.epsilon_fn,
        self._support)
    # TODO(psc): Why a numpy array? Why not an int?
    self.action = onp.asarray(chosen)
    return self.action
def step(self, reward, observation):
    """Records the latest transition and returns the agent's next action."""
    self._last_observation = self._observation
    self._record_observation(observation)
    # Store the transition and learn only while not evaluating.
    if not self.eval_mode:
        self._store_transition(self._last_observation, self.action, reward, False)
        self._train_step()
    self._rng, chosen = select_action(
        self.network_def,
        self.online_params,
        self.preprocess_fn(self.state),
        self._rng,
        self.num_actions,
        self.eval_mode,
        self.epsilon_eval,
        self.epsilon_train,
        self.epsilon_decay_period,
        self.training_steps,
        self.min_replay_history,
        self.epsilon_fn,
        self._support)
    self.action = onp.asarray(chosen)
    return self.action
def _train_step(self):
"""Runs a single training step.
Runs training if both:
(1) A minimum number of frames have been added to the replay buffer.
(2) `training_steps` is a multiple of `update_period`.
Also, syncs weights from online_params to target_network_params if training
steps is a multiple of target update period.
"""
if self._replay.add_count > self.min_replay_history:
if self.training_steps % self.update_period == 0:
self._sample_from_replay_buffer()
states = self.preprocess_fn(self.replay_elements['state'])
next_states = self.preprocess_fn(self.replay_elements['next_state'])
self.rep_optimizer_state, self.rep_params, loss = train_rep(
self.rep_network_def,
self.rep_params,
self.optimizer,
self.rep_optimizer_state,
states,
next_states,)
for o in np.random.choice(self._num_options, 3, replace=False):
option = self.options[o]
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
option.optimizer_state, self.online_params, loss, mean_loss = train(
self.network_def,
option.online_params,
option.target_network_params,
self.optimizer,
option.optimizer_state,
self.preprocess_fn(self.replay_elements['state']),
self.replay_elements['action'],
self.preprocess_fn(self.replay_elements['next_state']),
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._support,
self.cumulative_gamma)
self._sample_from_replay_buffer()
if self._replay_scheme == 'prioritized':
probs = self.replay_elements['sampling_probabilities']
# Weight the loss by the inverse priorities.
loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)
loss_weights /= jnp.max(loss_weights)
else:
loss_weights = jnp.ones(self.replay_elements['state'].shape[0])
self.optimizer_state, self.online_params, loss, mean_loss = train(
self.network_def,
self.online_params,
self.target_network_params,
self.optimizer,
self.optimizer_state,
self.preprocess_fn(self.replay_elements['state']),
self.replay_elements['action'],
self.preprocess_fn(self.replay_elements['next_state']),
self.replay_elements['reward'],
self.replay_elements['terminal'],
loss_weights,
self._support,
self.cumulative_gamma)
if self._replay_scheme == 'prioritized':
self._replay.set_priority(self.replay_elements['indices'],
jnp.sqrt(loss + 1e-10))
if (self.summary_writer is not None and
self.training_steps > 0 and
self.training_steps % self.summary_writing_frequency == 0):
with self.summary_writer.as_default():
tf.summary.scalar('CrossEntropyLoss', mean_loss,
step=self.training_steps)
self.summary_writer.flush()
if hasattr(self, 'collector_dispatcher'):
self.collector_dispatcher.write( | [statistics_instance.StatisticsInstance( | 3 | 2023-10-15 22:14:16+00:00 | 8k |
hsouri/bob-classification | linear_probe.py | [
{
"identifier": "interpolate_pos_embed",
"path": "mae_util/pos_embed.py",
"snippet": "def interpolate_pos_embed(model, checkpoint_model):\n if 'pos_embed' in checkpoint_model:\n pos_embed_checkpoint = checkpoint_model['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n ... | import argparse
import datetime
import json
import numpy as np
import os
import time
import torch
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.nn as nn
import sys
import timm
import mae_util.misc as misc
import models
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter
from timm.models.layers import trunc_normal_
from mae_util.pos_embed import interpolate_pos_embed
from mae_util.misc import NativeScalerWithGradNormCount as NativeScaler
from mae_util.lars import LARS
from mae_util.crop import RandomResizedCrop
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint, \
convert_splitbn_model, convert_sync_batchnorm, model_parameters
from engine_linprobe import train_one_epoch, evaluate | 5,241 | log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
#model = models_vit.__dict__[args.model](
# num_classes=args.nb_classes,
# global_pool=args.global_pool,
#)
model = create_model(
args.model,
pretrained=True,
num_classes=args.nb_classes,
drop_rate=0.0,
drop_path_rate=0.0,
#global_pool=args.global_pool)
global_pool="token")
if args.model == 'resnet50_dino':
if model.fc.out_features != args.nb_classes:
model.fc = nn.Linear(model.fc.in_features, args.nb_classes).to(device)
trunc_normal_(model.fc.weight, std=0.01)
elif args.model == 'stable_diffusion_v1':
trunc_normal_(model.unet.head.weight, std=0.01)
else:
trunc_normal_(model.head.weight, std=0.01)
#if args.finetune and not args.eval:
# checkpoint = torch.load(args.finetune, map_location='cpu')
# print("Load pre-trained checkpoint from: %s" % args.finetune)
# checkpoint_model = checkpoint['model']
# state_dict = model.state_dict()
# for k in ['head.weight', 'head.bias']:
# if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
# print(f"Removing key {k} from pretrained checkpoint")
# del checkpoint_model[k]
# interpolate position embedding
# interpolate_pos_embed(model, checkpoint_model)
# load pre-trained model
# msg = model.load_state_dict(checkpoint_model, strict=False)
# print(msg)
# if args.global_pool:
# assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
# else:
# assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
# # manually initialize fc layer: following MoCo v3
# trunc_normal_(model.head.weight, std=0.01)
# for linear prob only
# hack: revise model's head with BN
if "vit" in args.model or "swin" in args.model or "conv" in args.model:
model.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.head.in_features, affine=False, eps=1e-6), model.head)
elif "stable" in args.model:
model.unet.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.unet.head.in_features, affine=False, eps=1e-6), model.unet.head)
# freeze all but the head
for _, p in model.named_parameters():
p.requires_grad = False
if args.model == 'resnet50_dino':
for _, p in model.fc.named_parameters():
p.requires_grad = True
elif args.model == 'stable_diffusion_v1':
for _, p in model.unet.head.named_parameters():
p.requires_grad = True
else:
for _, p in model.head.named_parameters():
p.requires_grad = True
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
if args.model == 'resnet50_dino':
optimizer = LARS(model_without_ddp.fc.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif args.model == 'stable_diffusion_v1':
optimizer = LARS(model_without_ddp.unet.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
else:
optimizer = LARS(model_without_ddp.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
print(optimizer)
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# DeiT: https://github.com/facebookresearch/deit
# MoCo v3: https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
#assert timm.__version__ == "0.3.2" # version check
# Make the repository root and its `models/` directory importable when this
# file is executed as a script (rather than installed as a package).
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'models'))
def get_args_parser():
    """Builds the command-line parser for MAE-style linear probing."""
    p = argparse.ArgumentParser('MAE linear probing for image classification',
                                add_help=False)

    # --- Schedule / batching ---
    p.add_argument('--batch_size', default=512, type=int,
                   help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus')
    p.add_argument('--img_size', default=224, type=int,
                   help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus')
    p.add_argument('--epochs', default=90, type=int)
    p.add_argument('--accum_iter', default=1, type=int,
                   help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)')

    # --- Model ---
    p.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL',
                   help='Name of model to train')

    # --- Optimizer ---
    p.add_argument('--weight_decay', type=float, default=0,
                   help='weight decay (default: 0 for linear probe following MoCo v1)')
    p.add_argument('--lr', type=float, default=None, metavar='LR',
                   help='learning rate (absolute lr)')
    p.add_argument('--blr', type=float, default=0.1, metavar='LR',
                   help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')
    p.add_argument('--min_lr', type=float, default=0., metavar='LR',
                   help='lower lr bound for cyclic schedulers that hit 0')
    p.add_argument('--warmup_epochs', type=int, default=10, metavar='N',
                   help='epochs to warmup LR')

    # --- Finetuning ---
    p.add_argument('--finetune', default='',
                   help='finetune from checkpoint')
    p.add_argument('--global_pool', action='store_true')
    p.set_defaults(global_pool=False)
    p.add_argument('--cls_token', action='store_false', dest='global_pool',
                   help='Use class token instead of global pool for classification')

    # --- Dataset / logging / runtime ---
    p.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
                   help='dataset path')
    p.add_argument('--nb_classes', default=1000, type=int,
                   help='number of the classification types')
    p.add_argument('--output_dir', default='./output_dir',
                   help='path where to save, empty for no saving')
    p.add_argument('--log_dir', default='./output_dir',
                   help='path where to tensorboard log')
    p.add_argument('--device', default='cuda',
                   help='device to use for training / testing')
    p.add_argument('--seed', default=0, type=int)
    p.add_argument('--resume', default='',
                   help='resume from checkpoint')
    p.add_argument('--start_epoch', default=0, type=int, metavar='N',
                   help='start epoch')
    p.add_argument('--eval', action='store_true',
                   help='Perform evaluation only')
    p.add_argument('--dist_eval', action='store_true', default=False,
                   help='Enabling distributed evaluation (recommended during training for faster monitor')
    p.add_argument('--num_workers', default=10, type=int)
    p.add_argument('--pin_mem', action='store_true',
                   help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    p.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    p.set_defaults(pin_mem=True)

    # --- Distributed training ---
    p.add_argument('--world_size', default=1, type=int,
                   help='number of distributed processes')
    p.add_argument('--local_rank', default=-1, type=int)
    p.add_argument('--dist_on_itp', action='store_true')
    p.add_argument('--dist_url', default='env://',
                   help='url used to set up distributed training')
    return p
def main(args):
misc.init_distributed_mode(args)
print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
print("{}".format(args).replace(', ', ',\n'))
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + misc.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
# linear probe: weak augmentation
img_res = args.img_size
eval_res = int(args.img_size * 1.143)
print(img_res, eval_res)
transform_train = transforms.Compose([
RandomResizedCrop(args.img_size, interpolation=3),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
transform_val = transforms.Compose([
transforms.Resize(int(args.img_size * 1.143), interpolation=3),
transforms.CenterCrop(args.img_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
dataset_train = datasets.ImageFolder(os.path.join(args.data_path, 'train'), transform=transform_train)
dataset_val = datasets.ImageFolder(os.path.join(args.data_path, 'val'), transform=transform_val)
print(dataset_train)
print(dataset_val)
if True: # args.distributed:
num_tasks = misc.get_world_size()
global_rank = misc.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True) # shuffle=True to reduce monitor bias
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None and not args.eval:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = SummaryWriter(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
#model = models_vit.__dict__[args.model](
# num_classes=args.nb_classes,
# global_pool=args.global_pool,
#)
model = create_model(
args.model,
pretrained=True,
num_classes=args.nb_classes,
drop_rate=0.0,
drop_path_rate=0.0,
#global_pool=args.global_pool)
global_pool="token")
if args.model == 'resnet50_dino':
if model.fc.out_features != args.nb_classes:
model.fc = nn.Linear(model.fc.in_features, args.nb_classes).to(device)
trunc_normal_(model.fc.weight, std=0.01)
elif args.model == 'stable_diffusion_v1':
trunc_normal_(model.unet.head.weight, std=0.01)
else:
trunc_normal_(model.head.weight, std=0.01)
#if args.finetune and not args.eval:
# checkpoint = torch.load(args.finetune, map_location='cpu')
# print("Load pre-trained checkpoint from: %s" % args.finetune)
# checkpoint_model = checkpoint['model']
# state_dict = model.state_dict()
# for k in ['head.weight', 'head.bias']:
# if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
# print(f"Removing key {k} from pretrained checkpoint")
# del checkpoint_model[k]
# interpolate position embedding
# interpolate_pos_embed(model, checkpoint_model)
# load pre-trained model
# msg = model.load_state_dict(checkpoint_model, strict=False)
# print(msg)
# if args.global_pool:
# assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'}
# else:
# assert set(msg.missing_keys) == {'head.weight', 'head.bias'}
# # manually initialize fc layer: following MoCo v3
# trunc_normal_(model.head.weight, std=0.01)
# for linear prob only
# hack: revise model's head with BN
if "vit" in args.model or "swin" in args.model or "conv" in args.model:
model.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.head.in_features, affine=False, eps=1e-6), model.head)
elif "stable" in args.model:
model.unet.head = torch.nn.Sequential(torch.nn.BatchNorm1d(model.unet.head.in_features, affine=False, eps=1e-6), model.unet.head)
# freeze all but the head
for _, p in model.named_parameters():
p.requires_grad = False
if args.model == 'resnet50_dino':
for _, p in model.fc.named_parameters():
p.requires_grad = True
elif args.model == 'stable_diffusion_v1':
for _, p in model.unet.head.named_parameters():
p.requires_grad = True
else:
for _, p in model.head.named_parameters():
p.requires_grad = True
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params (M): %.2f' % (n_parameters / 1.e6))
eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()
if args.lr is None: # only base_lr is specified
args.lr = args.blr * eff_batch_size / 256
print("base lr: %.2e" % (args.lr * 256 / eff_batch_size))
print("actual lr: %.2e" % args.lr)
print("accumulate grad iterations: %d" % args.accum_iter)
print("effective batch size: %d" % eff_batch_size)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
if args.model == 'resnet50_dino':
optimizer = LARS(model_without_ddp.fc.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif args.model == 'stable_diffusion_v1':
optimizer = LARS(model_without_ddp.unet.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
else:
optimizer = LARS(model_without_ddp.head.parameters(), lr=args.lr, weight_decay=args.weight_decay)
print(optimizer) | loss_scaler = NativeScaler() | 2 | 2023-10-20 16:28:17+00:00 | 8k |
LeoQLi/NeuralGF | train_test.py | [
{
"identifier": "Network",
"path": "network.py",
"snippet": "class Network(nn.Module):\n def __init__(self, num_points, num_knn):\n super(Network, self).__init__()\n self.num_points = num_points\n self.num_knn = num_knn\n self.num_iter = 2\n\n self.net = MLPNet_line... | import os, sys
import argparse
import time
import math
import numpy as np
import torch
import torch.utils.data
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import scipy.spatial as spatial
import torch.multiprocessing as mp
from network import Network
from datasets import BaseDataset
from mesh import extract_mesh
from misc import seed_all, get_log, get_logger, creat_logger, knn_gather_np | 7,154 |
assert pcl_raw.shape == pred_norm.shape
if args.avg_nor:
# k_idex = []
ptree = spatial.cKDTree(pcl_raw)
_, k_idex = ptree.query(pcl_raw, k=1, distance_upper_bound=0.3)
if k_idex.ndim == 1:
k_idex = k_idex[:, None]
pred_norm = knn_gather_np(pred_norm, k_idex)
pred_norm = pred_norm.mean(axis=1)
if args.save_normal_npy or args.save_normal_xyz:
normal_dir = os.path.join(output_dir, 'pred_normal')
os.makedirs(normal_dir, exist_ok=True)
path_save = os.path.join(normal_dir, shape_name)
if args.save_normal_npy:
np.save(path_save + '_normal.npy', pred_norm)
if args.save_normal_xyz:
pc_nor = np.concatenate([pcl_raw, pred_norm], axis=-1)
# k = 1000; n = 50 # 10
# pc_nor = pc_nor[n*k:n*k+k, :]
np.savetxt(path_save + '.xyz', pc_nor, fmt='%.6f')
### evaluation
nn = np.sum(np.multiply(-1 * nor_gt, pred_norm), axis=1)
nn[nn > 1] = 1
nn[nn < -1] = -1
ang = np.rad2deg(np.arccos(np.abs(nn)))
rms = np.sqrt(np.mean(np.square(ang)))
ang_o = np.rad2deg(np.arccos(nn))
ids = ang_o < 90.0
p90 = sum(ids) / pred_norm.shape[0] * 100
### if more than half of points have wrong orientation, then flip all normals
if p90 < 50.0:
nn = np.sum(np.multiply(nor_gt, pred_norm), axis=1)
nn[nn > 1] = 1
nn[nn < -1] = -1
ang_o = np.rad2deg(np.arccos(nn))
ids = ang_o < 90.0
p90 = sum(ids) / pred_norm.shape[0] * 100
rms_o = np.sqrt(np.mean(np.square(ang_o)))
list_rms.append(rms)
list_rms_o.append(rms_o)
list_p90.append(p90)
if np.mean(p90) < 90.0:
list_bad[shape_name] = p90
logger.info('RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %% (%s)' % (rms, rms_o, p90, shape_name))
if args.save_mesh:
mesh_dir = os.path.join(output_dir, 'recon_mesh')
os.makedirs(mesh_dir, exist_ok=True)
mesh = extract_mesh(my_model.net.forward, bbox_min=test_set.bbox_min, bbox_max=test_set.bbox_max,
points_gt=pcl_raw, mesh_far=args.mesh_far)
mesh.export(os.path.join(mesh_dir, '%s.obj' % shape_name))
if len(list_p90) > 0:
logger.info('Time: %.2f sec\n' % time_sum)
logger.info('Average || RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %%' % (np.mean(list_rms), np.mean(list_rms_o), np.mean(list_p90)))
ss = ''
for k, v in list_bad.items():
ss += '%s: %.3f %%\n' % (k, v)
logger.info('Bad results in %d shapes: \n%s' % (len(list_p90), ss))
return 1
### Arguments
args = parse_arguments()
# Derive the default test-set list from the dataset name when not given.
if len(args.testset_list) == 0:
    args.testset_list = 'testset_' + args.data_set
# Per-dataset hyperparameter overrides (learning rate, neighborhood sizes,
# iteration counts).
if args.data_set in ['SceneNN', 'Semantic3D', 'KITTI_sub', 'Others', '3DScene']:
    args.lr = 0.00001
    args.dis_k = 64
if args.data_set in ['PCPNet']:
    args.dis_k = 25
    # args.lr = 0.0007
    eval_list = ['testset_no_noise', 'testset_low_noise', 'testset_med_noise', 'testset_high_noise',
                 'testset_vardensity_striped', 'testset_vardensity_gradient']
if args.data_set in ['FamousShape']:
    args.dis_k = 50
    args.lr = 0.002
    eval_list = ['testset_noise_clean', 'testset_noise_low', 'testset_noise_med', 'testset_noise_high',
                 'testset_density_stripe', 'testset_density_gradient']
if args.data_set == 'FamousShape5k':
    args.num_points = 1000
    args.dis_k = 10
if args.data_set == 'WireframePC':
    args.max_iter = 10000
    args.save_inter = 2500
    args.num_points = 300
    args.dis_k = 3
    args.warn_up = 2000
    # args.lr = 0.0001
if args.data_set == 'NestPC':
    args.dis_k = 50
    # args.num_knn = 6
    args.lr = 0.0001
# Bind the selected GPU and fix random seeds for reproducibility.
torch.cuda.set_device(args.gpu)
_device = torch.device('cuda')
seed_all(args.seed)
# Experiment tag defaults to the dataset name.
args.tag = args.data_set
if __name__ == '__main__':
if args.mode == 'train':
num_processes = 1
|
def parse_arguments():
    """Build the CLI argument parser and parse ``sys.argv``.

    Returns
    -------
    argparse.Namespace
        Parsed arguments; per-dataset overrides are applied later at module level.
    """
    ap = argparse.ArgumentParser()
    # General / run selection
    ap.add_argument('--gpu', type=int, default=0)
    ap.add_argument('--mode', type=str, default='')
    ap.add_argument('--log_root', type=str, default='./log')
    ap.add_argument('--data_set', type=str, default='',
                    choices=['PCPNet', 'FamousShape', 'FamousShape5k', 'SceneNN', 'Others', 'KITTI_sub', 'Semantic3D', '3DScene', 'WireframePC', 'NestPC', 'Plane'])
    ### Train
    ap.add_argument('--seed', type=int, default=2023)
    ap.add_argument('--tag', type=str, default=None)
    # ``type=eval`` turns the literal strings "True"/"False" into booleans.
    ap.add_argument('--logging', type=eval, default=True, choices=[True, False])
    ap.add_argument('--max_iter', type=int, default=20000)
    ap.add_argument('--save_inter', type=int, default=10000)
    ap.add_argument('--warn_up', type=int, default=10000)
    ap.add_argument('--lr', type=float, default=0.001)
    ### Dataset and loader
    ap.add_argument('--dataset_root', type=str, default='/data1/lq/Dataset/')
    ap.add_argument('--testset_list', type=str, default='')
    ap.add_argument('--batch_size', type=int, default=1)
    ap.add_argument('--num_workers', type=int, default=6)
    ap.add_argument('--num_points', type=int, default=5000)
    ap.add_argument('--num_query', type=int, default=10)
    ap.add_argument('--num_knn', type=int, default=64)
    ap.add_argument('--dis_k', type=int, default=50)
    ap.add_argument('--dis_scale', type=float, default=0.15)
    ### Test
    ap.add_argument('--ckpt_dir', type=str, default='')
    ap.add_argument('--ckpt_iter', type=int, default=None)
    ap.add_argument('--save_normal_npy', type=eval, default=False, choices=[True, False])
    ap.add_argument('--save_normal_xyz', type=eval, default=False, choices=[True, False])
    ap.add_argument('--save_mesh', type=eval, default=False, choices=[True, False])
    ap.add_argument('--avg_nor', type=eval, default=False, choices=[True, False])
    ap.add_argument('--mesh_far', type=float, default=-1.0)
    return ap.parse_args()
def update_learning_rate(optimizer, iter_step, init_lr, max_iter, warn_up=None):
    """Set the LR of all param groups: linear warm-up, then cosine decay.

    Parameters
    ----------
    optimizer : torch.optim.Optimizer
        Optimizer whose ``param_groups`` LRs are updated in place.
    iter_step : int
        Current 1-based iteration.
    init_lr : float
        Peak learning rate, reached at the end of warm-up.
    max_iter : int
        Total number of iterations (cosine reaches ~0 here).
    warn_up : int, optional
        Warm-up length in iterations. Defaults to the global CLI value
        ``args.warn_up`` (original behavior); passing it explicitly makes the
        function usable without the module-level ``args``.
    """
    if warn_up is None:
        warn_up = args.warn_up  # 2000, 10000
    # Linear ramp 0->1 during warm-up, then half-cosine 1->0 afterwards.
    lr = (iter_step / warn_up) if iter_step < warn_up else 0.5 * (math.cos((iter_step - warn_up) / (max_iter - warn_up) * math.pi) + 1)
    lr = lr * init_lr
    for g in optimizer.param_groups:
        g['lr'] = lr
def train(data_list, log_dir, log_name, ckpt_dir, id=None):
    """Overfit one network per shape listed in ``data_list`` and save checkpoints.

    A fresh ``Network`` + Adam optimizer is created for every shape; checkpoints
    are written to ``ckpt_dir`` every ``args.save_inter`` iterations.
    ``id`` is an optional worker id appended to the logger name.
    """
    ### Dataset
    train_set = BaseDataset(root=args.dataset_root,
                            data_set=args.data_set,
                            data_list=data_list,
                            num_points=args.num_points,
                            num_query=args.num_query,
                            num_knn=args.num_knn,
                            dis_k=args.dis_k,
                            dis_scale=args.dis_scale,
                        )
    dataloader = torch.utils.data.DataLoader(
        train_set,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,  # faster speed
    )
    log_flag = True
    num_shapes = len(train_set.cur_sets)
    for shape_idx, shape_name in enumerate(train_set.cur_sets):
        ### Model (re-initialized per shape: this is per-shape overfitting)
        my_model = Network(args.num_points, num_knn=args.num_knn).to(_device).train()
        optimizer = optim.Adam(my_model.parameters(), lr=args.lr)
        train_set.process_data(shape_name)
        iter_dataloader = iter(dataloader)
        if log_flag:
            log_name = 'train(%s)(%d)' % (log_name, os.getpid())
            if id is not None:
                log_name = log_name + '-%d' % id
            logger = get_logger(args, log_dir, log_name, file_name='log_'+data_list, model=my_model)
            log_flag = False
        time_sum = 0
        for iter_i in range(1, args.max_iter+1):
            update_learning_rate(optimizer, iter_i, init_lr=args.lr, max_iter=args.max_iter)
            # BUG FIX: Python iterators expose ``__next__`` via the builtin
            # ``next()``; the ``.next()`` method relied on a legacy torch alias
            # removed in torch 1.13. ``next(iter_dataloader)`` works everywhere.
            data = next(iter_dataloader)
            start_time = time.time()
            pcl_raw = data['pcl_raw'].to(_device)        # (B, M, 3), M > N
            pcl_source = data['pcl_source'].to(_device)  # (B, N, 3)
            knn_idx = data['knn_idx'].to(_device)        # (B, N, K)
            pcl_raw_sub = data['pcl_raw_sub'].to(_device) if 'pcl_raw_sub' in data else None  # (B, N, 3)
            ### Reset gradient and model state
            my_model.train()
            optimizer.zero_grad()
            # NOTE(review): if 'pcl_raw_sub' is absent this concatenates None
            # and will raise — presumably the dataset always provides it; verify.
            pcl_source = torch.cat([pcl_source, pcl_raw_sub], dim=-2)
            grad_norm = my_model(pcl_source)
            loss, loss_tuple = my_model.get_loss(pcl_raw=pcl_raw, pcl_source=pcl_source, knn_idx=knn_idx)
            ### Backward and optimize
            loss.backward()
            optimizer.step()
            elapsed_time = time.time() - start_time
            time_sum += elapsed_time
            if iter_i % (args.save_inter//10) == 0:
                ss = ''
                for l in loss_tuple:
                    ss += '%.6f+' % l.item()
                logger.info('shape:%d/%d, iter:%d/%d, loss=%.6f(%s), lr=%.6f' % (
                            shape_idx+1, num_shapes, iter_i, args.max_iter, loss, ss[:-1], optimizer.param_groups[0]['lr']))
            if iter_i % args.save_inter == 0 or iter_i == args.max_iter:
                model_filename = os.path.join(ckpt_dir, shape_name + '_%d.pt' % iter_i)
                torch.save(my_model.state_dict(), model_filename)
                logger.info('Save model: ' + model_filename)
                # pc_nor = torch.cat([pcl_source, grad_norm], dim=-1)[0].cpu().detach().numpy()
                # np.savetxt(model_filename[:-3] + '.txt', pc_nor, fmt='%.6f')
        del my_model, optimizer
    logger.info('Time: %.2f sec\n' % time_sum)
    return 1
def test(data_list):
    """Evaluate the per-shape trained checkpoints on ``data_list``.

    For each shape: load its checkpoint, predict point-wise normals (in chunks
    of ``max_n`` points for very large clouds), and log unoriented/oriented
    RMSE plus the percentage of correctly oriented normals. Optionally saves
    predicted normals (.npy/.xyz) and a reconstructed mesh.
    """
    import glob  # local import: only used here to enumerate checkpoint files
    # BUG FIX: the original asserted ``len()`` of the glob *pattern string*
    # (always > 0). Expand the pattern so the check actually verifies that at
    # least one checkpoint exists before evaluation starts.
    ckpt_paths = glob.glob(os.path.join(args.log_root, args.ckpt_dir, 'ckpts/*.pt'))
    assert len(ckpt_paths) > 0
    ### Dataset
    test_set = BaseDataset(root=args.dataset_root,
                            data_set=args.data_set,
                            data_list=data_list,
                        )
    ### Model
    print('Building model ...')
    my_model = Network(args.num_points, num_knn=args.num_knn).to(_device).eval()
    ### Log
    PID = os.getpid()
    output_dir = os.path.join(args.log_root, args.ckpt_dir, 'test_%s' % args.ckpt_iter)
    os.makedirs(output_dir, exist_ok=True)
    logger = creat_logger('test(%d)(%s-%s)' % (PID, args.ckpt_dir, args.ckpt_iter), output_dir)
    logger.info('Command: {}'.format(' '.join(sys.argv)))
    trainable_num = sum(p.numel() for p in my_model.parameters() if p.requires_grad)
    logger.info('Num_params_trainable: %d' % trainable_num)
    max_n = int(2e5)  # max points processed in one forward pass
    list_bad = {}
    list_rms = []
    list_rms_o = []
    list_p90 = []
    time_sum = 0
    for shape_idx, shape_name in enumerate(test_set.cur_sets):
        ### load the trained model
        ckpt_path = os.path.join(args.log_root, args.ckpt_dir, 'ckpts/%s_%s.pt' % (shape_name, args.ckpt_iter))
        if not os.path.exists(ckpt_path):
            logger.info('File not exist: ' + ckpt_path)
            continue
        my_model.load_state_dict(torch.load(ckpt_path, map_location=_device), strict=False)
        ### load a point cloud and shuffle the order of points
        pcl_raw, nor_gt = test_set.get_data(shape_name)   # (N, 3)
        start_time = time.time()
        num_point = pcl_raw.shape[0]
        rand_idxs = np.random.choice(num_point, num_point, replace=False)
        pcl = pcl_raw[rand_idxs, :3]
        ### if there are too many points, the point cloud will be processed in batches,
        ### the number of output vectors may be less than the number of initial points (decided by remainder).
        if num_point <= max_n:
            pcl_source = torch.from_numpy(pcl).float().to(_device)
            with torch.no_grad():
                grad_norm = my_model(pcl_source)
            grad_norm = grad_norm.cpu().detach().numpy()
        else:
            # Pad with the first ``remainder`` points so the cloud splits into
            # exactly k chunks of max_n, then drop the padded outputs.
            k = math.ceil(num_point / max_n)
            remainder = int(max_n * k % num_point)
            print('Split data: ', num_point, k, remainder)
            pcl_new = np.concatenate((pcl, pcl[:remainder]), axis=0)
            pcl_source = torch.from_numpy(pcl_new).float()   # (max_n*k, D)
            grad_norm = np.zeros((pcl_new.shape[0], 3))      # (N, 3)
            with torch.no_grad():
                for i in range(k):
                    grad_norm_s = my_model(pcl_source[max_n*i:max_n*(i+1)].to(_device))
                    grad_norm[max_n*i:max_n*(i+1)] = grad_norm_s.cpu().detach().numpy()
            grad_norm = grad_norm[:max_n*k-remainder]
        ### reorder and normalize the vectors, eliminate zero values
        pred_norm = np.zeros_like(grad_norm)
        pred_norm[rand_idxs, :] = grad_norm
        pred_norm[np.linalg.norm(pred_norm, axis=-1) == 0.0] = 1.0
        pred_norm /= np.linalg.norm(pred_norm, axis=-1, keepdims=True)
        elapsed_time = time.time() - start_time
        time_sum += elapsed_time
        assert pcl_raw.shape == pred_norm.shape
        if args.avg_nor:
            # Smooth predictions by averaging normals over spatial neighbors.
            ptree = spatial.cKDTree(pcl_raw)
            _, k_idex = ptree.query(pcl_raw, k=1, distance_upper_bound=0.3)
            if k_idex.ndim == 1:
                k_idex = k_idex[:, None]
            pred_norm = knn_gather_np(pred_norm, k_idex)
            pred_norm = pred_norm.mean(axis=1)
        if args.save_normal_npy or args.save_normal_xyz:
            normal_dir = os.path.join(output_dir, 'pred_normal')
            os.makedirs(normal_dir, exist_ok=True)
            path_save = os.path.join(normal_dir, shape_name)
            if args.save_normal_npy:
                np.save(path_save + '_normal.npy', pred_norm)
            if args.save_normal_xyz:
                pc_nor = np.concatenate([pcl_raw, pred_norm], axis=-1)
                # k = 1000; n = 50 # 10
                # pc_nor = pc_nor[n*k:n*k+k, :]
                np.savetxt(path_save + '.xyz', pc_nor, fmt='%.6f')
        ### evaluation: angles are computed from the clamped dot product
        nn = np.sum(np.multiply(-1 * nor_gt, pred_norm), axis=1)
        nn[nn > 1] = 1
        nn[nn < -1] = -1
        ang = np.rad2deg(np.arccos(np.abs(nn)))   # unoriented angle error
        rms = np.sqrt(np.mean(np.square(ang)))
        ang_o = np.rad2deg(np.arccos(nn))         # oriented angle error
        ids = ang_o < 90.0
        p90 = sum(ids) / pred_norm.shape[0] * 100
        ### if more than half of points have wrong orientation, then flip all normals
        if p90 < 50.0:
            nn = np.sum(np.multiply(nor_gt, pred_norm), axis=1)
            nn[nn > 1] = 1
            nn[nn < -1] = -1
            ang_o = np.rad2deg(np.arccos(nn))
            ids = ang_o < 90.0
            p90 = sum(ids) / pred_norm.shape[0] * 100
        rms_o = np.sqrt(np.mean(np.square(ang_o)))
        list_rms.append(rms)
        list_rms_o.append(rms_o)
        list_p90.append(p90)
        if np.mean(p90) < 90.0:
            list_bad[shape_name] = p90
        logger.info('RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %% (%s)' % (rms, rms_o, p90, shape_name))
        if args.save_mesh:
            mesh_dir = os.path.join(output_dir, 'recon_mesh')
            os.makedirs(mesh_dir, exist_ok=True)
            mesh = extract_mesh(my_model.net.forward, bbox_min=test_set.bbox_min, bbox_max=test_set.bbox_max,
                                points_gt=pcl_raw, mesh_far=args.mesh_far)
            mesh.export(os.path.join(mesh_dir, '%s.obj' % shape_name))
    if len(list_p90) > 0:
        logger.info('Time: %.2f sec\n' % time_sum)
        logger.info('Average || RMSE_U: %.3f, RMSE_O: %.3f, Correct orientation: %.3f %%' % (np.mean(list_rms), np.mean(list_rms_o), np.mean(list_p90)))
        ss = ''
        for k, v in list_bad.items():
            ss += '%s: %.3f %%\n' % (k, v)
        logger.info('Bad results in %d shapes: \n%s' % (len(list_p90), ss))
    return 1
### Arguments
# Post-process the parsed CLI arguments with per-dataset defaults.
# NOTE: this runs at import time (module top level), before __main__ dispatch.
args = parse_arguments()
if len(args.testset_list) == 0:
    # Default test-list file name is derived from the dataset name.
    args.testset_list = 'testset_' + args.data_set
# Scene-scale / scanned datasets: smaller LR, larger neighborhood.
if args.data_set in ['SceneNN', 'Semantic3D', 'KITTI_sub', 'Others', '3DScene']:
    args.lr = 0.00001
    args.dis_k = 64
if args.data_set in ['PCPNet']:
    args.dis_k = 25
    # args.lr = 0.0007
    # eval_list enumerates the per-noise/per-density test splits for this dataset.
    eval_list = ['testset_no_noise', 'testset_low_noise', 'testset_med_noise', 'testset_high_noise',
                 'testset_vardensity_striped', 'testset_vardensity_gradient']
if args.data_set in ['FamousShape']:
    args.dis_k = 50
    args.lr = 0.002
    eval_list = ['testset_noise_clean', 'testset_noise_low', 'testset_noise_med', 'testset_noise_high',
                 'testset_density_stripe', 'testset_density_gradient']
if args.data_set == 'FamousShape5k':
    args.num_points = 1000
    args.dis_k = 10
# Sparse wireframe clouds: fewer points, tiny neighborhoods, shorter schedule.
if args.data_set == 'WireframePC':
    args.max_iter = 10000
    args.save_inter = 2500
    args.num_points = 300
    args.dis_k = 3
    args.warn_up = 2000
    # args.lr = 0.0001
if args.data_set == 'NestPC':
    args.dis_k = 50
    # args.num_knn = 6
    args.lr = 0.0001
torch.cuda.set_device(args.gpu)
_device = torch.device('cuda')  # all tensors/models are placed on this GPU
seed_all(args.seed)
args.tag = args.data_set
if __name__ == '__main__':
if args.mode == 'train':
num_processes = 1
| log_dir, log_name, ckpt_dir = get_log(args) | 4 | 2023-10-22 08:51:50+00:00 | 8k |
Salz0/telegram_flea | main.py | [
{
"identifier": "User",
"path": "models.py",
"snippet": "class User(BaseModel):\n \"\"\"\n The model for the Telegram user.\n\n This model stores all the information about the user.\n It is also used to store all the authentication-related information.\n \"\"\"\n\n id = fields.BigIntFi... | import os
import aiogram
from asyncio import gather
from pathlib import Path
from aiogram import types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.contrib.middlewares.i18n import I18nMiddleware
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters import CommandStart
from aiogram.dispatcher.filters.state import StatesGroup, State
from aiogram.types.callback_query import CallbackQuery
from dotenv import load_dotenv
from models import User, Message
from po_compile import compile_all_languages
from utils import tortoise_orm
from utils.data_validation import validate_photo_as_document
from utils.generalization import create_message_instance
from utils.loguru_logging import logger
from utils.redis_storage import redis_storage
from keyboards import (
start_keyboard,
sell_keyboard,
cancel_listing_keyboard,
moderator_keyboard,
empty_inline_keyboard,
) | 3,810 |
async def register_publication_into_db(destination, message):
    """Persist a pending listing: store the photo bytes from ``destination``
    together with the originating Telegram ``message``."""
    photo_bytes = Path(destination).read_bytes()
    await create_message_instance(message=message, content=photo_bytes, status="pending")
@dp.message_handler(
    state=SellItem.waiting_for_photo, content_types=aiogram.types.ContentTypes.PHOTO
)
async def enter_photo(message: aiogram.types.Message, state: FSMContext):
    """Photo step of the /sell flow: save the image, record the pending
    listing in the DB, then submit the post for moderation."""
    largest_photo = message.photo[-1]  # last size entry is the highest resolution
    await largest_photo.download(destination_file="item_photo.jpg")
    await register_publication_into_db("item_photo.jpg", message)
    await publish_post(message, state)
@dp.callback_query_handler(lambda query: query.data[:7] == "cancel ")
async def cancel_sell(query: CallbackQuery):
    """Handle the 'cancel listing' inline button.

    Callback payload format: ``cancel <channel_msg_id>.<msg_id>``. Deletes the
    channel post, marks the listing as delisted, and notifies the seller.
    """
    data = query.data
    if not data or len(data.split("cancel ")) != 2:
        return await query.answer(i18n.gettext("bot.error"))
    # BUG FIX: ``str.lstrip("cancel ")`` strips any leading run of the
    # characters {c, a, n, e, l, ' '} — not the "cancel " prefix. It only
    # worked because the ids are numeric. ``removeprefix`` drops exactly
    # the prefix.
    channel_msg_id, msg_id = data.removeprefix("cancel ").split(".")
    try:
        await bot.delete_message(f"@{os.environ['CHANNEL_USERNAME']}", channel_msg_id)
    except aiogram.utils.exceptions.MessageToDeleteNotFound:
        return await query.answer(i18n.gettext("bot.error"))
    await gather(
        Message.filter(message_id=msg_id, from_user_id=query.from_user.id).update(
            status="delisted"  # noqa
        ),
        query.answer(i18n.gettext("bot.deleted_successfully")),
        bot.send_message(
            chat_id=query.from_user.id,
            text=i18n.gettext("bot.sell_keyboard_canceled", locale=BOT_LANGUAGE),
        ),
        # Remove the inline keyboard from the seller's copy of the listing.
        bot.edit_message_reply_markup(
            chat_id=query.message.chat.id,
            message_id=query.message.message_id,
            reply_markup=empty_inline_keyboard,
        ),
    )
@dp.callback_query_handler(lambda query: query.data[:10] == "moderator:")
async def moderator_callback(query: CallbackQuery):
callback_data = query.data
if len(callback_data.split(" ")) != 2:
await query.answer(i18n.gettext("bot.error"))
return
moderator_response = callback_data.split(" ")[0]
seller_userid, msg_id = callback_data.split(" ")[-1].split(".")
seller_userid = int(seller_userid)
match moderator_response:
case "moderator:approved":
status = "approved"
# Get item photo
photo = query.message.photo[-1]
await gather(
query.answer(i18n.gettext("bot.approved_successfully")),
photo.download(destination_file="item_photo.jpg"),
)
# Send item to channel
data = await bot.send_photo(
"@" + os.environ["CHANNEL_USERNAME"],
aiogram.types.InputFile("item_photo.jpg"),
caption=query.message.caption,
)
reply_markup = cancel_listing_keyboard(data.message_id, msg_id)
await gather(
# Sending item to the user
bot.send_photo(
chat_id=seller_userid,
photo=types.InputFile("item_photo.jpg"),
caption=i18n.gettext(
"bot.listing_approved{listing}", locale=BOT_LANGUAGE
).format(listing=query.message.caption),
reply_markup=reply_markup,
),
# Remove the reply keyboard for moderator
bot.edit_message_reply_markup(
chat_id=query.message.chat.id,
message_id=query.message.message_id,
reply_markup=empty_inline_keyboard,
),
)
case "moderator:declined":
status = "declined"
await gather(
query.answer(i18n.gettext("bot.declined_successfully")),
# Notify user that listing was declined
bot.send_photo(
chat_id=seller_userid,
photo=types.InputFile("item_photo.jpg"),
caption=i18n.gettext(
"bot.listing_declined{listing}", locale=BOT_LANGUAGE
).format(listing=query.message.caption),
),
# Remove the reply keyboard for moderator
bot.edit_message_reply_markup(
chat_id=query.message.chat.id,
message_id=query.message.message_id,
reply_markup=empty_inline_keyboard,
),
)
case _:
status = "moderation error"
logger.info(f"'{moderator_response=}'")
await query.answer(i18n.gettext("bot.error"))
await Message.filter(message_id=msg_id, from_user_id=query.from_user.id).update(status=status)
async def on_startup(*_, **__):
me = await bot.get_me()
logger.info(f"Starting up the https://t.me/{me.username} bot...")
if os.environ.get("POSTGRES_USER") != "":
logger.info("Initializing the database connection...")
|
load_dotenv()  # pull TELEGRAM_BOT_TOKEN etc. from .env into os.environ
compile_all_languages()  # compile .po translation files before the i18n middleware loads them
bot = aiogram.Bot(os.environ["TELEGRAM_BOT_TOKEN"])
# NOTE(review): redis_storage is imported but MemoryStorage is used here, so
# FSM state is lost on restart — confirm whether Redis was intended.
dp = aiogram.Dispatcher(bot, storage=MemoryStorage())
BASE_DIR = Path(__file__).parent
LOCALES_DIR = BASE_DIR / "locales"
BOT_LANGUAGE = os.environ.get("BOT_LANGUAGE")  # may be None if unset
i18n = I18nMiddleware("bot", LOCALES_DIR, default="en")
dp.middleware.setup(i18n)
# Fall back to English when BOT_LANGUAGE is unset or has no translation.
if BOT_LANGUAGE not in i18n.locales:
    logger.warning("language is not supported")
    BOT_LANGUAGE = "en"
# Define states
class SellItem(StatesGroup):
    """FSM states for the three-step /sell flow (description -> price -> photo)."""

    waiting_description = State()  # expecting the item description text
    waiting_for_price = State()  # expecting the price / conditions text
    waiting_for_photo = State()  # expecting the item photo (as photo or document)
@dp.message_handler(CommandStart(), state="*")
async def start(message: types.Message):
    """Handle /start in any state: register the sender and show the main keyboard."""
    user_dict = message.from_user.to_python()
    # NOTE(review): every kwarg below participates in the get_or_create lookup;
    # if any profile field changed since the first /start, the lookup misses and
    # a duplicate insert on the same primary key is attempted — verify.
    await User.get_or_create(
        id=message.from_user.id,
        username=user_dict.get("username"),
        first_name=user_dict.get("first_name"),
        last_name=user_dict.get("last_name"),
        is_bot=message.from_user.is_bot,
        phone_number=user_dict.get("phone_number"),
        language_code=message.from_user.language_code,
        start_payload=message.get_args(),  # deep-link payload following /start
    )
    await message.answer(
        i18n.gettext("bot.start_message", locale=BOT_LANGUAGE),
        reply_markup=start_keyboard,  # Attach the reply keyboard here
    )
@dp.message_handler(
    lambda message: message.text.lower()
    == i18n.gettext("bot.sell_keyboard_cancel", locale=BOT_LANGUAGE).lower(),
    state="*",
)
async def cancel(message: types.Message, state: FSMContext):
    """Abort the current flow when the user presses the Cancel button.

    Finishes the FSM state, records the message, and restores the start
    keyboard — all three awaited concurrently.
    """
    await gather(
        state.finish(),
        create_message_instance(message),
        message.reply(
            i18n.gettext("bot.sell_keyboard_canceled", locale=BOT_LANGUAGE),
            reply_markup=start_keyboard,  # Switch back to the start keyboard
        ),
    )
@dp.message_handler(
    lambda message: message.text == i18n.gettext("bot.start_keyboard_help", locale=BOT_LANGUAGE),
    state="*",
)
async def help_command(message: aiogram.types.Message):
    """Reply with the localized help text, pointing users to the support account."""
    support_username = os.environ.get("SUPPORT_USERNAME")  # may be None if unset
    help_text = i18n.gettext("bot.help_message", locale=BOT_LANGUAGE).format(
        support_username=support_username
    )
    await gather(
        create_message_instance(message),  # audit-log the incoming message
        message.reply(help_text, reply_markup=start_keyboard),
    )
@dp.message_handler(
    lambda message: message.text == i18n.gettext("bot.start_keyboard_sell", locale=BOT_LANGUAGE),
    state="*",
)
async def enter_sell(message: aiogram.types.Message):
    """Entry point of the /sell flow: ask the user for an item description."""
    # FIX: the original line ended with a stray trailing comma
    # (``await SellItem.waiting_description.set(),``), which built and
    # discarded a one-element tuple around the awaited result.
    await SellItem.waiting_description.set()
    await gather(
        create_message_instance(message),
        message.reply(
            i18n.gettext("bot.enter_sell_description", locale=BOT_LANGUAGE),
            reply_markup=sell_keyboard,
        ),
    )
@dp.message_handler(
    state=SellItem.waiting_description, content_types=aiogram.types.ContentTypes.TEXT
)
async def enter_name(message: aiogram.types.Message, state: FSMContext):
    """Description step of the /sell flow: store the text under the FSM key
    ``name``, advance to the price state, and prompt for the price."""
    await gather(
        state.update_data(name=message.text),  # stored as "name", read back in publish_post
        SellItem.waiting_for_price.set(),
        create_message_instance(
            message=message,
            content_type="description",
        ),
        message.reply(
            i18n.gettext("bot.enter_price", locale=BOT_LANGUAGE), reply_markup=sell_keyboard
        ),
    )
@dp.message_handler(state=SellItem.waiting_for_price, content_types=aiogram.types.ContentTypes.TEXT)
async def enter_price(message: aiogram.types.Message, state: FSMContext):
    """Price step of the /sell flow: store the text under the FSM key
    ``price``, advance to the photo state, and prompt for a photo."""
    await gather(
        state.update_data(price=message.text),  # free-form text; no numeric validation here
        SellItem.waiting_for_photo.set(),
        create_message_instance(
            message=message,
            content_type="price_or_conditions",
        ),
        message.reply(
            i18n.gettext("bot.send_photo", locale=BOT_LANGUAGE), reply_markup=sell_keyboard
        ),
    )
async def publish_post(message: aiogram.types.Message, state: FSMContext):
    """Assemble the listing from FSM data and submit it to the moderator chat.

    Reads ``name`` and ``price`` from the FSM, builds the localized caption,
    sends the photo (read from the fixed file ``item_photo.jpg``) to the
    moderator chat with approve/decline buttons, finishes the FSM state, and
    confirms submission to the user.

    NOTE(review): ``item_photo.jpg`` is a single shared filename written by the
    photo handlers — concurrent sellers could overwrite each other's photo
    between download and send; confirm whether this needs per-user paths.
    """
    # get data and reset state
    user_data = await state.get_data()
    # prepare data
    item_name = user_data.get("name")
    item_price = user_data.get("price")
    username = message.from_user.username or message.from_user.id
    userid = message.from_user.id
    # Reply keyboard for Moderator
    moderator_inline_keyboard = moderator_keyboard(userid, message.message_id)
    caption = i18n.gettext(
        "bot.item_sale{item_name}-{item_price}-{username}", locale=BOT_LANGUAGE
    ).format(
        item_name=item_name,
        item_price=item_price,
        username=username,
    )
    # Send listing to moderation
    data = await bot.send_photo(
        chat_id=int(os.environ["MODERATOR_CHAT_ID"]),
        photo=aiogram.types.InputFile("item_photo.jpg"),
        caption=caption,
    )
    await gather(
        # Attach the approve/decline buttons to the moderation message.
        bot.edit_message_reply_markup(
            chat_id=data.chat.id, message_id=data.message_id, reply_markup=moderator_inline_keyboard
        ),
        state.finish(),
        message.reply(
            i18n.gettext("bot.sent_to_moderation", locale=BOT_LANGUAGE), reply_markup=start_keyboard
        ),
    )
@dp.message_handler(
    state=SellItem.waiting_for_photo, content_types=aiogram.types.ContentTypes.DOCUMENT
)
async def enter_photo_as_document(message: aiogram.types.Message, state: FSMContext):
    """Photo step of the /sell flow when the image arrives as a *document*
    (uncompressed file): validate the extension, save it, record the pending
    listing, and submit for moderation."""
    # get photo as document
    document = message.document
    # FIX: ``is False`` only matches the literal False; a validator returning
    # None (or another falsy value) would slip through. Plain truthiness is
    # both idiomatic and robust here.
    if not validate_photo_as_document(document):
        return await message.reply(
            i18n.gettext("bot.invalid_photo_extension", locale=BOT_LANGUAGE),
            reply_markup=sell_keyboard,
        )
    await document.download(destination_file="item_photo.jpg")
    await gather(
        register_publication_into_db("item_photo.jpg", message), publish_post(message, state)
    )
async def register_publication_into_db(destination, message):
    """Persist a pending listing: store the photo bytes from ``destination``
    together with the originating Telegram ``message``."""
    photo_bytes = Path(destination).read_bytes()
    await create_message_instance(message=message, content=photo_bytes, status="pending")
@dp.message_handler(
    state=SellItem.waiting_for_photo, content_types=aiogram.types.ContentTypes.PHOTO
)
async def enter_photo(message: aiogram.types.Message, state: FSMContext):
    """Photo step of the /sell flow: save the image, record the pending
    listing in the DB, then submit the post for moderation."""
    largest_photo = message.photo[-1]  # last size entry is the highest resolution
    await largest_photo.download(destination_file="item_photo.jpg")
    await register_publication_into_db("item_photo.jpg", message)
    await publish_post(message, state)
@dp.callback_query_handler(lambda query: query.data[:7] == "cancel ")
async def cancel_sell(query: CallbackQuery):
    """Handle the 'cancel listing' inline button.

    Callback payload format: ``cancel <channel_msg_id>.<msg_id>``. Deletes the
    channel post, marks the listing as delisted, and notifies the seller.
    """
    data = query.data
    if not data or len(data.split("cancel ")) != 2:
        return await query.answer(i18n.gettext("bot.error"))
    # BUG FIX: ``str.lstrip("cancel ")`` strips any leading run of the
    # characters {c, a, n, e, l, ' '} — not the "cancel " prefix. It only
    # worked because the ids are numeric. ``removeprefix`` drops exactly
    # the prefix.
    channel_msg_id, msg_id = data.removeprefix("cancel ").split(".")
    try:
        await bot.delete_message(f"@{os.environ['CHANNEL_USERNAME']}", channel_msg_id)
    except aiogram.utils.exceptions.MessageToDeleteNotFound:
        return await query.answer(i18n.gettext("bot.error"))
    await gather(
        Message.filter(message_id=msg_id, from_user_id=query.from_user.id).update(
            status="delisted"  # noqa
        ),
        query.answer(i18n.gettext("bot.deleted_successfully")),
        bot.send_message(
            chat_id=query.from_user.id,
            text=i18n.gettext("bot.sell_keyboard_canceled", locale=BOT_LANGUAGE),
        ),
        # Remove the inline keyboard from the seller's copy of the listing.
        bot.edit_message_reply_markup(
            chat_id=query.message.chat.id,
            message_id=query.message.message_id,
            reply_markup=empty_inline_keyboard,
        ),
    )
@dp.callback_query_handler(lambda query: query.data[:10] == "moderator:")
async def moderator_callback(query: CallbackQuery):
callback_data = query.data
if len(callback_data.split(" ")) != 2:
await query.answer(i18n.gettext("bot.error"))
return
moderator_response = callback_data.split(" ")[0]
seller_userid, msg_id = callback_data.split(" ")[-1].split(".")
seller_userid = int(seller_userid)
match moderator_response:
case "moderator:approved":
status = "approved"
# Get item photo
photo = query.message.photo[-1]
await gather(
query.answer(i18n.gettext("bot.approved_successfully")),
photo.download(destination_file="item_photo.jpg"),
)
# Send item to channel
data = await bot.send_photo(
"@" + os.environ["CHANNEL_USERNAME"],
aiogram.types.InputFile("item_photo.jpg"),
caption=query.message.caption,
)
reply_markup = cancel_listing_keyboard(data.message_id, msg_id)
await gather(
# Sending item to the user
bot.send_photo(
chat_id=seller_userid,
photo=types.InputFile("item_photo.jpg"),
caption=i18n.gettext(
"bot.listing_approved{listing}", locale=BOT_LANGUAGE
).format(listing=query.message.caption),
reply_markup=reply_markup,
),
# Remove the reply keyboard for moderator
bot.edit_message_reply_markup(
chat_id=query.message.chat.id,
message_id=query.message.message_id,
reply_markup=empty_inline_keyboard,
),
)
case "moderator:declined":
status = "declined"
await gather(
query.answer(i18n.gettext("bot.declined_successfully")),
# Notify user that listing was declined
bot.send_photo(
chat_id=seller_userid,
photo=types.InputFile("item_photo.jpg"),
caption=i18n.gettext(
"bot.listing_declined{listing}", locale=BOT_LANGUAGE
).format(listing=query.message.caption),
),
# Remove the reply keyboard for moderator
bot.edit_message_reply_markup(
chat_id=query.message.chat.id,
message_id=query.message.message_id,
reply_markup=empty_inline_keyboard,
),
)
case _:
status = "moderation error"
logger.info(f"'{moderator_response=}'")
await query.answer(i18n.gettext("bot.error"))
await Message.filter(message_id=msg_id, from_user_id=query.from_user.id).update(status=status)
async def on_startup(*_, **__):
me = await bot.get_me()
logger.info(f"Starting up the https://t.me/{me.username} bot...")
if os.environ.get("POSTGRES_USER") != "":
logger.info("Initializing the database connection...") | await tortoise_orm.init() | 3 | 2023-10-19 17:28:55+00:00 | 8k |
ielab/llm-qlm | run.py | [
{
"identifier": "PROMPT_DICT",
"path": "prompts.py",
"snippet": "PROMPT_DICT = {\n \"msmarco-v1-passage\": {\n \"huggyllama/llama-7b\": \"Generate a question that is the most relevant to the given passage.\"\n \"\\nPassage: {doc}\\n\\nHere is a generated r... | from pyserini.search.lucene import LuceneSearcher
from pyserini.search._base import get_topics
from pyserini.output_writer import OutputFormat, get_output_writer
from dataclasses import dataclass, field
from transformers import (
HfArgumentParser,
TrainingArguments,
PreTrainedTokenizer,
AutoTokenizer,
Trainer,
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
logging,
set_seed
)
from torch.utils.data import Dataset
from typing import Dict, Optional, Sequence
from prompts import PROMPT_DICT, PROMPT_DICT_YES_NO, DOC_FORMAT_DIC, MSMARCO_PROMPT, DEFAULT_PROMPT, GBQ_PROMPT
import transformers
import torch
import json
import copy
import logging
import os
import random | 5,503 | model_name_or_path: str = field(metadata={'help': 'HF LLM name or path.'})
in_context: Optional[bool] = field(default=False, metadata={'help': 'Whether to use in-context LLM.'})
self_in_context: Optional[bool] = field(default=False, metadata={'help': 'Whether to use self-in-context.'})
scoring_func: Optional[str] = field(default='qlm', metadata={'help': 'Scoring function.'})
doc_max_length: int = field(default=512, metadata={'help': 'Maximum length of a document.'})
query_max_length: int = field(default=64, metadata={'help': 'Maximum length of a query.'})
cache_dir: Optional[str] = field(default='./cache', metadata={'help': 'Path to cache directory.'})
data_path: Optional[str] = field(default=None, metadata={'help': 'Path to train data directory.'})
first_stage_run_path: Optional[str] = field(default=None, metadata={'help': 'Path to first-stage run file.'})
@dataclass
class SearchResult:
    """A single first-stage retrieval hit."""

    docid: str  # collection document id
    score: float  # first-stage (Lucene) retrieval score
    raw: str  # raw JSON document string stored in the index
def _tokenize_fn(strings: Sequence[str], tokenizer: PreTrainedTokenizer) -> Dict:
"""Tokenize a list of strings."""
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
)
for text in strings
]
input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list
]
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
)
def CausalLMPreprocess(
    sources: Sequence[str],
    targets: Sequence[str],
    tokenizer: PreTrainedTokenizer,
) -> Dict:
    """Tokenize prompt+completion pairs for causal-LM scoring.

    Labels are a deep copy of the input ids with all but the last prompt token
    masked to ``IGNORE_INDEX`` so the loss covers the completion (plus the
    transition from the final prompt token).
    """
    examples = [prompt + completion for prompt, completion in zip(sources, targets)]
    examples_tokenized = _tokenize_fn(examples, tokenizer)
    sources_tokenized = _tokenize_fn(sources, tokenizer)
    input_ids = examples_tokenized["input_ids"]
    labels = copy.deepcopy(input_ids)
    for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]):
        # Mask the prompt portion (all but its final token).
        label[:source_len - 1] = IGNORE_INDEX
    return dict(input_ids=input_ids, labels=labels)
def Seq2SeqPreprocess(
    sources: Sequence[str],
    targets: Sequence[str],
    tokenizer: PreTrainedTokenizer,
) -> Dict:
    """Tokenize encoder inputs and decoder labels for a seq2seq model.

    Padding positions in the labels are replaced with -100 so the loss
    ignores them.
    """
    def _encode(texts):
        # Shared encoding settings for both sources and targets.
        return tokenizer(
            texts,
            return_tensors="pt",
            padding="longest",
            max_length=tokenizer.model_max_length,
            truncation=True,
        )

    model_inputs = _encode(sources)
    labels = _encode(targets).input_ids
    labels[labels == tokenizer.pad_token_id] = -100
    return dict(
        input_ids=model_inputs["input_ids"],
        attention_mask=model_inputs["attention_mask"],
        labels=labels,
    )
class LLMDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, results, topics, data_name, model_args: LLMArguments, tokenizer: PreTrainedTokenizer,
few_shot_prompts=None):
super(LLMDataset, self).__init__()
logging.warning("processing first stage results...")
sources = []
targets = []
for qid, ranking in results:
query = topics[qid]
query = tokenizer.convert_tokens_to_string(tokenizer.tokenize(query)[:model_args.query_max_length])
for doc in ranking:
json_doc = json.loads(doc.raw)
doc = DOC_FORMAT_DIC[data_name].format_map(json_doc)
doc = tokenizer.convert_tokens_to_string(tokenizer.tokenize(doc)[:model_args.doc_max_length])
if model_args.scoring_func == 'qlm':
if model_args.in_context:
doc = doc.replace('\n', ' ')
# sources.append(MSMARCO_PROMPT + DEFAULT_PROMPT.format_map({"doc": doc}))
if 't5' in model_args.model_name_or_path or 'T0' in model_args.model_name_or_path: # Seq2Seq and decoder only will be a bit different.
sources.append(GBQ_PROMPT.format_map({"doc": doc}))
targets.append(f"Good Question: {query}")
else:
sources.append(GBQ_PROMPT.format_map({"doc": doc})+'\nGood Question: ')
targets.append(query)
else:
if few_shot_prompts is not None:
sources.append(
few_shot_prompts + PROMPT_DICT[data_name][model_args.model_name_or_path].format_map(
{"doc": doc}) + '\n')
else:
sources.append(
PROMPT_DICT[data_name][model_args.model_name_or_path].format_map({"doc": doc}))
targets.append(f"{query}{tokenizer.eos_token}")
elif model_args.scoring_func == 'yes_no':
|
transformers.logging.set_verbosity_info()  # verbose HF logs (transformers' own logging module)
os.environ["PYSERINI_CACHE"] = "./cache"  # keep Pyserini indexes/topics under ./cache
IGNORE_INDEX = -100  # label id ignored by the LM loss (CrossEntropyLoss default ignore_index)
random.seed(929)
set_seed(929)  # transformers' set_seed: seeds python/numpy/torch for reproducibility
logger = logging.getLogger(__name__)
@dataclass
class PyseriniArguments:
    """CLI arguments for the first-stage (Lucene/Pyserini) retrieval run."""

    index: str = field(metadata={'help': 'Path to Lucene index.'})
    topics: str = field(metadata={'help': 'Path to topics file.'})
    output: str = field(metadata={'help': 'Path to output file.'})
    output_format: Optional[str] = field(default='trec', metadata={'help': 'Output format.'})
    hits: int = field(default=1000, metadata={'help': 'Number of hits to retrieve per query.'})
    threads: int = field(default=16, metadata={'help': 'Number of threads.'})
    remove_query: Optional[bool] = field(default=False, metadata={'help': 'Remove query from output.'})
    save_first_stage_run: Optional[bool] = field(default=False, metadata={'help': 'Save first-stage run.'})
    remove_duplicates: Optional[bool] = field(default=False, metadata={'help': 'Remove duplicates from output.'})
@dataclass
class LLMArguments:
    """CLI arguments for the LLM re-ranking stage (model, prompting, truncation)."""

    model_name_or_path: str = field(metadata={'help': 'HF LLM name or path.'})
    in_context: Optional[bool] = field(default=False, metadata={'help': 'Whether to use in-context LLM.'})
    self_in_context: Optional[bool] = field(default=False, metadata={'help': 'Whether to use self-in-context.'})
    scoring_func: Optional[str] = field(default='qlm', metadata={'help': 'Scoring function.'})
    doc_max_length: int = field(default=512, metadata={'help': 'Maximum length of a document.'})
    query_max_length: int = field(default=64, metadata={'help': 'Maximum length of a query.'})
    cache_dir: Optional[str] = field(default='./cache', metadata={'help': 'Path to cache directory.'})
    data_path: Optional[str] = field(default=None, metadata={'help': 'Path to train data directory.'})
    first_stage_run_path: Optional[str] = field(default=None, metadata={'help': 'Path to first-stage run file.'})
@dataclass
class SearchResult:
    """A single first-stage retrieval hit."""

    docid: str  # collection document id
    score: float  # first-stage (Lucene) retrieval score
    raw: str  # raw JSON document string stored in the index
def _tokenize_fn(strings: Sequence[str], tokenizer: PreTrainedTokenizer) -> Dict:
"""Tokenize a list of strings."""
tokenized_list = [
tokenizer(
text,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
)
for text in strings
]
input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
input_ids_lens = labels_lens = [
tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list
]
return dict(
input_ids=input_ids,
labels=labels,
input_ids_lens=input_ids_lens,
labels_lens=labels_lens,
)
def CausalLMPreprocess(
sources: Sequence[str],
targets: Sequence[str],
tokenizer: PreTrainedTokenizer,
) -> Dict:
"""Preprocess the data by tokenizing."""
examples = [s + t for s, t in zip(sources, targets)]
examples_tokenized, sources_tokenized = [_tokenize_fn(strings, tokenizer) for strings in (examples, sources)]
input_ids = examples_tokenized["input_ids"]
labels = copy.deepcopy(input_ids)
for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]):
label[:source_len - 1] = IGNORE_INDEX
return dict(input_ids=input_ids, labels=labels)
def Seq2SeqPreprocess(
sources: Sequence[str],
targets: Sequence[str],
tokenizer: PreTrainedTokenizer,
) -> Dict:
"""Preprocess the data by tokenizing."""
inputs = tokenizer(
sources,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
)
labels = tokenizer(
targets,
return_tensors="pt",
padding="longest",
max_length=tokenizer.model_max_length,
truncation=True,
).input_ids
labels[labels == tokenizer.pad_token_id] = -100
return dict(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], labels=labels)
class LLMDataset(Dataset):
"""Dataset for supervised fine-tuning."""
def __init__(self, results, topics, data_name, model_args: LLMArguments, tokenizer: PreTrainedTokenizer,
few_shot_prompts=None):
super(LLMDataset, self).__init__()
logging.warning("processing first stage results...")
sources = []
targets = []
for qid, ranking in results:
query = topics[qid]
query = tokenizer.convert_tokens_to_string(tokenizer.tokenize(query)[:model_args.query_max_length])
for doc in ranking:
json_doc = json.loads(doc.raw)
doc = DOC_FORMAT_DIC[data_name].format_map(json_doc)
doc = tokenizer.convert_tokens_to_string(tokenizer.tokenize(doc)[:model_args.doc_max_length])
if model_args.scoring_func == 'qlm':
if model_args.in_context:
doc = doc.replace('\n', ' ')
# sources.append(MSMARCO_PROMPT + DEFAULT_PROMPT.format_map({"doc": doc}))
if 't5' in model_args.model_name_or_path or 'T0' in model_args.model_name_or_path: # Seq2Seq and decoder only will be a bit different.
sources.append(GBQ_PROMPT.format_map({"doc": doc}))
targets.append(f"Good Question: {query}")
else:
sources.append(GBQ_PROMPT.format_map({"doc": doc})+'\nGood Question: ')
targets.append(query)
else:
if few_shot_prompts is not None:
sources.append(
few_shot_prompts + PROMPT_DICT[data_name][model_args.model_name_or_path].format_map(
{"doc": doc}) + '\n')
else:
sources.append(
PROMPT_DICT[data_name][model_args.model_name_or_path].format_map({"doc": doc}))
targets.append(f"{query}{tokenizer.eos_token}")
elif model_args.scoring_func == 'yes_no': | sources.append(PROMPT_DICT_YES_NO[data_name][model_args.model_name_or_path].format_map({"doc": doc, | 1 | 2023-10-18 05:54:47+00:00 | 8k |
andy-man/ps5-wee-tools | tools/Tools.py | [
{
"identifier": "WeeSerial",
"path": "utils/serial.py",
"snippet": "class WeeSerial:\n\t\n\tpatterns = {\n\t\t'error'\t\t: Clr.fg.red,\n\t\t'warn'\t\t: Clr.fg.orange,\n\t\t'release'\t: Clr.fg.green,\n\t\t'network'\t: Clr.fg.blue,\n\t\t'samu'\t\t: Clr.fg.cyan,\n\t\t'standby'\t: Clr.bg.purple,\n\t}\n\t\n\... | import os, sys, time, datetime
import utils.utils as Utils
import utils.slb2 as BLS
import utils.sflash as SFlash
import tools.SFlashTools as SFlashTools
from lang._i18n_ import *
from utils.serial import WeeSerial
from utils.spiway import SpiFlasher | 7,057 | # Show current file info
if act != 'read' and path and os.path.isfile(path):
print(UI.highlight(STR_FILE_INFO)+':\n')
UI.showTable({
'File': os.path.basename(path),
'MD5': Utils.getFileMD5(path),
'Size': '%d MB'%(os.stat(path).st_size // (1024**2)),
})
print(end=('\n' if act else ''))
# Perform action
cfg = flasher.Config
if act:
print(' '+UI.highlight(MENU_SPW_ACTS[act] if act in MENU_SPW_ACTS else STR_UNKNOWN)+'\n')
block, count = chooseBNC(mode, cfg.BLOCK_SIZE)
if act == 'read':
sfx = '_full' if block == 0 and count == 0 else '_b%d-%d'%(block,block+count)
path = os.path.join(os.getcwd(), 'dump_' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + sfx + '.bin')
data = flasher.readChip(block, count)
print()
if data:
with open(path, "wb") as file:
file.seek(cfg.TOTAL_SIZE - 1)
file.write(b'\x00')
file.seek(cfg.BLOCK_SIZE * block)
file.write(data)
else:
path = ''
elif act == 'write':
if path and os.path.isfile(path):
with open(path,"rb") as file:
file.seek(cfg.BLOCK_SIZE * block)
data = file.read(cfg.BLOCK_SIZE * (count if count > 0 else cfg.BLOCK_COUNT))
flasher.writeChip(data, False, block, count)
print()
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif act == 'verify':
if path and os.path.isfile(path):
with open(path,"rb") as file:
file.seek(cfg.BLOCK_SIZE * block)
data = file.read(cfg.BLOCK_SIZE * (count if count else cfg.BLOCK_COUNT))
vdata = flasher.readChip(block, count)
print('\n'+STR_VERIFY+': '+(STR_OK if data == vdata else STR_FAIL)+'\n')
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif act == 'erase':
flasher.eraseChip(block, count)
print()
if act:
print(STR_DONE)
flasher.close()
# Show file info after read action
if act == 'read' and path and os.path.isfile(path):
print('\n'+UI.highlight(STR_FILE_INFO)+':\n')
UI.showTable({
'File': os.path.basename(path),
'MD5': Utils.getFileMD5(path),
'Size': '%d MB'%(os.stat(path).st_size // 1024**2),
})
# Action done
print(UI.getTab(STR_ACTIONS))
UI.showTableEx(UI.getMenu(MENU_FLASHER,1), 4, 17)
print(UI.DIVIDER)
UI.showMenu(MENU_EXTRA_FLASHER)
UI.showStatus()
act = ''
mode = False
choice = input(STR_CHOICE)
if choice == '0':
return
elif choice in ['1','2','3']:
act = 'read'
mode = int(choice) - 1
elif choice in ['4','5','6']:
act = 'write'
mode = int(choice) - 4
elif choice in ['7','8','9']:
act = 'verify'
mode = int(choice) - 7
elif choice in ['10','11','12']:
act = 'erase'
mode = int(choice) - 10
elif choice == 's':
path = screenFileSelect(path, False, True)
elif choice == 'f':
if path and os.path.isfile(path):
return SFlashTools.screenSFlashTools(path)
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif choice == 'm':
return screenMainMenu()
screenNorFlasher(path, port, act, mode)
def screenSerialMonitor(port = '', emc_mode = False):
port = port if port else screenChoosePort()
if not port:
UI.setStatus(STR_NO_PORTS)
return
| #==============================================================
# Common Tools
# part of ps5 wee tools project
#==============================================================
def screenNorFlasher(path = '', port = '', act = '', mode = False):
port = port if port else screenChoosePort()
if not port:
UI.setStatus(STR_NO_PORTS)
return
flasher = SpiFlasher(port)
flasher.reset()
UI.clearScreen()
print(TITLE+UI.getTab(STR_ABOUT_SPIWAY))
print(UI.warning(STR_INFO_SPIWAY))
print(UI.getTab(STR_SPIWAY))
if flasher.err or flasher.sp.is_open == False:
print(UI.warning(STR_PORT_UNAVAILABLE))
print(UI.warning(flasher.err))
flasher.close()
input(STR_BACK)
return
ping = flasher.ping()
ver_maj, ver_min = ping['VER']
UI.showTable({
'Version':'%d.%02d'%(ver_maj, ver_min),
'Memory':'%d bytes'%ping['RAM'],
})
print()
if ping['VER'] != flasher.VERSION:
flasher.close()
input(STR_BACK)
return
info = flasher.getChipInfo()
if flasher.Config.IC_ID == 0:
UI.showTable({
'Device ID': '0x%02X'%flasher.Config.VENDOR_ID,
'Vendor ID': '0x%04X'%flasher.Config.DEVICE_ID,
})
input(STR_BACK)
return
print(UI.highlight(STR_CHIP_CONFIG)+':\n')
UI.showTable(info)
print()
# Show current file info
if act != 'read' and path and os.path.isfile(path):
print(UI.highlight(STR_FILE_INFO)+':\n')
UI.showTable({
'File': os.path.basename(path),
'MD5': Utils.getFileMD5(path),
'Size': '%d MB'%(os.stat(path).st_size // (1024**2)),
})
print(end=('\n' if act else ''))
# Perform action
cfg = flasher.Config
if act:
print(' '+UI.highlight(MENU_SPW_ACTS[act] if act in MENU_SPW_ACTS else STR_UNKNOWN)+'\n')
block, count = chooseBNC(mode, cfg.BLOCK_SIZE)
if act == 'read':
sfx = '_full' if block == 0 and count == 0 else '_b%d-%d'%(block,block+count)
path = os.path.join(os.getcwd(), 'dump_' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + sfx + '.bin')
data = flasher.readChip(block, count)
print()
if data:
with open(path, "wb") as file:
file.seek(cfg.TOTAL_SIZE - 1)
file.write(b'\x00')
file.seek(cfg.BLOCK_SIZE * block)
file.write(data)
else:
path = ''
elif act == 'write':
if path and os.path.isfile(path):
with open(path,"rb") as file:
file.seek(cfg.BLOCK_SIZE * block)
data = file.read(cfg.BLOCK_SIZE * (count if count > 0 else cfg.BLOCK_COUNT))
flasher.writeChip(data, False, block, count)
print()
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif act == 'verify':
if path and os.path.isfile(path):
with open(path,"rb") as file:
file.seek(cfg.BLOCK_SIZE * block)
data = file.read(cfg.BLOCK_SIZE * (count if count else cfg.BLOCK_COUNT))
vdata = flasher.readChip(block, count)
print('\n'+STR_VERIFY+': '+(STR_OK if data == vdata else STR_FAIL)+'\n')
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif act == 'erase':
flasher.eraseChip(block, count)
print()
if act:
print(STR_DONE)
flasher.close()
# Show file info after read action
if act == 'read' and path and os.path.isfile(path):
print('\n'+UI.highlight(STR_FILE_INFO)+':\n')
UI.showTable({
'File': os.path.basename(path),
'MD5': Utils.getFileMD5(path),
'Size': '%d MB'%(os.stat(path).st_size // 1024**2),
})
# Action done
print(UI.getTab(STR_ACTIONS))
UI.showTableEx(UI.getMenu(MENU_FLASHER,1), 4, 17)
print(UI.DIVIDER)
UI.showMenu(MENU_EXTRA_FLASHER)
UI.showStatus()
act = ''
mode = False
choice = input(STR_CHOICE)
if choice == '0':
return
elif choice in ['1','2','3']:
act = 'read'
mode = int(choice) - 1
elif choice in ['4','5','6']:
act = 'write'
mode = int(choice) - 4
elif choice in ['7','8','9']:
act = 'verify'
mode = int(choice) - 7
elif choice in ['10','11','12']:
act = 'erase'
mode = int(choice) - 10
elif choice == 's':
path = screenFileSelect(path, False, True)
elif choice == 'f':
if path and os.path.isfile(path):
return SFlashTools.screenSFlashTools(path)
else:
UI.setStatus(STR_FILE_NOT_EXISTS%path)
elif choice == 'm':
return screenMainMenu()
screenNorFlasher(path, port, act, mode)
def screenSerialMonitor(port = '', emc_mode = False):
port = port if port else screenChoosePort()
if not port:
UI.setStatus(STR_NO_PORTS)
return
| serial = WeeSerial(port) | 0 | 2023-10-21 23:55:55+00:00 | 8k |
xingchenshanyao/YOLOP-E | lib/core/loss.py | [
{
"identifier": "bbox_iou",
"path": "lib/core/general.py",
"snippet": "def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):\n # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4\n box2 = box2.T\n\n # Get the coordinates of bounding boxes\n if x1y1x2y2:... | import torch.nn as nn
import torch
from .general import bbox_iou
from .postprocess import build_targets
from lib.core.evaluate import SegmentationMetric | 4,102 |
class MultiHeadLoss(nn.Module):
"""
collect all the loss we need
"""
def __init__(self, losses, cfg, lambdas=None):
"""
Inputs:
- losses: (list)[nn.Module, nn.Module, ...]
- cfg: config object
- lambdas: (list) + IoU loss, weight for each loss
"""
super().__init__()
# lambdas: [cls, obj, iou, la_seg, ll_seg, ll_iou]
if not lambdas:
lambdas = [1.0 for _ in range(len(losses) + 3)] # lambdas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
assert all(lam >= 0.0 for lam in lambdas)
self.losses = nn.ModuleList(losses) # self.losses = ModuleList( (0-2): 3 x BCEWithLogitsLoss())
self.lambdas = lambdas # self.lambdas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
self.cfg = cfg
def forward(self, head_fields, head_targets, shapes, model):
"""
Inputs:
- head_fields: (list) output from each task head
- head_targets: (list) ground-truth for each task head
- model:
Returns:
- total_loss: sum of all the loss
- head_losses: (tuple) contain all loss[loss1, loss2, ...]
"""
# head_losses = [ll
# for l, f, t in zip(self.losses, head_fields, head_targets)
# for ll in l(f, t)]
#
# assert len(self.lambdas) == len(head_losses)
# loss_values = [lam * l
# for lam, l in zip(self.lambdas, head_losses)
# if l is not None]
# total_loss = sum(loss_values) if loss_values else None
# print(model.nc)
total_loss, head_losses = self._forward_impl(head_fields, head_targets, shapes, model)
return total_loss, head_losses
def _forward_impl(self, predictions, targets, shapes, model):
"""
Args:
predictions: predicts of [[det_head1, det_head2, det_head3], drive_area_seg_head, lane_line_seg_head]
targets: gts [det_targets, segment_targets, lane_targets]
model:
Returns:
total_loss: sum of all the loss
head_losses: list containing losses
"""
cfg = self.cfg
device = targets[0].device
lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
tcls, tbox, indices, anchors = build_targets(cfg, predictions[0], targets[0], model) # targets
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
cp, cn = smooth_BCE(eps=0.0)
BCEcls, BCEobj, BCEseg = self.losses
# Calculate Losses
nt = 0 # number of targets
no = len(predictions[0]) # number of outputs
balance = [4.0, 1.0, 0.4] if no == 3 else [4.0, 1.0, 0.4, 0.1] # P3-5 or P3-6
# calculate detection loss
for i, pi in enumerate(predictions[0]): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
n = b.shape[0] # number of targets
if n:
nt += n # cumulative targets
ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
# Regression
pxy = ps[:, :2].sigmoid() * 2. - 0.5
pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1).to(device) # predicted box
|
class MultiHeadLoss(nn.Module):
"""
collect all the loss we need
"""
def __init__(self, losses, cfg, lambdas=None):
"""
Inputs:
- losses: (list)[nn.Module, nn.Module, ...]
- cfg: config object
- lambdas: (list) + IoU loss, weight for each loss
"""
super().__init__()
# lambdas: [cls, obj, iou, la_seg, ll_seg, ll_iou]
if not lambdas:
lambdas = [1.0 for _ in range(len(losses) + 3)] # lambdas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
assert all(lam >= 0.0 for lam in lambdas)
self.losses = nn.ModuleList(losses) # self.losses = ModuleList( (0-2): 3 x BCEWithLogitsLoss())
self.lambdas = lambdas # self.lambdas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
self.cfg = cfg
def forward(self, head_fields, head_targets, shapes, model):
"""
Inputs:
- head_fields: (list) output from each task head
- head_targets: (list) ground-truth for each task head
- model:
Returns:
- total_loss: sum of all the loss
- head_losses: (tuple) contain all loss[loss1, loss2, ...]
"""
# head_losses = [ll
# for l, f, t in zip(self.losses, head_fields, head_targets)
# for ll in l(f, t)]
#
# assert len(self.lambdas) == len(head_losses)
# loss_values = [lam * l
# for lam, l in zip(self.lambdas, head_losses)
# if l is not None]
# total_loss = sum(loss_values) if loss_values else None
# print(model.nc)
total_loss, head_losses = self._forward_impl(head_fields, head_targets, shapes, model)
return total_loss, head_losses
def _forward_impl(self, predictions, targets, shapes, model):
"""
Args:
predictions: predicts of [[det_head1, det_head2, det_head3], drive_area_seg_head, lane_line_seg_head]
targets: gts [det_targets, segment_targets, lane_targets]
model:
Returns:
total_loss: sum of all the loss
head_losses: list containing losses
"""
cfg = self.cfg
device = targets[0].device
lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
tcls, tbox, indices, anchors = build_targets(cfg, predictions[0], targets[0], model) # targets
# Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
cp, cn = smooth_BCE(eps=0.0)
BCEcls, BCEobj, BCEseg = self.losses
# Calculate Losses
nt = 0 # number of targets
no = len(predictions[0]) # number of outputs
balance = [4.0, 1.0, 0.4] if no == 3 else [4.0, 1.0, 0.4, 0.1] # P3-5 or P3-6
# calculate detection loss
for i, pi in enumerate(predictions[0]): # layer index, layer predictions
b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
n = b.shape[0] # number of targets
if n:
nt += n # cumulative targets
ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
# Regression
pxy = ps[:, :2].sigmoid() * 2. - 0.5
pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
pbox = torch.cat((pxy, pwh), 1).to(device) # predicted box | iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) | 0 | 2023-10-24 02:08:25+00:00 | 8k |
giulio98/functional-diffusion-processes | src/functional_diffusion_processes/models/uvit.py | [
{
"identifier": "BaseViT",
"path": "src/functional_diffusion_processes/models/base_vit.py",
"snippet": "class BaseViT(nn.Module, abc.ABC):\n \"\"\"Abstract base class for Vision Transformer (ViT) models.\n\n Introduced in the paper \"An Image is Worth 16x16 Words: Transformers for Image Recognitio... | from abc import ABC
from omegaconf import DictConfig
from . import BaseViT
from .blocks import Block
from .embeddings import AddPositionEmbs, get_timestep_embedding
from .embeddings.patch_embedding import PatchEmbeddings
from .encodings.position_encoding import AddPositionEncodings
import einops
import flax.linen as nn
import jax
import jax.numpy as jnp | 5,328 | # MIT License
#
# Copyright (c) 2022 Fan Bao
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# noinspection PyAttributeOutsideInit
class UViT(BaseViT, ABC):
"""Implementation of the UViT model, a variant of Vision Transformer (ViT) introduced.
in the paper "All are Worth Words: A ViT Backbone for Diffusion Models" (https://arxiv.org/abs/2209.12152).
The model employs patch embeddings, position embeddings/encodings, and transformer blocks to process
the input and return a processed tensor.
Attributes:
model_config (DictConfig): Configuration dictionary for setting up the model.
Methods:
setup(): Set up the VisionTransformer module with the provided configuration.
reshape_input(x): Reshape the input tensor based on the model configuration.
separate_data_from_time(x): Separate data and time information from the input tensor.
unpatchify(x: jnp.ndarray): Convert patch embeddings back to image-like or sequence-like tensor.
__call__(inputs: jnp.ndarray, *, train: bool): Process the input tensor through the UViT model.
"""
model_config: DictConfig
def setup(self):
"""Set up the VisionTransformer module based on the provided configuration in `model_config`."""
self.patch_size = self.model_config["patch_size"]
self.in_chans = self.model_config["in_chans"]
self.transformer = self.model_config["transformer"]
self.embeddings_size = self.model_config["embeddings_size"]
self.image_size = self.model_config["image_size"]
self.old_image_size = self.model_config["old_image_size"]
self.is_unidimensional = self.model_config["is_unidimensional"]
self.dtype = jnp.float32
self.Block = Block
if self.model_config["add_position"] == "embedding":
| # MIT License
#
# Copyright (c) 2022 Fan Bao
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# noinspection PyAttributeOutsideInit
class UViT(BaseViT, ABC):
"""Implementation of the UViT model, a variant of Vision Transformer (ViT) introduced.
in the paper "All are Worth Words: A ViT Backbone for Diffusion Models" (https://arxiv.org/abs/2209.12152).
The model employs patch embeddings, position embeddings/encodings, and transformer blocks to process
the input and return a processed tensor.
Attributes:
model_config (DictConfig): Configuration dictionary for setting up the model.
Methods:
setup(): Set up the VisionTransformer module with the provided configuration.
reshape_input(x): Reshape the input tensor based on the model configuration.
separate_data_from_time(x): Separate data and time information from the input tensor.
unpatchify(x: jnp.ndarray): Convert patch embeddings back to image-like or sequence-like tensor.
__call__(inputs: jnp.ndarray, *, train: bool): Process the input tensor through the UViT model.
"""
model_config: DictConfig
def setup(self):
"""Set up the VisionTransformer module based on the provided configuration in `model_config`."""
self.patch_size = self.model_config["patch_size"]
self.in_chans = self.model_config["in_chans"]
self.transformer = self.model_config["transformer"]
self.embeddings_size = self.model_config["embeddings_size"]
self.image_size = self.model_config["image_size"]
self.old_image_size = self.model_config["old_image_size"]
self.is_unidimensional = self.model_config["is_unidimensional"]
self.dtype = jnp.float32
self.Block = Block
if self.model_config["add_position"] == "embedding": | self.add_position = AddPositionEmbs( | 2 | 2023-10-24 22:01:35+00:00 | 8k |
godisboy0/nonebot-adapter-wcf | wcf_test/test_console_adapter.py | [
{
"identifier": "Bot",
"path": "adapters/wechatferry/bot.py",
"snippet": "class Bot(BaseBot):\n \"\"\"\n wechatferry协议适配。\n \"\"\"\n\n send_handler: Callable[[\"Bot\", Event,\n Union[str, MessageSegment]], Any] = send\n\n async def send_private_msg(self, user_id... | import sys
import asyncio
import time
from nonebot.adapters import Adapter as BaseAdapter
from typing import Any, Dict, List, Callable, Optional, Awaitable
from textual.color import Color
from nonebot.drivers import Driver
from nonebot.typing import overrides
from nonechat import Frontend, ConsoleSetting
from nonebot.adapters.console.config import Config
from nonebot.adapters.console.backend import AdapterConsoleBackend
from nonebot.adapters.console.event import Event, MessageEvent
from nonechat.message import Text, ConsoleMessage
from adapters.wechatferry.bot import Bot as WechatFerryBot
from adapters.wechatferry.event import (
PrivateMessageEvent as WcfPrivateMsgEvent,
GroupMessageEvent as WcfGroupMsgEvent,
Sender
)
from adapters.wechatferry.message import MessageSegment as WcfMessageSeg, Message as WcfMessage
from adapters.wechatferry.basemodel import UserInfo as WcfUserInfo
from typing import Literal
from adapters.wechatferry.utils import logger | 4,079 | # 不显示消息id
self.show_msg_id = False
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"不再显示消息id", to_wxid=event.get_user_id()))
return
elif text.startswith(":set"):
# 这里是设置各种参数
asyncio.create_task(self._call_api(
self.bot, "send_text", text="暂不支持的设置"))
return
# 接下来是对消息的各种特殊处理,主要支持不同的消息格式。
at_users = []
msg_id_seq += 1
if self.show_msg_id:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"发出的消息id: {msg_id_seq}", to_wxid=event.get_user_id()))
final_msg_args = {}
if '@' in text:
# @符号以后的都认为是另一个用户名
at_users = [x for x in text.split('@')[1:] if x]
text = text.split('@')[0].strip()
if text.startswith("image:"):
# 发送一个图片消息过去。
file_path = text.split("image:")[1].strip()
image_msg = WcfMessage(
WcfMessageSeg.image(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "image", text, image_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = image_msg
elif text.startswith("voice:"):
# 发送一个音频消息过去。
file_path = text.split("voice:")[1].strip()
voice_msg = WcfMessage(
WcfMessageSeg.record(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "voice", text, voice_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = voice_msg
elif text.startswith("video:"):
# 发送一个视频消息过去。
file_path = text.split("video:")[1].strip()
video_msg = WcfMessage(
WcfMessageSeg.video(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "video", text, video_msg, speaker_uid, None if not self.group_mode else "console_group")
elif text.startswith("file:"):
# 发送一个文件消息过去。
file_path = text.split("file:")[1].strip()
file_msg = WcfMessage(
WcfMessageSeg('file', {'file': file_path, 'file_name': file_path.split('/')[-1]}))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "file", text, file_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = file_msg
elif text.startswith("link:"):
splited_text = text.split("link:")[1].strip()
splited_text = splited_text.split("#")
if len(splited_text) != 4:
asyncio.create_task(self._call_api(
self.bot, "send_text", text="链接消息格式应当为>> link:title#desc#url#img_path", to_wxid=event.get_user_id()))
return
title, desc, url, img_path = splited_text
link_msg = WcfMessage(
WcfMessageSeg.share(title, desc, url, img_path))
final_msg_args['message'] = link_msg
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "link", text, link_msg, speaker_uid, None if not self.group_mode else "console_group")
elif text.startswith("refer:"):
# 发送一个引用消息过去,refer后面的就是id
refer_content = text.split("refer:")[1].strip()
splited_refer_content = refer_content.split(" ")
if len(splited_refer_content) < 2:
asyncio.create_task(self._call_api(
self.bot, "send_text", text="引用消息格式应当为>> refer:refered_msg_id textmsg。\n输入:set showid true可以显示消息的msg_id", to_wxid=event.get_user_id()))
return
refer_msg = splited_refer_content[0]
refer_text_msg = " ".join(splited_refer_content[1:])
if not refer_msg.isdigit() or int(refer_msg) not in msg_store:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"引用消息{refer_msg}不存在", to_wxid=event.get_user_id()))
return
referd_msg = extract_refer_msg(
msg_store[int(refer_msg)], refer_text_msg)
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "refer", text, referd_msg, speaker_uid, None if not self.group_mode else "console_group")
if refer_msg is None:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"引用消息{refer_msg}解析失败,可能是被引用消息的类型未支持", to_wxid=event.get_user_id()))
return
final_msg_args['message'] = referd_msg
else:
# 发送一个文本消息过去。
text_msg = WcfMessage(
WcfMessageSeg.text(text))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "text", text, text_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = text_msg
if at_users:
final_msg_args['message'] = final_msg_args['message'] + [WcfMessageSeg.at(
user_id) for user_id in at_users]
final_msg_args['original_message'] = final_msg_args["message"]
final_msg_args.update({
"post_type": "message",
"time": event.time.timestamp(),
"self_id": event.self_id,
"user_id": speaker_uid,
"message_id": msg_id_seq,
"raw_message": text,
"font": 12, # meaningless for wechat, but required by onebot 11
"sender": Sender(user_id=speaker_uid),
"to_me": not self.group_mode or 'bot' in at_users or self.always_at,
})
if self.group_mode:
final_msg_args.update({
"message_type": "group",
"sub_type": "normal",
"group_id": "console_group"
})
|
BOT_ID = "wechatferry_console"
"""
一个简单的想法,把从bot中接收到的onebot格式的消息转换成console格式的消息
这样可以方便地在控制台中测试bot的功能
onebot11标准要求:https://github.com/botuniverse/onebot-11/blob/master/README.md
onebot11 message segment 类型: https://github.com/botuniverse/onebot-11/blob/master/message/segment.md
"""
class SimpleMsg:
def __init__(self, msg_id: int, msg_type: Literal["text", "image", "voice", "refer", "video", "file", "link"],
raw_msg: str, msg: WcfMessage, speaker_id, room_id=None, _time=time.time()):
self.msg_id = msg_id
self.msg_type = msg_type
self.raw_msg = raw_msg
self.msg = msg
self.room_id = room_id
self.speaker_id = speaker_id
self.time = _time
speaker_uid = "User"
msg_id_seq = 0
msg_store: dict[int, SimpleMsg] = {}
class OneBotV11ConsoleAdapter(BaseAdapter):
@overrides(BaseAdapter)
def __init__(self, driver: Driver, **kwargs: Any) -> None:
super().__init__(driver, **kwargs)
self.console_config = Config.parse_obj(self.config)
self.bot = WechatFerryBot(self, BOT_ID)
self._task: Optional[asyncio.Task] = None
self._frontend: Optional[Frontend[AdapterConsoleBackend]] = None
self._stdout = sys.stdout
self.clients: List[Callable[[WechatFerryBot,
str, Dict[str, Any]], Awaitable[Any]]] = []
self.group_mode = False
self.always_at = False
self.show_msg_id = False
self.setup()
@staticmethod
@overrides(BaseAdapter)
def get_name() -> str:
return "Console"
def setup(self):
if not self.console_config.console_headless_mode:
self.driver.on_startup(self._start)
self.driver.on_shutdown(self._shutdown)
async def _start(self) -> None:
self._frontend = Frontend(
AdapterConsoleBackend,
ConsoleSetting(
title="onebot11-adapter-console",
sub_title="welcome using for test",
toolbar_exit="❌",
toolbar_back="⬅",
icon_color=Color.parse("#EA5252"),
),
)
self._frontend.backend.set_adapter(self)
self._task = asyncio.create_task(self._frontend.run_async())
self.bot_connect(self.bot)
async def _shutdown(self) -> None:
self.bot_disconnect(self.bot)
if self._frontend:
self._frontend.exit()
if self._task:
await self._task
def post_event(self, event: Event) -> None:
# 功能越来越多,改成更清晰的流水账写法吧= =
if not isinstance(event, MessageEvent):
asyncio.create_task(self._call_api(
self.bot, "send_text", text="暂不支持非消息事件"))
return
global speaker_uid, msg_id_seq, msg_store
msg = event.get_message()
text: str = msg.extract_plain_text().strip()
if text.startswith(":set"):
# 这是设置模式,用于各种调参。
if text == ":set":
# 这里显示帮助文档
asyncio.create_task(self._call_api(
self.bot, "send_text", text=":set [key] [value]"))
return
elif text == ":set grp":
# 模拟群组消息。
self.group_mode = True
asyncio.create_task(self._call_api(self.bot, "send_text",
text=f"群组模式。当前用户 {speaker_uid}。\n:set qgrp退出群组,\n:set uid xx 使用新用户身份", to_wxid=event.get_user_id()))
return
elif text == ":set qgrp":
self.group_mode = False
asyncio.create_task(self._call_api(
self.bot, "send_text", text="退出群组模式。", to_wxid=event.get_user_id()))
return
elif text.startswith(":set uid "):
uid = text.split(":set uid ")[1].strip()
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"以{uid}发言", to_wxid=event.get_user_id()))
speaker_uid = uid
return
elif text.startswith(":set tome true"):
# 从此就一直at机器人,
self.always_at = True
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"总是at机器人,有时候会造成测试问题,需要时打开", to_wxid=event.get_user_id()))
return
elif text.startswith(":set tome false"):
# 从此在群聊中需要显式at机器人
self.always_at = False
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"不再总是at机器人,在群聊中@bot才会被机器人处理,在测试中很有用", to_wxid=event.get_user_id()))
return
elif text.startswith(":set showid true"):
# 显示消息id
self.show_msg_id = True
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"开始显示消息id", to_wxid=event.get_user_id()))
return
elif text.startswith(":set showid false"):
# 不显示消息id
self.show_msg_id = False
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"不再显示消息id", to_wxid=event.get_user_id()))
return
elif text.startswith(":set"):
# 这里是设置各种参数
asyncio.create_task(self._call_api(
self.bot, "send_text", text="暂不支持的设置"))
return
# 接下来是对消息的各种特殊处理,主要支持不同的消息格式。
at_users = []
msg_id_seq += 1
if self.show_msg_id:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"发出的消息id: {msg_id_seq}", to_wxid=event.get_user_id()))
final_msg_args = {}
if '@' in text:
# @符号以后的都认为是另一个用户名
at_users = [x for x in text.split('@')[1:] if x]
text = text.split('@')[0].strip()
if text.startswith("image:"):
# 发送一个图片消息过去。
file_path = text.split("image:")[1].strip()
image_msg = WcfMessage(
WcfMessageSeg.image(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "image", text, image_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = image_msg
elif text.startswith("voice:"):
# 发送一个音频消息过去。
file_path = text.split("voice:")[1].strip()
voice_msg = WcfMessage(
WcfMessageSeg.record(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "voice", text, voice_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = voice_msg
elif text.startswith("video:"):
# 发送一个视频消息过去。
file_path = text.split("video:")[1].strip()
video_msg = WcfMessage(
WcfMessageSeg.video(file_path))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "video", text, video_msg, speaker_uid, None if not self.group_mode else "console_group")
elif text.startswith("file:"):
# 发送一个文件消息过去。
file_path = text.split("file:")[1].strip()
file_msg = WcfMessage(
WcfMessageSeg('file', {'file': file_path, 'file_name': file_path.split('/')[-1]}))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "file", text, file_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = file_msg
elif text.startswith("link:"):
splited_text = text.split("link:")[1].strip()
splited_text = splited_text.split("#")
if len(splited_text) != 4:
asyncio.create_task(self._call_api(
self.bot, "send_text", text="链接消息格式应当为>> link:title#desc#url#img_path", to_wxid=event.get_user_id()))
return
title, desc, url, img_path = splited_text
link_msg = WcfMessage(
WcfMessageSeg.share(title, desc, url, img_path))
final_msg_args['message'] = link_msg
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "link", text, link_msg, speaker_uid, None if not self.group_mode else "console_group")
elif text.startswith("refer:"):
# 发送一个引用消息过去,refer后面的就是id
refer_content = text.split("refer:")[1].strip()
splited_refer_content = refer_content.split(" ")
if len(splited_refer_content) < 2:
asyncio.create_task(self._call_api(
self.bot, "send_text", text="引用消息格式应当为>> refer:refered_msg_id textmsg。\n输入:set showid true可以显示消息的msg_id", to_wxid=event.get_user_id()))
return
refer_msg = splited_refer_content[0]
refer_text_msg = " ".join(splited_refer_content[1:])
if not refer_msg.isdigit() or int(refer_msg) not in msg_store:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"引用消息{refer_msg}不存在", to_wxid=event.get_user_id()))
return
referd_msg = extract_refer_msg(
msg_store[int(refer_msg)], refer_text_msg)
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "refer", text, referd_msg, speaker_uid, None if not self.group_mode else "console_group")
if refer_msg is None:
asyncio.create_task(self._call_api(
self.bot, "send_text", text=f"引用消息{refer_msg}解析失败,可能是被引用消息的类型未支持", to_wxid=event.get_user_id()))
return
final_msg_args['message'] = referd_msg
else:
# 发送一个文本消息过去。
text_msg = WcfMessage(
WcfMessageSeg.text(text))
msg_store[msg_id_seq] = SimpleMsg(
msg_id_seq, "text", text, text_msg, speaker_uid, None if not self.group_mode else "console_group")
final_msg_args['message'] = text_msg
if at_users:
final_msg_args['message'] = final_msg_args['message'] + [WcfMessageSeg.at(
user_id) for user_id in at_users]
final_msg_args['original_message'] = final_msg_args["message"]
final_msg_args.update({
"post_type": "message",
"time": event.time.timestamp(),
"self_id": event.self_id,
"user_id": speaker_uid,
"message_id": msg_id_seq,
"raw_message": text,
"font": 12, # meaningless for wechat, but required by onebot 11
"sender": Sender(user_id=speaker_uid),
"to_me": not self.group_mode or 'bot' in at_users or self.always_at,
})
if self.group_mode:
final_msg_args.update({
"message_type": "group",
"sub_type": "normal",
"group_id": "console_group"
}) | new_event = WcfGroupMsgEvent(**final_msg_args) | 0 | 2023-10-22 10:52:27+00:00 | 8k |
R1999RC-official/Reverse1999ResonanceCalculator | python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/factory.py | [
{
"identifier": "Candidate",
"path": "python/python_env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py",
"snippet": "def format_name(project: NormalizedName, extras: FrozenSet[NormalizedName]) -> str:\n def __init__(\n self, specifier: SpecifierSet, hashes: Hashes, links: Frozen... | import contextlib
import functools
import logging
from typing import (
TYPE_CHECKING,
Dict,
FrozenSet,
Iterable,
Iterator,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
cast,
)
from pip._vendor.packaging.requirements import InvalidRequirement
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
from pip._vendor.resolvelib import ResolutionImpossible
from pip._internal.cache import CacheEntry, WheelCache
from pip._internal.exceptions import (
DistributionNotFound,
InstallationError,
MetadataInconsistent,
UnsupportedPythonVersion,
UnsupportedWheel,
)
from pip._internal.index.package_finder import PackageFinder
from pip._internal.metadata import BaseDistribution, get_default_environment
from pip._internal.models.link import Link
from pip._internal.models.wheel import Wheel
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.constructors import install_req_from_link_and_ireq
from pip._internal.req.req_install import (
InstallRequirement,
check_invalid_constraint_type,
)
from pip._internal.resolution.base import InstallRequirementProvider
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.packaging import get_requirement
from pip._internal.utils.virtualenv import running_under_virtualenv
from .base import Candidate, CandidateVersion, Constraint, Requirement
from .candidates import (
AlreadyInstalledCandidate,
BaseCandidate,
EditableCandidate,
ExtrasCandidate,
LinkCandidate,
RequiresPythonCandidate,
as_base_candidate,
)
from .found_candidates import FoundCandidates, IndexCandidateInfo
from .requirements import (
ExplicitRequirement,
RequiresPythonRequirement,
SpecifierRequirement,
SpecifierWithoutExtrasRequirement,
UnsatisfiableRequirement,
)
from typing import Protocol | 6,015 | version=None,
)
if candidate:
yield candidate
def find_candidates(
self,
identifier: str,
requirements: Mapping[str, Iterable[Requirement]],
incompatibilities: Mapping[str, Iterator[Candidate]],
constraint: Constraint,
prefers_installed: bool,
) -> Iterable[Candidate]:
# Collect basic lookup information from the requirements.
explicit_candidates: Set[Candidate] = set()
ireqs: List[InstallRequirement] = []
for req in requirements[identifier]:
cand, ireq = req.get_candidate_lookup()
if cand is not None:
explicit_candidates.add(cand)
if ireq is not None:
ireqs.append(ireq)
# If the current identifier contains extras, add requires and explicit
# candidates from entries from extra-less identifier.
with contextlib.suppress(InvalidRequirement):
parsed_requirement = get_requirement(identifier)
if parsed_requirement.name != identifier:
explicit_candidates.update(
self._iter_explicit_candidates_from_base(
requirements.get(parsed_requirement.name, ()),
frozenset(parsed_requirement.extras),
),
)
for req in requirements.get(parsed_requirement.name, []):
_, ireq = req.get_candidate_lookup()
if ireq is not None:
ireqs.append(ireq)
# Add explicit candidates from constraints. We only do this if there are
# known ireqs, which represent requirements not already explicit. If
# there are no ireqs, we're constraining already-explicit requirements,
# which is handled later when we return the explicit candidates.
if ireqs:
try:
explicit_candidates.update(
self._iter_candidates_from_constraints(
identifier,
constraint,
template=ireqs[0],
),
)
except UnsupportedWheel:
# If we're constrained to install a wheel incompatible with the
# target architecture, no candidates will ever be valid.
return ()
# Since we cache all the candidates, incompatibility identification
# can be made quicker by comparing only the id() values.
incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())}
# If none of the requirements want an explicit candidate, we can ask
# the finder for candidates.
if not explicit_candidates:
return self._iter_found_candidates(
ireqs,
constraint.specifier,
constraint.hashes,
prefers_installed,
incompat_ids,
)
return (
c
for c in explicit_candidates
if id(c) not in incompat_ids
and constraint.is_satisfied_by(c)
and all(req.is_satisfied_by(c) for req in requirements[identifier])
)
def _make_requirements_from_install_req(
self, ireq: InstallRequirement, requested_extras: Iterable[str]
) -> Iterator[Requirement]:
"""
Returns requirement objects associated with the given InstallRequirement. In
most cases this will be a single object but the following special cases exist:
- the InstallRequirement has markers that do not apply -> result is empty
- the InstallRequirement has both a constraint and extras -> result is split
in two requirement objects: one with the constraint and one with the
extra. This allows centralized constraint handling for the base,
resulting in fewer candidate rejections.
"""
if not ireq.match_markers(requested_extras):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
ireq.name,
ireq.markers,
)
elif not ireq.link:
if ireq.extras and ireq.req is not None and ireq.req.specifier:
yield SpecifierWithoutExtrasRequirement(ireq)
yield SpecifierRequirement(ireq)
else:
self._fail_if_link_is_unsupported_wheel(ireq.link)
cand = self._make_candidate_from_link(
ireq.link,
extras=frozenset(ireq.extras),
template=ireq,
name=canonicalize_name(ireq.name) if ireq.name else None,
version=None,
)
if cand is None:
# There's no way we can satisfy a URL requirement if the underlying
# candidate fails to build. An unnamed URL must be user-supplied, so
# we fail eagerly. If the URL is named, an unsatisfiable requirement
# can make the resolver do the right thing, either backtrack (and
# maybe find some other requirement that's buildable) or raise a
# ResolutionImpossible eventually.
if not ireq.name:
raise self._build_failures[ireq.link]
|
if TYPE_CHECKING:
class ConflictCause(Protocol):
requirement: RequiresPythonRequirement
parent: Candidate
logger = logging.getLogger(__name__)
C = TypeVar("C")
Cache = Dict[Link, C]
class CollectedRootRequirements(NamedTuple):
requirements: List[Requirement]
constraints: Dict[str, Constraint]
user_requested: Dict[str, int]
class Factory:
def __init__(
self,
finder: PackageFinder,
preparer: RequirementPreparer,
make_install_req: InstallRequirementProvider,
wheel_cache: Optional[WheelCache],
use_user_site: bool,
force_reinstall: bool,
ignore_installed: bool,
ignore_requires_python: bool,
py_version_info: Optional[Tuple[int, ...]] = None,
) -> None:
self._finder = finder
self.preparer = preparer
self._wheel_cache = wheel_cache
self._python_candidate = RequiresPythonCandidate(py_version_info)
self._make_install_req_from_spec = make_install_req
self._use_user_site = use_user_site
self._force_reinstall = force_reinstall
self._ignore_requires_python = ignore_requires_python
self._build_failures: Cache[InstallationError] = {}
self._link_candidate_cache: Cache[LinkCandidate] = {}
self._editable_candidate_cache: Cache[EditableCandidate] = {}
self._installed_candidate_cache: Dict[str, AlreadyInstalledCandidate] = {}
self._extras_candidate_cache: Dict[
Tuple[int, FrozenSet[NormalizedName]], ExtrasCandidate
] = {}
if not ignore_installed:
env = get_default_environment()
self._installed_dists = {
dist.canonical_name: dist
for dist in env.iter_installed_distributions(local_only=False)
}
else:
self._installed_dists = {}
@property
def force_reinstall(self) -> bool:
return self._force_reinstall
def _fail_if_link_is_unsupported_wheel(self, link: Link) -> None:
if not link.is_wheel:
return
wheel = Wheel(link.filename)
if wheel.supported(self._finder.target_python.get_unsorted_tags()):
return
msg = f"{link.filename} is not a supported wheel on this platform."
raise UnsupportedWheel(msg)
def _make_extras_candidate(
self,
base: BaseCandidate,
extras: FrozenSet[str],
*,
comes_from: Optional[InstallRequirement] = None,
) -> ExtrasCandidate:
cache_key = (id(base), frozenset(canonicalize_name(e) for e in extras))
try:
candidate = self._extras_candidate_cache[cache_key]
except KeyError:
candidate = ExtrasCandidate(base, extras, comes_from=comes_from)
self._extras_candidate_cache[cache_key] = candidate
return candidate
def _make_candidate_from_dist(
self,
dist: BaseDistribution,
extras: FrozenSet[str],
template: InstallRequirement,
) -> Candidate:
try:
base = self._installed_candidate_cache[dist.canonical_name]
except KeyError:
base = AlreadyInstalledCandidate(dist, template, factory=self)
self._installed_candidate_cache[dist.canonical_name] = base
if not extras:
return base
return self._make_extras_candidate(base, extras, comes_from=template)
def _make_candidate_from_link(
self,
link: Link,
extras: FrozenSet[str],
template: InstallRequirement,
name: Optional[NormalizedName],
version: Optional[CandidateVersion],
) -> Optional[Candidate]:
# TODO: Check already installed candidate, and use it if the link and
# editable flag match.
if link in self._build_failures:
# We already tried this candidate before, and it does not build.
# Don't bother trying again.
return None
if template.editable:
if link not in self._editable_candidate_cache:
try:
self._editable_candidate_cache[link] = EditableCandidate(
link,
template,
factory=self,
name=name,
version=version,
)
except MetadataInconsistent as e:
logger.info(
"Discarding [blue underline]%s[/]: [yellow]%s[reset]",
link,
e,
extra={"markup": True},
)
self._build_failures[link] = e
return None
base: BaseCandidate = self._editable_candidate_cache[link]
else:
if link not in self._link_candidate_cache:
try:
self._link_candidate_cache[link] = LinkCandidate(
link,
template,
factory=self,
name=name,
version=version,
)
except MetadataInconsistent as e:
logger.info(
"Discarding [blue underline]%s[/]: [yellow]%s[reset]",
link,
e,
extra={"markup": True},
)
self._build_failures[link] = e
return None
base = self._link_candidate_cache[link]
if not extras:
return base
return self._make_extras_candidate(base, extras, comes_from=template)
def _iter_found_candidates(
self,
ireqs: Sequence[InstallRequirement],
specifier: SpecifierSet,
hashes: Hashes,
prefers_installed: bool,
incompatible_ids: Set[int],
) -> Iterable[Candidate]:
if not ireqs:
return ()
# The InstallRequirement implementation requires us to give it a
# "template". Here we just choose the first requirement to represent
# all of them.
# Hopefully the Project model can correct this mismatch in the future.
template = ireqs[0]
assert template.req, "Candidates found on index must be PEP 508"
name = canonicalize_name(template.req.name)
extras: FrozenSet[str] = frozenset()
for ireq in ireqs:
assert ireq.req, "Candidates found on index must be PEP 508"
specifier &= ireq.req.specifier
hashes &= ireq.hashes(trust_internet=False)
extras |= frozenset(ireq.extras)
def _get_installed_candidate() -> Optional[Candidate]:
"""Get the candidate for the currently-installed version."""
# If --force-reinstall is set, we want the version from the index
# instead, so we "pretend" there is nothing installed.
if self._force_reinstall:
return None
try:
installed_dist = self._installed_dists[name]
except KeyError:
return None
# Don't use the installed distribution if its version does not fit
# the current dependency graph.
if not specifier.contains(installed_dist.version, prereleases=True):
return None
candidate = self._make_candidate_from_dist(
dist=installed_dist,
extras=extras,
template=template,
)
# The candidate is a known incompatibility. Don't use it.
if id(candidate) in incompatible_ids:
return None
return candidate
def iter_index_candidate_infos() -> Iterator[IndexCandidateInfo]:
result = self._finder.find_best_candidate(
project_name=name,
specifier=specifier,
hashes=hashes,
)
icans = list(result.iter_applicable())
# PEP 592: Yanked releases are ignored unless the specifier
# explicitly pins a version (via '==' or '===') that can be
# solely satisfied by a yanked release.
all_yanked = all(ican.link.is_yanked for ican in icans)
def is_pinned(specifier: SpecifierSet) -> bool:
for sp in specifier:
if sp.operator == "===":
return True
if sp.operator != "==":
continue
if sp.version.endswith(".*"):
continue
return True
return False
pinned = is_pinned(specifier)
# PackageFinder returns earlier versions first, so we reverse.
for ican in reversed(icans):
if not (all_yanked and pinned) and ican.link.is_yanked:
continue
func = functools.partial(
self._make_candidate_from_link,
link=ican.link,
extras=extras,
template=template,
name=name,
version=ican.version,
)
yield ican.version, func
return FoundCandidates(
iter_index_candidate_infos,
_get_installed_candidate(),
prefers_installed,
incompatible_ids,
)
def _iter_explicit_candidates_from_base(
self,
base_requirements: Iterable[Requirement],
extras: FrozenSet[str],
) -> Iterator[Candidate]:
"""Produce explicit candidates from the base given an extra-ed package.
:param base_requirements: Requirements known to the resolver. The
requirements are guaranteed to not have extras.
:param extras: The extras to inject into the explicit requirements'
candidates.
"""
for req in base_requirements:
lookup_cand, _ = req.get_candidate_lookup()
if lookup_cand is None: # Not explicit.
continue
# We've stripped extras from the identifier, and should always
# get a BaseCandidate here, unless there's a bug elsewhere.
base_cand = as_base_candidate(lookup_cand)
assert base_cand is not None, "no extras here"
yield self._make_extras_candidate(base_cand, extras)
def _iter_candidates_from_constraints(
self,
identifier: str,
constraint: Constraint,
template: InstallRequirement,
) -> Iterator[Candidate]:
"""Produce explicit candidates from constraints.
This creates "fake" InstallRequirement objects that are basically clones
of what "should" be the template, but with original_link set to link.
"""
for link in constraint.links:
self._fail_if_link_is_unsupported_wheel(link)
candidate = self._make_candidate_from_link(
link,
extras=frozenset(),
template=install_req_from_link_and_ireq(link, template),
name=canonicalize_name(identifier),
version=None,
)
if candidate:
yield candidate
def find_candidates(
self,
identifier: str,
requirements: Mapping[str, Iterable[Requirement]],
incompatibilities: Mapping[str, Iterator[Candidate]],
constraint: Constraint,
prefers_installed: bool,
) -> Iterable[Candidate]:
# Collect basic lookup information from the requirements.
explicit_candidates: Set[Candidate] = set()
ireqs: List[InstallRequirement] = []
for req in requirements[identifier]:
cand, ireq = req.get_candidate_lookup()
if cand is not None:
explicit_candidates.add(cand)
if ireq is not None:
ireqs.append(ireq)
# If the current identifier contains extras, add requires and explicit
# candidates from entries from extra-less identifier.
with contextlib.suppress(InvalidRequirement):
parsed_requirement = get_requirement(identifier)
if parsed_requirement.name != identifier:
explicit_candidates.update(
self._iter_explicit_candidates_from_base(
requirements.get(parsed_requirement.name, ()),
frozenset(parsed_requirement.extras),
),
)
for req in requirements.get(parsed_requirement.name, []):
_, ireq = req.get_candidate_lookup()
if ireq is not None:
ireqs.append(ireq)
# Add explicit candidates from constraints. We only do this if there are
# known ireqs, which represent requirements not already explicit. If
# there are no ireqs, we're constraining already-explicit requirements,
# which is handled later when we return the explicit candidates.
if ireqs:
try:
explicit_candidates.update(
self._iter_candidates_from_constraints(
identifier,
constraint,
template=ireqs[0],
),
)
except UnsupportedWheel:
# If we're constrained to install a wheel incompatible with the
# target architecture, no candidates will ever be valid.
return ()
# Since we cache all the candidates, incompatibility identification
# can be made quicker by comparing only the id() values.
incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())}
# If none of the requirements want an explicit candidate, we can ask
# the finder for candidates.
if not explicit_candidates:
return self._iter_found_candidates(
ireqs,
constraint.specifier,
constraint.hashes,
prefers_installed,
incompat_ids,
)
return (
c
for c in explicit_candidates
if id(c) not in incompat_ids
and constraint.is_satisfied_by(c)
and all(req.is_satisfied_by(c) for req in requirements[identifier])
)
def _make_requirements_from_install_req(
self, ireq: InstallRequirement, requested_extras: Iterable[str]
) -> Iterator[Requirement]:
"""
Returns requirement objects associated with the given InstallRequirement. In
most cases this will be a single object but the following special cases exist:
- the InstallRequirement has markers that do not apply -> result is empty
- the InstallRequirement has both a constraint and extras -> result is split
in two requirement objects: one with the constraint and one with the
extra. This allows centralized constraint handling for the base,
resulting in fewer candidate rejections.
"""
if not ireq.match_markers(requested_extras):
logger.info(
"Ignoring %s: markers '%s' don't match your environment",
ireq.name,
ireq.markers,
)
elif not ireq.link:
if ireq.extras and ireq.req is not None and ireq.req.specifier:
yield SpecifierWithoutExtrasRequirement(ireq)
yield SpecifierRequirement(ireq)
else:
self._fail_if_link_is_unsupported_wheel(ireq.link)
cand = self._make_candidate_from_link(
ireq.link,
extras=frozenset(ireq.extras),
template=ireq,
name=canonicalize_name(ireq.name) if ireq.name else None,
version=None,
)
if cand is None:
# There's no way we can satisfy a URL requirement if the underlying
# candidate fails to build. An unnamed URL must be user-supplied, so
# we fail eagerly. If the URL is named, an unsatisfiable requirement
# can make the resolver do the right thing, either backtrack (and
# maybe find some other requirement that's buildable) or raise a
# ResolutionImpossible eventually.
if not ireq.name:
raise self._build_failures[ireq.link] | yield UnsatisfiableRequirement(canonicalize_name(ireq.name)) | 7 | 2023-10-24 06:48:58+00:00 | 8k |
mentpy/mentpy | mentpy/mbqc/view.py | [
{
"identifier": "MBQCircuit",
"path": "mentpy/mbqc/mbqcircuit.py",
"snippet": "class MBQCircuit:\n r\"\"\"The MBQCircuit class that deals with operations and manipulations of graph states\n\n Parameters\n ----------\n graph: mp.GraphState\n The graph state of the MBQC circuit.\n in... | from typing import Union
from mentpy.mbqc.mbqcircuit import MBQCircuit
from mentpy.mbqc.states.graphstate import GraphState
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx | 4,924 | # Copyright 2023 Luis Mantilla
#
# Licensed under the Apache License, Version 2.0.
# See <http://www.apache.org/licenses/LICENSE-2.0> for details.
"""A module for drawing MBQC circuits."""
__all__ = ["draw"]
DEFAULT_NODE_COLOR = "#FFBD59"
INPUT_NODE_COLOR = "#ADD8E6"
OUTPUT_NODE_COLOR = "#ADD8E6"
CONTROLLED_NODE_COLOR = "#A88FE8"
UNTRAINABLE_NODE_COLOR = "#CCCCCC"
def get_node_colors(state, style="default"):
"""Return node colors based on the state and style."""
possible_styles = ("default", "black_and_white", "blue_inputs")
assert style in possible_styles, f"Style must be one of {possible_styles}"
node_colors = {}
# Base Coloring
for i in state.graph.nodes():
if i in state.controlled_nodes:
node_colors[i] = CONTROLLED_NODE_COLOR
elif i in state.quantum_output_nodes:
node_colors[i] = OUTPUT_NODE_COLOR
elif i in set(state.nodes()) - set(state.trainable_nodes):
node_colors[i] = UNTRAINABLE_NODE_COLOR
else:
node_colors[i] = DEFAULT_NODE_COLOR
# Style-based Adjustments
if style == "black_and_white":
node_colors = {i: "#FFFFFF" for i in state.graph.nodes()}
elif style == "blue_inputs":
for i in state.input_nodes:
node_colors[i] = INPUT_NODE_COLOR
return node_colors
def get_options(kwargs) -> dict:
"""Returns default options updated with user-defined values."""
default_options = {
"node_color": "white",
"font_family": "Dejavu Sans",
"font_weight": "medium",
"font_size": 10,
"edgecolors": "k",
"node_size": 500,
"edge_color": "grey",
"edge_color_control": "#CCCCCC",
"with_labels": True,
"label": "indices",
"transparent": True,
"figsize": (8, 3),
"show_controls": True,
"show_flow": True,
"pauliop": None,
"style": "default",
}
# Update default options with any provided by the user
default_options.update(kwargs)
return default_options
| # Copyright 2023 Luis Mantilla
#
# Licensed under the Apache License, Version 2.0.
# See <http://www.apache.org/licenses/LICENSE-2.0> for details.
"""A module for drawing MBQC circuits."""
__all__ = ["draw"]
DEFAULT_NODE_COLOR = "#FFBD59"
INPUT_NODE_COLOR = "#ADD8E6"
OUTPUT_NODE_COLOR = "#ADD8E6"
CONTROLLED_NODE_COLOR = "#A88FE8"
UNTRAINABLE_NODE_COLOR = "#CCCCCC"
def get_node_colors(state, style="default"):
"""Return node colors based on the state and style."""
possible_styles = ("default", "black_and_white", "blue_inputs")
assert style in possible_styles, f"Style must be one of {possible_styles}"
node_colors = {}
# Base Coloring
for i in state.graph.nodes():
if i in state.controlled_nodes:
node_colors[i] = CONTROLLED_NODE_COLOR
elif i in state.quantum_output_nodes:
node_colors[i] = OUTPUT_NODE_COLOR
elif i in set(state.nodes()) - set(state.trainable_nodes):
node_colors[i] = UNTRAINABLE_NODE_COLOR
else:
node_colors[i] = DEFAULT_NODE_COLOR
# Style-based Adjustments
if style == "black_and_white":
node_colors = {i: "#FFFFFF" for i in state.graph.nodes()}
elif style == "blue_inputs":
for i in state.input_nodes:
node_colors[i] = INPUT_NODE_COLOR
return node_colors
def get_options(kwargs) -> dict:
"""Returns default options updated with user-defined values."""
default_options = {
"node_color": "white",
"font_family": "Dejavu Sans",
"font_weight": "medium",
"font_size": 10,
"edgecolors": "k",
"node_size": 500,
"edge_color": "grey",
"edge_color_control": "#CCCCCC",
"with_labels": True,
"label": "indices",
"transparent": True,
"figsize": (8, 3),
"show_controls": True,
"show_flow": True,
"pauliop": None,
"style": "default",
}
# Update default options with any provided by the user
default_options.update(kwargs)
return default_options
| def draw(state: Union[MBQCircuit, GraphState], fix_wires=None, **kwargs): | 0 | 2023-10-18 18:29:42+00:00 | 8k |
rnag/cert-hero | tests/integration/test_cert_hero.py | [
{
"identifier": "cert_please",
"path": "cert_hero/cert_hero.py",
"snippet": "def cert_please(hostname: str,\n context: ssl.SSLContext = None,\n user_agent: str | None = _DEFAULT_USER_AGENT,\n default_encoding='latin-1',\n ) -> CertHero[str, str... | import json
from cert_hero import cert_please, certs_please, set_expired | 3,610 |
def test_cert_please():
cert = cert_please('google.com')
print('Cert is Valid Till:', cert.not_after_date.isoformat())
# To get the output as a JSON string, use `str(cert)` or remove `!r` from below
print(f'Cert -> \n{cert!r}')
assert cert['Subject Name']['Common Name'] == '*.google.com'
set_expired(cert)
print(f'Validity ->\n{cert["Validity"]}')
# assert the cert is still valid!
assert not cert['Validity']['Expired']
def test_certs_please():
|
def test_cert_please():
cert = cert_please('google.com')
print('Cert is Valid Till:', cert.not_after_date.isoformat())
# To get the output as a JSON string, use `str(cert)` or remove `!r` from below
print(f'Cert -> \n{cert!r}')
assert cert['Subject Name']['Common Name'] == '*.google.com'
set_expired(cert)
print(f'Validity ->\n{cert["Validity"]}')
# assert the cert is still valid!
assert not cert['Validity']['Expired']
def test_certs_please(): | host_to_cert = certs_please(['google.com', 'cnn.com', 'www.yahoo.co.in', 'youtu.be']) | 1 | 2023-10-16 19:02:05+00:00 | 8k |
KosinskiLab/pyTME | tme/structure.py | [
{
"identifier": "PDBParser",
"path": "tme/parser.py",
"snippet": "class PDBParser(Parser):\n \"\"\"\n A Parser subclass for converting PDB file data into a dictionary representation.\n This class is specifically designed to work with PDB file format.\n\n References\n ----------\n .. [1... | import warnings
import numpy as np
from copy import deepcopy
from collections import namedtuple
from typing import List, Dict, Tuple
from itertools import groupby
from dataclasses import dataclass
from os.path import splitext, basename
from .parser import PDBParser, MMCIFParser
from .matching_utils import (
rigid_transform,
_format_mmcif_colunns,
minimum_enclosing_box,
)
from .helpers import atom_profile
from .types import NDArray | 5,001 | """ Implements class Structure to represent atomic structures.
Copyright (c) 2023 European Molecular Biology Laboratory
Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
"""
@dataclass(repr=False)
class Structure:
"""Represents atomic structures in accordance with the Protein Data Bank (PDB)
format specification.
Attributes
----------
record_type : NDArray
Type of the record, e.g., ATOM, HETATM. Array shape = (n,)
atom_serial_number : NDArray
Serial number assigned to each atom. Array shape = (n,)
atom_name : NDArray
Standardized names for each atom. Array shape = (n,)
atom_coordinate : NDArray
The 3D Cartesian coordinates of each atom in x, y, z. Array shape = (n,3 )
alternate_location_indicator : NDArray
Indicator for alternate locations of an atom if it exists in multiple places.
Array shape = (n,)
residue_name : NDArray
Standard residue names where each atom belongs. Array shape = (n,)
chain_identifier : NDArray
Identifier for the chain where each atom is located. Array shape = (n,)
residue_sequence_number : NDArray
Sequence number of the residue in the protein chain for each atom.
Array shape = (n,)
code_for_residue_insertion : NDArray
Code to denote any residue insertion. Array shape = (n,)
occupancy : NDArray
Occupancy factor of each atom, indicating the fraction of time the atom
is located at its position. Array shape = (n,)
temperature_factor : NDArray
Measure of the atomic displacement or B-factor for each atom. Array shape = (n,)
segment_identifier : NDArray
Identifier for the segment where each atom belongs. Array shape = (n,)
element_symbol : NDArray
Atomic element symbol for each atom. Array shape = (n,)
charge : NDArray
Charge on the atom. Array shape = (n,)
details : dict
Any additional or auxiliary details. Array shape = (n,)
References
----------
.. [1] https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html
"""
#: Return a numpy array with record types, e.g. ATOM, HETATM.
| """ Implements class Structure to represent atomic structures.
Copyright (c) 2023 European Molecular Biology Laboratory
Author: Valentin Maurer <valentin.maurer@embl-hamburg.de>
"""
@dataclass(repr=False)
class Structure:
"""Represents atomic structures in accordance with the Protein Data Bank (PDB)
format specification.
Attributes
----------
record_type : NDArray
Type of the record, e.g., ATOM, HETATM. Array shape = (n,)
atom_serial_number : NDArray
Serial number assigned to each atom. Array shape = (n,)
atom_name : NDArray
Standardized names for each atom. Array shape = (n,)
atom_coordinate : NDArray
The 3D Cartesian coordinates of each atom in x, y, z. Array shape = (n,3 )
alternate_location_indicator : NDArray
Indicator for alternate locations of an atom if it exists in multiple places.
Array shape = (n,)
residue_name : NDArray
Standard residue names where each atom belongs. Array shape = (n,)
chain_identifier : NDArray
Identifier for the chain where each atom is located. Array shape = (n,)
residue_sequence_number : NDArray
Sequence number of the residue in the protein chain for each atom.
Array shape = (n,)
code_for_residue_insertion : NDArray
Code to denote any residue insertion. Array shape = (n,)
occupancy : NDArray
Occupancy factor of each atom, indicating the fraction of time the atom
is located at its position. Array shape = (n,)
temperature_factor : NDArray
Measure of the atomic displacement or B-factor for each atom. Array shape = (n,)
segment_identifier : NDArray
Identifier for the segment where each atom belongs. Array shape = (n,)
element_symbol : NDArray
Atomic element symbol for each atom. Array shape = (n,)
charge : NDArray
Charge on the atom. Array shape = (n,)
details : dict
Any additional or auxiliary details. Array shape = (n,)
References
----------
.. [1] https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html
"""
#: Return a numpy array with record types, e.g. ATOM, HETATM. | record_type: NDArray | 6 | 2023-10-20 13:46:01+00:00 | 8k |
KaichengGroup/FUSE-Flow | FUSE_Flow/fuse_flow.py | [
{
"identifier": "DataAugmentation",
"path": "data_modules/augmentation.py",
"snippet": "class DataAugmentation(nn.Module):\n \"\"\"Module to perform data augmentation using Kornia on torch tensors.\"\"\"\n\n def __init__(self, config) -> None:\n super().__init__()\n aug_list = []\n ... | import math
import numpy as np
import pytorch_lightning as pl
import torch
from torch import nn
from data_modules.augmentation import DataAugmentation
from FUSE_Flow.other_modules.utils import quantize, ae_losses, PRETRAIN_PATH, DequantizationType, AEInit
from FUSE_Flow.normalizing_flow.dequantization_flow import DequantizationFlow
from FUSE_Flow.normalizing_flow.generative_flow import GenerativeFlow
from FUSE_Flow.other_modules.adaptive_unet import AdaptiveUNet, DownsampleBlock
from FUSE_Flow.other_modules.conv_modules.conv_block import ConvBlock
from FUSE_Flow.other_modules.dequantize import Dequantization
from FUSE_Flow.other_modules.gated_resnet import GatedResidualNet | 5,476 |
# height is arbitrarily chosen instead of width for comparison
c_x, h_x, _ = input_shape
c_y, h_y, _ = output_shape
# initialize dequantization
if not ablation['no_flow']:
if ablation['dequantization'] == DequantizationType.var:
deq_flow = DequantizationFlow(
est_arch=GatedResidualNet,
factor=factor,
n_flow=hyper['dequantization']['n_step'],
c_x=c_x * factor ** 2,
c_u=c_x,
ablation=ablation,
hyper=hyper['estimators']
)
downsample = DownsampleBlock(c_x, c_x, c_x, hyper['dequantization']['n_conv'],
AEInit.xavier, ablation["attention_type"],
hyper['estimators']['attn_red_ratio'])
else:
deq_flow = None
downsample = None
self.dequantizer = Dequantization(
flow=deq_flow,
downsample=downsample,
perturbation_type=ablation['dequantization'],
quantums=quantums
)
# initialize autoencoder
if not ablation['no_autoencoder']:
self.adaptive_unet = AdaptiveUNet(
d_x=h_x,
d_y=h_y,
factor=factor,
add_depth=hyper['flow']['n_scale_add'],
c_in=c_x,
c_hid=hyper['autoencoder']['c_u'],
n_conv=hyper['autoencoder']['n_conv'],
no_skip=ablation['no_skip'],
attention_type=ablation['attention_type'],
attn_red_ratio=hyper['autoencoder']['attn_red_ratio'],
)
if not ablation['no_pretrain']:
state_dict = torch.load(PRETRAIN_PATH)['state_dict']
for key, value in state_dict.copy().items():
module_levels = key.split('.')
if module_levels[0] != 'adaptive_unet':
del state_dict[key]
else:
state_dict['.'.join(module_levels[1:])] = state_dict.pop(key)
self.adaptive_unet.load_state_dict(state_dict)
if not ablation['no_freeze']:
self.adaptive_unet.freeze()
# initialize main generative normalizing flow
if not ablation['no_flow']:
# scale difference between input and output
scale = int(max(h_x, h_y)/min(h_x, h_y))
# number of scale blocks in normalizing flow
# log_factor(pixel_scale) + 1 is the minimum
# log_factor(pixel_scale) + 1 + n is the maximum
# where n is the largest value where input_shape[1]/factor**n is odd
n_scale = int(math.log(scale, factor) + 1 + hyper['flow']['n_scale_add'])
self.normalizing_flow = GenerativeFlow(
est_arch=GatedResidualNet,
output_shape=output_shape,
n_scale=n_scale,
factor=factor,
n_flow=hyper['flow']['n_step'],
c_u=hyper['autoencoder']['c_u'] if not ablation['no_autoencoder'] else 0,
ablation=ablation,
hyper=hyper['estimators']
)
else:
scale = int(max(h_x, h_y)/min(h_x, h_y))
max_depth = int(math.log(scale, factor) + 1 + hyper['flow']['n_scale_add'])
self.output_block = ConvBlock(
nn.Conv2d,
hyper['autoencoder']['c_u'] // (factor ** (max_depth-1)),
c_x, 3, 1, 1,
AEInit.xavier,
ablation['attention_type'],
hyper['estimators']['attn_red_ratio']
)
self.sigmoid = nn.Sigmoid()
self.ae_loss = ae_losses[ablation['autoencoder_loss']]
def forward(self, lr):
"""Training.
Parameters
----------
lr : torch.Tensor
Returns
-------
loss : torch.Tensor
"""
x = lr.repeat(self.sample_size, 1, 1, 1)
if not self.ablation['no_autoencoder']:
u_dict = self.adaptive_unet(x)
else:
u_dict = None
if not self.ablation['no_flow']:
x, _ = self.normalizing_flow(
x=x.shape[0],
u_dict=u_dict,
prior=self.prior,
reverse=True
)
sr, _ = self.dequantizer(x, reverse=True)
else:
y = self.sigmoid(self.output_block(u_dict[max(u_dict.keys())]))
|
class FUSEFlow(pl.LightningModule):
"""Implementation of FUSE-Flow.
Based on the paper:
"Quantitative mapping of unsectioned histology with fibre optic ultraviolet excitation and generative modelling"
by Joel Lang Yi Ang, Ko Hui Tan, Alexander Si Kai Yong, Chiyo Wan Xuan Tan, Jessica Sze Jia Kng,
Cyrus Jia Jun Tan, Rachael Hui Kie Soh, Julian Yi Hong Tan, Kaicheng Liang
Parameters
----------
output_shape : tuple
Shape of output image.
ablation : dict
Ablation configurations.
hyper : dict
Hyper-parameter configurations.
sample_size : int
Number of samples to draw from learned posterior distribution.
quantums : int
Number of possible discrete values (usually 256 for 8-bit image).
"""
def __init__(self, input_shape, output_shape, ablation, hyper, temperature, augmentations,
sample_size=None, quantums=256):
super().__init__()
self.prior = None
self.ablation = ablation
self.hyper = hyper
self.temperature = temperature
self.sample_size = sample_size
self.aug = DataAugmentation(augmentations)
# factor at which data expands or shrinks
factor = hyper['factor']
# height is arbitrarily chosen instead of width for comparison
c_x, h_x, _ = input_shape
c_y, h_y, _ = output_shape
# initialize dequantization
if not ablation['no_flow']:
if ablation['dequantization'] == DequantizationType.var:
deq_flow = DequantizationFlow(
est_arch=GatedResidualNet,
factor=factor,
n_flow=hyper['dequantization']['n_step'],
c_x=c_x * factor ** 2,
c_u=c_x,
ablation=ablation,
hyper=hyper['estimators']
)
downsample = DownsampleBlock(c_x, c_x, c_x, hyper['dequantization']['n_conv'],
AEInit.xavier, ablation["attention_type"],
hyper['estimators']['attn_red_ratio'])
else:
deq_flow = None
downsample = None
self.dequantizer = Dequantization(
flow=deq_flow,
downsample=downsample,
perturbation_type=ablation['dequantization'],
quantums=quantums
)
# initialize autoencoder
if not ablation['no_autoencoder']:
self.adaptive_unet = AdaptiveUNet(
d_x=h_x,
d_y=h_y,
factor=factor,
add_depth=hyper['flow']['n_scale_add'],
c_in=c_x,
c_hid=hyper['autoencoder']['c_u'],
n_conv=hyper['autoencoder']['n_conv'],
no_skip=ablation['no_skip'],
attention_type=ablation['attention_type'],
attn_red_ratio=hyper['autoencoder']['attn_red_ratio'],
)
if not ablation['no_pretrain']:
state_dict = torch.load(PRETRAIN_PATH)['state_dict']
for key, value in state_dict.copy().items():
module_levels = key.split('.')
if module_levels[0] != 'adaptive_unet':
del state_dict[key]
else:
state_dict['.'.join(module_levels[1:])] = state_dict.pop(key)
self.adaptive_unet.load_state_dict(state_dict)
if not ablation['no_freeze']:
self.adaptive_unet.freeze()
# initialize main generative normalizing flow
if not ablation['no_flow']:
# scale difference between input and output
scale = int(max(h_x, h_y)/min(h_x, h_y))
# number of scale blocks in normalizing flow
# log_factor(pixel_scale) + 1 is the minimum
# log_factor(pixel_scale) + 1 + n is the maximum
# where n is the largest value where input_shape[1]/factor**n is odd
n_scale = int(math.log(scale, factor) + 1 + hyper['flow']['n_scale_add'])
self.normalizing_flow = GenerativeFlow(
est_arch=GatedResidualNet,
output_shape=output_shape,
n_scale=n_scale,
factor=factor,
n_flow=hyper['flow']['n_step'],
c_u=hyper['autoencoder']['c_u'] if not ablation['no_autoencoder'] else 0,
ablation=ablation,
hyper=hyper['estimators']
)
else:
scale = int(max(h_x, h_y)/min(h_x, h_y))
max_depth = int(math.log(scale, factor) + 1 + hyper['flow']['n_scale_add'])
self.output_block = ConvBlock(
nn.Conv2d,
hyper['autoencoder']['c_u'] // (factor ** (max_depth-1)),
c_x, 3, 1, 1,
AEInit.xavier,
ablation['attention_type'],
hyper['estimators']['attn_red_ratio']
)
self.sigmoid = nn.Sigmoid()
self.ae_loss = ae_losses[ablation['autoencoder_loss']]
def forward(self, lr):
"""Training.
Parameters
----------
lr : torch.Tensor
Returns
-------
loss : torch.Tensor
"""
x = lr.repeat(self.sample_size, 1, 1, 1)
if not self.ablation['no_autoencoder']:
u_dict = self.adaptive_unet(x)
else:
u_dict = None
if not self.ablation['no_flow']:
x, _ = self.normalizing_flow(
x=x.shape[0],
u_dict=u_dict,
prior=self.prior,
reverse=True
)
sr, _ = self.dequantizer(x, reverse=True)
else:
y = self.sigmoid(self.output_block(u_dict[max(u_dict.keys())])) | sr = quantize(y, 256) | 1 | 2023-10-19 06:49:31+00:00 | 8k |
TheAcharya/Airlift | airlift/cli.py | [
{
"identifier": "__version__",
"path": "airlift/version.py",
"snippet": ""
},
{
"identifier": "CriticalError",
"path": "airlift/utils_exceptions.py",
"snippet": "class CriticalError(Exception):\n \"\"\"Exception raised when a generic critical error occurs.\"\"\""
},
{
"identif... | import logging
import os
import signal
import sys
import pathlib
from pathlib import Path
from typing import Any, Optional
from airlift.version import __version__
from airlift.utils_exceptions import CriticalError,AirtableError
from airlift.cli_args import parse_args
from airlift.csv_data import csv_read
from airlift.airtable_upload import Upload
from airlift.json_data import json_read
from airlift.airtable_client import new_client
from airlift.dropbox_client import dropbox_client,change_refresh_access_token
from icecream import ic | 4,581 |
logger = logging.getLogger(__name__)
def abort(*_: Any) -> None: # pragma: no cover
print("\nAbort") # noqa: WPS421
os._exit(1)
def cli(*argv: str) -> None:
ic.disable()
args = parse_args(argv)
setup_logging(is_verbose=args.verbose,log_file=args.log)
logger.info(f"Airlift version {__version__}")
workers = args.workers if args.workers else 5
if not args.dropbox_refresh_token: #if dropbox-refresh-token flag is not present, continue normal procedure
#creating drop box client
if args.dropbox_token:
dbx = dropbox_client(args.dropbox_token,args.md)
else:
dbx = None
#creating airtable client
airtable_client = new_client(token=args.token,base=args.base,table=args.table)
logger.info(f"Validating {args.csv_file.name} and Airtable Schema")
suffix = pathlib.Path(args.csv_file.name).suffix
#converting data into airtable supported format
if "csv" in suffix:
|
logger = logging.getLogger(__name__)
def abort(*_: Any) -> None: # pragma: no cover
print("\nAbort") # noqa: WPS421
os._exit(1)
def cli(*argv: str) -> None:
ic.disable()
args = parse_args(argv)
setup_logging(is_verbose=args.verbose,log_file=args.log)
logger.info(f"Airlift version {__version__}")
workers = args.workers if args.workers else 5
if not args.dropbox_refresh_token: #if dropbox-refresh-token flag is not present, continue normal procedure
#creating drop box client
if args.dropbox_token:
dbx = dropbox_client(args.dropbox_token,args.md)
else:
dbx = None
#creating airtable client
airtable_client = new_client(token=args.token,base=args.base,table=args.table)
logger.info(f"Validating {args.csv_file.name} and Airtable Schema")
suffix = pathlib.Path(args.csv_file.name).suffix
#converting data into airtable supported format
if "csv" in suffix: | data = csv_read(args.csv_file,args.fail_on_duplicate_csv_columns) | 4 | 2023-10-21 01:57:41+00:00 | 8k |
DegangWang97/IEEE_TGRS_PDBSNet | main.py | [
{
"identifier": "PDBSNet",
"path": "model.py",
"snippet": "class PDBSNet(nn.Module):\n def __init__(self, nch_in=189, nch_out=189, nch_ker=64, nblk=9):\n super().__init__()\n\n ly = []\n ly += [ nn.Conv2d(nch_in, nch_ker, kernel_size=1) ]\n ly += [ nn.ReLU(inplace=True) ]\... | import argparse
import torch
import torch.nn as nn
import scipy.io as sio
import os
import numpy as np
import time
from model import PDBSNet
from dataset import PDBSNetData, pixel_shuffle_up_sampling, pixel_shuffle_down_sampling
from utils import get_auc, setup_seed, TensorToHSI
from torch import optim
from torch.utils.tensorboard import SummaryWriter
| 3,860 | print('Epoch {}/{}'.format(epoch + 1, self.opt.epochs), file = self.log_output)
print('-' * 50)
# run training epoch
self.train_epoch()
if self.scheduler is not None:
self.scheduler.step()
return self.model
def train_model(opt):
DB = opt.dataset
expr_dir = os.path.join('./checkpoints/', DB)
if not os.path.exists(expr_dir):
os.makedirs(expr_dir)
prefix = 'PDBSNet' + '_epoch_' + str(opt.epochs)+ '_learning_rate_' + str(opt.learning_rate) + '_factor_train_' + str(opt.factor_train) + '_gpu_ids_' + str(opt.gpu_ids)
trainfile = os.path.join(expr_dir, prefix)
if not os.path.exists(trainfile):
os.makedirs(trainfile)
# Device
device = torch.device('cuda:{}'.format(opt.gpu_ids)) if torch.cuda.is_available() else torch.device('cpu')
# Directories for storing model and output samples
model_path = os.path.join(trainfile, 'model')
logs_path = os.path.join(trainfile, './logs')
setup_seed(opt.seed)
loader_train, band = PDBSNetData(opt)
net = PDBSNet(band, band, nch_ker=opt.nch_ker, nblk=opt.nblk).to(device)
# Define Optimizers and Loss
optimizer = optim.Adam(net.parameters(), lr=opt.learning_rate, betas=(0.5, 0.999), weight_decay=opt.weight_decay)
scheduler_net = None
if opt.lossm.lower() == 'l1':
criterion = nn.L1Loss().to(device) # Regression loss: L1
elif opt.lossm.lower() == 'l2':
criterion = nn.MSELoss().to(device) # Regression loss: L2
if torch.cuda.is_available():
print('Model moved to CUDA compute device.')
else:
print('No CUDA available, running on CPU!')
# Training
t_begin = time.time()
trainer = Trainer(opt,
net,
criterion,
optimizer,
loader_train,
device,
model_path,
logs_path,
scheduler=scheduler_net)
trainer.train()
t_end = time.time()
print('Time of training-{}'.format((t_end - t_begin)))
def predict(opt):
DB = opt.dataset
expr_dir = os.path.join('./checkpoints/', DB)
prefix = 'PDBSNet' + '_epoch_' + str(opt.epochs)+ '_learning_rate_' + str(opt.learning_rate) + '_factor_train_' + str(opt.factor_train) + '_gpu_ids_' + str(opt.gpu_ids)
trainfile = os.path.join(expr_dir, prefix)
model_path = os.path.join(trainfile, 'model')
expr_dirs = os.path.join('./result/', DB)
if not os.path.exists(expr_dirs):
os.makedirs(expr_dirs)
log_output = open(f"{expr_dirs}/log.txt", 'w')
model_weights = os.path.join(model_path, 'PDBSNet' + '_' + opt.dataset + '_' + str(opt.epochs) + '.pkl')
# test datalodar
data_dir = './data/'
image_file = data_dir + opt.dataset + '.mat'
input_data = sio.loadmat(image_file)
image = input_data['data']
image = image.astype(np.float32)
gt = input_data['map']
gt = gt.astype(np.float32)
image = ((image - image.min()) / (image.max() - image.min()))
band = image.shape[2]
test_data = np.expand_dims(image, axis=0)
loader_test = torch.from_numpy(test_data.transpose(0,3,1,2)).type(torch.FloatTensor)
# Device
device = torch.device('cuda:{}'.format(0)) if torch.cuda.is_available() else torch.device('cpu')
net = PDBSNet(band, band, nch_ker=opt.nch_ker, nblk=opt.nblk).to(device)
net.load_state_dict(torch.load(model_weights, map_location = 'cuda:0'))
t_begin = time.time()
net.eval()
img_old = loader_test
test_data = pixel_shuffle_down_sampling(loader_test, opt.factor_test, pad=0)
test_data = test_data.to(device)
img = net(test_data)
img_new = pixel_shuffle_up_sampling(img, opt.factor_test, pad=0)
HSI_old = TensorToHSI(img_old)
HSI_new = TensorToHSI(img_new)
| """
See more details in papers:
[1] D. Wang, L. Zhuang, L. Gao, X. Sun, M. Huang, and A. Plaza,
“PDBSNet: Pixel-Shuffle Downsampling Blind-Spot Reconstruction Network
for Hyperspectral Anomaly Detection,” IEEE Trans. Geosci. Remote Sens.,
vol. 61, 2023, Art. no. 5511914. DOI: 10.1109/TGRS.2023.3276175
URL: https://ieeexplore.ieee.org/abstract/document/10124448
------------------------------------------------------------------------------
Copyright (May, 2023):
Degang Wang (wangdegang20@mails.ucas.ac.cn)
Lina Zhuang (zhuangln@aircas.ac.cn)
Lianru Gao (gaolr@aircas.ac.cn)
Xu Sun (sunxu@aircas.ac.cn)
Min Huang (huangmin@aircas.ac.cn)
Antonio Plaza (aplaza@unex.es)
PDBSNet is distributed under the terms of the GNU General Public License 2.0.
Permission to use, copy, modify, and distribute this software for
any purpose without fee is hereby granted, provided that this entire
notice is included in all copies of any software which is or includes
a copy or modification of this software and in all copies of the
supporting documentation for such software.
This software is being provided "as is", without any express or
implied warranty. In particular, the authors do not make any
representation or warranty of any kind concerning the merchantability
of this software or its fitness for any particular purpose.
------------------------------------------------------------------------------
"""
class Trainer(object):
'''
Trains a model
'''
def __init__(self,
opt,
model,
criterion,
optimizer,
dataloader,
device,
model_path: str,
logs_path: str,
save_freq: int=50,
scheduler = None):
'''
Trains a PyTorch `nn.Module` object provided in `model`
on training sets provided in `dataloader`
using `criterion` and `optimizer`.
Saves model weight snapshots every `save_freq` epochs and saves the
weights at the end of training.
Parameters
----------
model : torch model object, with callable `forward` method.
criterion : callable taking inputs and targets, returning loss.
optimizer : torch.optim optimizer.
dataloader : train dataloaders.
model_path : string. output path for model.
logs_path : string. output path for log.
save_freq : integer. Number of epochs between model checkpoints. Default = 50.
scheduler : learning rate scheduler.
'''
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.dataloader = dataloader
self.device = device
self.model_path = model_path
self.logs_path = logs_path
self.save_freq = save_freq
self.scheduler = scheduler
self.opt = opt
if not os.path.exists(self.model_path):
os.makedirs(self.model_path)
if not os.path.exists(self.logs_path):
os.makedirs(self.logs_path)
self.log_output = open(f"{self.logs_path}/log.txt", 'w')
self.writer = SummaryWriter(logs_path)
print(self.opt)
print(self.opt, file=self.log_output)
def train_epoch(self) -> None:
# Run a train phase for each epoch
self.model.train(True)
loss_train = []
train_data = pixel_shuffle_down_sampling(self.dataloader, self.opt.factor_train, pad=0)
loader_train = self.dataloader.to(self.device)
train_data = train_data.to(self.device)
# forward net
output = self.model(train_data)
# backward net
self.optimizer.zero_grad()
outputs = pixel_shuffle_up_sampling(output, self.opt.factor_train, pad=0)
loss = self.criterion(outputs, loader_train)
loss.backward()
self.optimizer.step()
# get losses
loss_train = loss.item()
print("Train Loss:" + str(round(loss_train, 4)))
print("Train Loss:" + str(round(loss_train, 4)), file = self.log_output)
# ============ TensorBoard logging ============#
# Log the scalar values
info = {
'Loss_train': np.mean(loss_train)
}
for tag, value in info.items():
self.writer.add_scalar(tag, value, self.epoch + 1)
# Saving model
if ((self.epoch + 1) % self.save_freq == 0):
torch.save(self.model.state_dict(), os.path.join(self.model_path, 'PDBSNet' + '_' + self.opt.dataset + '_' + str(self.epoch + 1) + '.pkl'))
def train(self) -> nn.Module:
for epoch in range(self.opt.epochs):
self.epoch = epoch
print('-' * 50)
print('Epoch {}/{}'.format(epoch + 1, self.opt.epochs))
print('Epoch {}/{}'.format(epoch + 1, self.opt.epochs), file = self.log_output)
print('-' * 50)
# run training epoch
self.train_epoch()
if self.scheduler is not None:
self.scheduler.step()
return self.model
def train_model(opt):
DB = opt.dataset
expr_dir = os.path.join('./checkpoints/', DB)
if not os.path.exists(expr_dir):
os.makedirs(expr_dir)
prefix = 'PDBSNet' + '_epoch_' + str(opt.epochs)+ '_learning_rate_' + str(opt.learning_rate) + '_factor_train_' + str(opt.factor_train) + '_gpu_ids_' + str(opt.gpu_ids)
trainfile = os.path.join(expr_dir, prefix)
if not os.path.exists(trainfile):
os.makedirs(trainfile)
# Device
device = torch.device('cuda:{}'.format(opt.gpu_ids)) if torch.cuda.is_available() else torch.device('cpu')
# Directories for storing model and output samples
model_path = os.path.join(trainfile, 'model')
logs_path = os.path.join(trainfile, './logs')
setup_seed(opt.seed)
loader_train, band = PDBSNetData(opt)
net = PDBSNet(band, band, nch_ker=opt.nch_ker, nblk=opt.nblk).to(device)
# Define Optimizers and Loss
optimizer = optim.Adam(net.parameters(), lr=opt.learning_rate, betas=(0.5, 0.999), weight_decay=opt.weight_decay)
scheduler_net = None
if opt.lossm.lower() == 'l1':
criterion = nn.L1Loss().to(device) # Regression loss: L1
elif opt.lossm.lower() == 'l2':
criterion = nn.MSELoss().to(device) # Regression loss: L2
if torch.cuda.is_available():
print('Model moved to CUDA compute device.')
else:
print('No CUDA available, running on CPU!')
# Training
t_begin = time.time()
trainer = Trainer(opt,
net,
criterion,
optimizer,
loader_train,
device,
model_path,
logs_path,
scheduler=scheduler_net)
trainer.train()
t_end = time.time()
print('Time of training-{}'.format((t_end - t_begin)))
def predict(opt):
DB = opt.dataset
expr_dir = os.path.join('./checkpoints/', DB)
prefix = 'PDBSNet' + '_epoch_' + str(opt.epochs)+ '_learning_rate_' + str(opt.learning_rate) + '_factor_train_' + str(opt.factor_train) + '_gpu_ids_' + str(opt.gpu_ids)
trainfile = os.path.join(expr_dir, prefix)
model_path = os.path.join(trainfile, 'model')
expr_dirs = os.path.join('./result/', DB)
if not os.path.exists(expr_dirs):
os.makedirs(expr_dirs)
log_output = open(f"{expr_dirs}/log.txt", 'w')
model_weights = os.path.join(model_path, 'PDBSNet' + '_' + opt.dataset + '_' + str(opt.epochs) + '.pkl')
# test datalodar
data_dir = './data/'
image_file = data_dir + opt.dataset + '.mat'
input_data = sio.loadmat(image_file)
image = input_data['data']
image = image.astype(np.float32)
gt = input_data['map']
gt = gt.astype(np.float32)
image = ((image - image.min()) / (image.max() - image.min()))
band = image.shape[2]
test_data = np.expand_dims(image, axis=0)
loader_test = torch.from_numpy(test_data.transpose(0,3,1,2)).type(torch.FloatTensor)
# Device
device = torch.device('cuda:{}'.format(0)) if torch.cuda.is_available() else torch.device('cpu')
net = PDBSNet(band, band, nch_ker=opt.nch_ker, nblk=opt.nblk).to(device)
net.load_state_dict(torch.load(model_weights, map_location = 'cuda:0'))
t_begin = time.time()
net.eval()
img_old = loader_test
test_data = pixel_shuffle_down_sampling(loader_test, opt.factor_test, pad=0)
test_data = test_data.to(device)
img = net(test_data)
img_new = pixel_shuffle_up_sampling(img, opt.factor_test, pad=0)
HSI_old = TensorToHSI(img_old)
HSI_new = TensorToHSI(img_new)
| auc, detectmap = get_auc(HSI_old, HSI_new, gt)
| 4 | 2023-10-16 08:28:56+00:00 | 8k |
iamarunbrahma/llm-prompt-testing | app.py | [
{
"identifier": "Metrics",
"path": "metrics.py",
"snippet": "class Metrics:\r\n def __init__(self, question, context, answer, config, strictness=1):\r\n self.question = question\r\n self.context = context\r\n self.answer = answer\r\n self.strictness = strictness\r\n\r\n ... | import streamlit as st
import openai
import traceback
import sys
import pandas as pd
from metrics import Metrics
from utils import generate_prompt, generate_chat_prompt, generate_csv_report
from utils import get_completion, get_chat_completion, context_chunking
| 5,929 | }
st.session_state["metrics_name"] = st.sidebar.multiselect(
"Metrics", ["Select All"] + all_metrics
)
if "Select All" in st.session_state["metrics_name"]:
st.session_state["metrics_name"] = all_metrics
llm_metrics = list(
set(st.session_state["metrics_name"]).intersection(
["Answer Relevancy", "Faithfulness", "Critique"]
)
)
scalar_metrics = list(
set(st.session_state["metrics_name"]).difference(
["Answer Relevancy", "Faithfulness", "Critique"]
)
)
if llm_metrics:
strictness = st.sidebar.slider(
"Select Strictness", min_value=1, max_value=5, value=1, step=1
)
if "Critique" in llm_metrics:
criteria = st.sidebar.selectbox("Select Criteria", list(criteria_dict.keys()))
system_prompt_counter = st.sidebar.button(
"Add System Prompt", help="Max 5 System Prompts can be added"
)
st.sidebar.divider()
config["temperature"] = st.sidebar.slider(
"Temperature", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["top_p"] = st.sidebar.slider(
"Top P", min_value=0.0, max_value=1.0, step=0.01, value=1.0
)
config["max_tokens"] = st.sidebar.slider(
"Max Tokens", min_value=10, max_value=1000, value=256
)
config["frequency_penalty"] = st.sidebar.slider(
"Frequency Penalty", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["presence_penalty"] = st.sidebar.slider(
"Presence Penalty", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["separator"] = st.sidebar.text_input("Separator", value="###")
system_prompt = "system_prompt_1"
exec(
f"{system_prompt} = st.text_area('System Prompt #1', value='You are a helpful AI Assistant.')"
)
if "prompt_counter" not in st.session_state:
st.session_state["prompt_counter"] = 0
if system_prompt_counter:
st.session_state["prompt_counter"] += 1
for num in range(1, st.session_state["prompt_counter"] + 1):
system_prompt_final = "system_prompt_" + str(num + 1)
exec(
f"{system_prompt_final} = st.text_area(f'System Prompt #{num+1}', value='You are a helpful AI Assistant.')"
)
if st.session_state.get("prompt_counter") and st.session_state["prompt_counter"] >= 5:
del st.session_state["prompt_counter"]
st.rerun()
context = st.text_area("Context", value="")
question = st.text_area("Question", value="")
uploaded_file = st.file_uploader(
"Choose a .csv file", help="Accept only .csv files", type="csv"
)
col1, col2, col3 = st.columns((3, 2.3, 1.5))
with col1:
click_button = st.button(
"Generate Result!", help="Result will be generated for only 1 question"
)
with col2:
csv_report_button = st.button(
"Generate CSV Report!", help="Upload CSV file containing questions and contexts"
)
with col3:
empty_button = st.button("Empty Response!")
if click_button:
try:
if not config["openai_api_key"] or config["openai_api_key"][:3] != "sk-":
st.error("OpenAI API Key is incorrect... Please, provide correct API Key.")
sys.exit(1)
else:
openai.api_key = config["openai_api_key"]
if st.session_state.get("prompt_counter"):
counter = st.session_state["prompt_counter"] + 1
else:
counter = 1
contexts_lst = context_chunking(context)
answers_list = []
for num in range(counter):
system_prompt_final = "system_prompt_" + str(num + 1)
answer_final = "answer_" + str(num + 1)
if config["model_name"] in ["text-davinci-003", "gpt-3.5-turbo-instruct"]:
user_prompt = generate_prompt(
eval(system_prompt_final), config["separator"], context, question
)
exec(f"{answer_final} = get_completion(config, user_prompt)")
else:
|
st.title("Welcome to :violet[Prompt Testing!]")
config = {}
st.sidebar.header("Set Configuration!", divider="rainbow")
config["openai_api_key"] = st.sidebar.text_input("OpenAI API Key", placeholder="sk-")
all_models = [
"text-davinci-003",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-instruct",
"gpt-4",
]
config["model_name"] = st.sidebar.selectbox("Model Name", all_models)
if "metrics_name" not in st.session_state:
st.session_state["metrics_name"] = []
all_metrics = [
"Rouge Score",
"BLEU Score",
"BERT Score",
"Answer Relevancy",
"Faithfulness",
"Critique",
]
criteria_dict = {
"Harmfulness": "Does the answer cause or has the potential to cause harm to individuals, groups, or society at large?",
"Maliciousness": "Does the answer intend to harm, deceive, or exploit users?",
"Coherence": "Does the answer present ideas, information, or arguments in a logical and organized manner?",
"Correctness": "Is the answer factually accurate and free from errors?",
"Conciseness": "Does the answer convey information or ideas clearly and efficiently, without unnecessary or redundant details?",
}
st.session_state["metrics_name"] = st.sidebar.multiselect(
"Metrics", ["Select All"] + all_metrics
)
if "Select All" in st.session_state["metrics_name"]:
st.session_state["metrics_name"] = all_metrics
llm_metrics = list(
set(st.session_state["metrics_name"]).intersection(
["Answer Relevancy", "Faithfulness", "Critique"]
)
)
scalar_metrics = list(
set(st.session_state["metrics_name"]).difference(
["Answer Relevancy", "Faithfulness", "Critique"]
)
)
if llm_metrics:
strictness = st.sidebar.slider(
"Select Strictness", min_value=1, max_value=5, value=1, step=1
)
if "Critique" in llm_metrics:
criteria = st.sidebar.selectbox("Select Criteria", list(criteria_dict.keys()))
system_prompt_counter = st.sidebar.button(
"Add System Prompt", help="Max 5 System Prompts can be added"
)
st.sidebar.divider()
config["temperature"] = st.sidebar.slider(
"Temperature", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["top_p"] = st.sidebar.slider(
"Top P", min_value=0.0, max_value=1.0, step=0.01, value=1.0
)
config["max_tokens"] = st.sidebar.slider(
"Max Tokens", min_value=10, max_value=1000, value=256
)
config["frequency_penalty"] = st.sidebar.slider(
"Frequency Penalty", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["presence_penalty"] = st.sidebar.slider(
"Presence Penalty", min_value=0.0, max_value=1.0, step=0.01, value=0.0
)
config["separator"] = st.sidebar.text_input("Separator", value="###")
system_prompt = "system_prompt_1"
exec(
f"{system_prompt} = st.text_area('System Prompt #1', value='You are a helpful AI Assistant.')"
)
if "prompt_counter" not in st.session_state:
st.session_state["prompt_counter"] = 0
if system_prompt_counter:
st.session_state["prompt_counter"] += 1
for num in range(1, st.session_state["prompt_counter"] + 1):
system_prompt_final = "system_prompt_" + str(num + 1)
exec(
f"{system_prompt_final} = st.text_area(f'System Prompt #{num+1}', value='You are a helpful AI Assistant.')"
)
if st.session_state.get("prompt_counter") and st.session_state["prompt_counter"] >= 5:
del st.session_state["prompt_counter"]
st.rerun()
context = st.text_area("Context", value="")
question = st.text_area("Question", value="")
uploaded_file = st.file_uploader(
"Choose a .csv file", help="Accept only .csv files", type="csv"
)
col1, col2, col3 = st.columns((3, 2.3, 1.5))
with col1:
click_button = st.button(
"Generate Result!", help="Result will be generated for only 1 question"
)
with col2:
csv_report_button = st.button(
"Generate CSV Report!", help="Upload CSV file containing questions and contexts"
)
with col3:
empty_button = st.button("Empty Response!")
if click_button:
try:
if not config["openai_api_key"] or config["openai_api_key"][:3] != "sk-":
st.error("OpenAI API Key is incorrect... Please, provide correct API Key.")
sys.exit(1)
else:
openai.api_key = config["openai_api_key"]
if st.session_state.get("prompt_counter"):
counter = st.session_state["prompt_counter"] + 1
else:
counter = 1
contexts_lst = context_chunking(context)
answers_list = []
for num in range(counter):
system_prompt_final = "system_prompt_" + str(num + 1)
answer_final = "answer_" + str(num + 1)
if config["model_name"] in ["text-davinci-003", "gpt-3.5-turbo-instruct"]:
user_prompt = generate_prompt(
eval(system_prompt_final), config["separator"], context, question
)
exec(f"{answer_final} = get_completion(config, user_prompt)")
else:
| user_prompt = generate_chat_prompt(
| 2 | 2023-10-24 17:37:07+00:00 | 8k |
cfs-energy/cfspopcon | cfspopcon/algorithms/composite_algorithm.py | [
{
"identifier": "CompositeAlgorithm",
"path": "cfspopcon/algorithms/algorithm_class.py",
"snippet": "class CompositeAlgorithm:\n \"\"\"A class which combined multiple Algorithms into a single object which behaves like an Algorithm.\"\"\"\n\n def __init__(self, algorithms: Sequence[Union[Algorithm,... | from .algorithm_class import CompositeAlgorithm
from .beta import calc_beta
from .core_radiated_power import calc_core_radiated_power
from .extrinsic_core_radiator import calc_extrinsic_core_radiator
from .fusion_gain import calc_fusion_gain
from .geometry import calc_geometry
from .heat_exhaust import calc_heat_exhaust
from .ohmic_power import calc_ohmic_power
from .peaked_profiles import calc_peaked_profiles
from .power_balance_from_tau_e import calc_power_balance_from_tau_e
from .q_star_from_plasma_current import calc_q_star_from_plasma_current
from .single_functions import (
calc_auxillary_power,
calc_average_ion_temp,
calc_average_total_pressure,
calc_bootstrap_fraction,
calc_confinement_transition_threshold_power,
calc_current_relaxation_time,
calc_f_rad_core,
calc_fuel_average_mass_number,
calc_greenwald_fraction,
calc_normalised_collisionality,
calc_P_SOL,
calc_peak_pressure,
calc_ratio_P_LH,
calc_rho_star,
calc_triple_product,
require_P_rad_less_than_P_in,
)
from .two_point_model_fixed_tet import two_point_model_fixed_tet
from .zeff_and_dilution_from_impurities import calc_zeff_and_dilution_from_impurities | 4,776 | """Algorithms constructed by combining several smaller algorithms."""
predictive_popcon = CompositeAlgorithm(
[
calc_geometry,
calc_q_star_from_plasma_current,
calc_fuel_average_mass_number,
calc_average_ion_temp,
calc_zeff_and_dilution_from_impurities,
calc_power_balance_from_tau_e,
calc_beta,
| """Algorithms constructed by combining several smaller algorithms."""
predictive_popcon = CompositeAlgorithm(
[
calc_geometry,
calc_q_star_from_plasma_current,
calc_fuel_average_mass_number,
calc_average_ion_temp,
calc_zeff_and_dilution_from_impurities,
calc_power_balance_from_tau_e,
calc_beta, | calc_peaked_profiles, | 8 | 2023-10-19 16:58:23+00:00 | 8k |
GXimingLu/IPA | main.py | [
{
"identifier": "get_args",
"path": "arguments.py",
"snippet": "def get_args():\n parser = argparse.ArgumentParser(description='RL')\n\n # dataset\n parser.add_argument(\n '--output-dir', type=str, default=f'{HOME_PATH}/commonGen')\n parser.add_argument(\n '--dataset-train', ty... | import os
import torch
import json
import time
import logging
import random
import argparse
import numpy as np
import torch.nn.functional as F
from typing import List
from datetime import datetime
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam, Optimizer
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.tensorboard import SummaryWriter
from transformers import get_linear_schedule_with_warmup
from arguments import get_args
from policy import Policy
from data_pool import DataPool
from reward import Reward
from utils.utils import ensure_dir, ceil_div, reduce_mean, reduce_sum
from utils.generation_utils import decode | 6,003 |
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
class PromptDataset(Dataset):
def __init__(self, path, tokenizer):
data = json.load(open(path, 'r'))
self.items = [v for k, v in data.items() if v['human_order']]
self.tokenizer = tokenizer
def __len__(self):
return len(self.items)
def __getitem__(self, idx):
item = self.items[idx]
order_words = random.choice(item['human_order'])
constraint = json.dumps([list(map(lambda x: self.tokenizer.encode(f' {x}'), item['inflection'][w]))
for w in order_words.split('-')])
prompt = 'Generate a sentence including the following keywords in the same order as listed: %s\n\nAnswer:'
prompt = prompt % ' '.join(order_words.split('-'))
return {
'order': order_words,
'constraint': constraint,
'prompt': prompt,
}
class PromptCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
concepts = [sequence['order'] for sequence in sequences]
prompts = [sequence['prompt'] for sequence in sequences]
constraints = [sequence['constraint'] for sequence in sequences]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
input_ids = encodings_dict['input_ids']
attention_mask = encodings_dict['attention_mask']
return input_ids, attention_mask, concepts, constraints
class SequenceDataset(Dataset):
def __init__(self, data_pool: DataPool):
self.queries, self.responses, self.cat_tokens = data_pool.get_data()
def __len__(self):
return len(self.queries)
def __getitem__(self, idx):
return {'query': self.queries[idx],
'response': self.responses[idx],
'cat_tokens': self.cat_tokens[idx]
}
class SequenceCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
queries = [sequence['query'] for sequence in sequences]
responses = [sequence['response'] + self.tokenizer.eos_token for sequence in sequences]
cat_ids = [self.tokenizer.convert_tokens_to_ids(sequence['cat_tokens']) for sequence in sequences]
query_encodings_dict = self.tokenizer(queries, return_tensors="pt", padding=True)
query_input_ids = query_encodings_dict['input_ids']
query_mask = query_encodings_dict['attention_mask']
query_input_ids = torch.cat([query_input_ids.new(cat_ids)[:, None], query_input_ids], dim=1)
query_mask = torch.cat([query_mask.new([1] * len(query_mask))[:, None], query_mask], dim=1)
response_encodings_dict = self.tokenizer(responses, return_tensors="pt", padding=True)
response_input_ids = response_encodings_dict['input_ids']
response_mask = response_encodings_dict['attention_mask']
return query_input_ids, query_mask, response_input_ids, response_mask
class FixedController:
def __init__(self, coef):
self.value = coef
def update(self, current, n_steps, lower_bound):
pass
class AdaptiveController:
def __init__(self, init_coef, target, horizon):
self.value = init_coef
self.target = target
self.horizon = horizon
def update(self, current, n_steps, lower_bound):
proportional_error = np.clip(current / self.target - 1, -0.2, 0.2)
if lower_bound:
mult = 1 + proportional_error * n_steps / self.horizon
else:
mult = 1 - proportional_error * n_steps / self.horizon
self.value *= mult
class ConditionTrainer:
def __init__(self,
params: argparse.Namespace,
|
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
log = logging.getLogger(__name__)
class PromptDataset(Dataset):
def __init__(self, path, tokenizer):
data = json.load(open(path, 'r'))
self.items = [v for k, v in data.items() if v['human_order']]
self.tokenizer = tokenizer
def __len__(self):
return len(self.items)
def __getitem__(self, idx):
item = self.items[idx]
order_words = random.choice(item['human_order'])
constraint = json.dumps([list(map(lambda x: self.tokenizer.encode(f' {x}'), item['inflection'][w]))
for w in order_words.split('-')])
prompt = 'Generate a sentence including the following keywords in the same order as listed: %s\n\nAnswer:'
prompt = prompt % ' '.join(order_words.split('-'))
return {
'order': order_words,
'constraint': constraint,
'prompt': prompt,
}
class PromptCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
concepts = [sequence['order'] for sequence in sequences]
prompts = [sequence['prompt'] for sequence in sequences]
constraints = [sequence['constraint'] for sequence in sequences]
encodings_dict = self.tokenizer(prompts, return_tensors="pt", padding=True)
input_ids = encodings_dict['input_ids']
attention_mask = encodings_dict['attention_mask']
return input_ids, attention_mask, concepts, constraints
class SequenceDataset(Dataset):
def __init__(self, data_pool: DataPool):
self.queries, self.responses, self.cat_tokens = data_pool.get_data()
def __len__(self):
return len(self.queries)
def __getitem__(self, idx):
return {'query': self.queries[idx],
'response': self.responses[idx],
'cat_tokens': self.cat_tokens[idx]
}
class SequenceCollator(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def __call__(self, sequences):
queries = [sequence['query'] for sequence in sequences]
responses = [sequence['response'] + self.tokenizer.eos_token for sequence in sequences]
cat_ids = [self.tokenizer.convert_tokens_to_ids(sequence['cat_tokens']) for sequence in sequences]
query_encodings_dict = self.tokenizer(queries, return_tensors="pt", padding=True)
query_input_ids = query_encodings_dict['input_ids']
query_mask = query_encodings_dict['attention_mask']
query_input_ids = torch.cat([query_input_ids.new(cat_ids)[:, None], query_input_ids], dim=1)
query_mask = torch.cat([query_mask.new([1] * len(query_mask))[:, None], query_mask], dim=1)
response_encodings_dict = self.tokenizer(responses, return_tensors="pt", padding=True)
response_input_ids = response_encodings_dict['input_ids']
response_mask = response_encodings_dict['attention_mask']
return query_input_ids, query_mask, response_input_ids, response_mask
class FixedController:
def __init__(self, coef):
self.value = coef
def update(self, current, n_steps, lower_bound):
pass
class AdaptiveController:
def __init__(self, init_coef, target, horizon):
self.value = init_coef
self.target = target
self.horizon = horizon
def update(self, current, n_steps, lower_bound):
proportional_error = np.clip(current / self.target - 1, -0.2, 0.2)
if lower_bound:
mult = 1 + proportional_error * n_steps / self.horizon
else:
mult = 1 - proportional_error * n_steps / self.horizon
self.value *= mult
class ConditionTrainer:
def __init__(self,
params: argparse.Namespace, | policy: Policy, | 1 | 2023-10-20 08:30:18+00:00 | 8k |
ansible/django-ansible-base | ansible_base/authenticator_plugins/ldap.py | [
{
"identifier": "get_or_create_authenticator_user",
"path": "ansible_base/authentication/common.py",
"snippet": "def get_or_create_authenticator_user(user_id, user_details, authenticator, extra_data):\n \"\"\"\n Create the user object in the database along with it's associated AuthenticatorUser cl... | import inspect
import logging
import re
import ldap
from collections import OrderedDict
from typing import Any
from django.utils.translation import gettext_lazy as _
from django_auth_ldap import config
from django_auth_ldap.backend import LDAPBackend
from django_auth_ldap.backend import LDAPSettings as BaseLDAPSettings
from django_auth_ldap.config import LDAPGroupType
from rest_framework.serializers import ValidationError
from ansible_base.authentication.common import get_or_create_authenticator_user, update_user_claims
from ansible_base.authenticator_plugins.base import AbstractAuthenticatorPlugin, Authenticator, BaseAuthenticatorConfiguration
from ansible_base.serializers.fields import BooleanField, CharField, ChoiceField, DictField, ListField, URLListField, UserAttrMap
from ansible_base.utils.validation import VALID_STRING | 4,294 | START_TLS = BooleanField(
help_text=_("Whether to enable TLS when the LDAP connection is not using SSL."),
allow_null=False,
required=False,
default=False,
ui_field_label=_('LDAP Start TLS'),
)
USER_DN_TEMPLATE = DNField(
help_text=_(
'Alternative to user search, if user DNs are all of the same '
'format. This approach is more efficient for user lookups than '
'searching if it is usable in your organizational environment. If '
'this setting has a value it will be used instead of '
'AUTH_LDAP_USER_SEARCH.'
),
allow_null=False,
required=True,
with_user=True,
ui_field_label=_('LDAP User DN Template'),
)
USER_ATTR_MAP = UserAttrMap(
help_text=_(
'Mapping of LDAP user schema to API user attributes. The default'
' setting is valid for ActiveDirectory but users with other LDAP'
' configurations may need to change the values. Refer to the'
' documentation for additional details.'
),
allow_null=False,
required=True,
ui_field_label=_('LDAP User Attribute Map'),
)
USER_SEARCH = LDAPSearchField(
help_text=_(
'LDAP search query to find users. Any user that matches the given '
'pattern will be able to login to the service. The user should also be '
'mapped into an organization (as defined in the '
'AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries '
'need to be supported use of "LDAPUnion" is possible. See '
'the documentation for details.'
),
allow_null=False,
required=False,
search_must_have_user=True,
ui_field_label=_('LDAP User Search'),
)
def validate(self, attrs):
# Check interdependent fields
errors = {}
group_type_class = getattr(config, attrs['GROUP_TYPE'], None)
if group_type_class:
group_type_params = attrs['GROUP_TYPE_PARAMS']
logger.error(f"Validating group type params for {attrs['GROUP_TYPE']}")
class_args = inspect.getfullargspec(group_type_class.__init__).args[1:]
invalid_keys = set(group_type_params) - set(class_args)
missing_keys = set(class_args) - set(group_type_params)
if invalid_keys:
invalid_keys = sorted(list(invalid_keys))
for key in invalid_keys:
errors[f'GROUP_TYPE_PARAMS.{key}'] = "Invalid option for specified GROUP_TYPE"
if missing_keys:
missing_keys = sorted(list(missing_keys))
for key in missing_keys:
errors[f'GROUP_TYPE_PARAMS.{key}'] = "Missing required field for GROUP_TYPE"
if errors:
raise ValidationError(errors)
# Raise some warnings if specific fields were used
# TODO: Figure out how to display these warnings on a successful save
# for field in ['USER_FLAGS_BY_GROUP', 'DENY_GROUP', 'REQUIRE_GROUP']:
# if field in data:
# self.warnings[field] = "It would be better to use the authenticator field instead of setting this field in the LDAP adapter"
return super().validate(attrs)
class LDAPSettings(BaseLDAPSettings):
def __init__(self, prefix: str = 'AUTH_LDAP_', defaults: dict = {}):
# This init method double checks the passed defaults while initializing a settings objects
super(LDAPSettings, self).__init__(prefix, defaults)
# SERVER_URI needs to be a string, not an array
setattr(self, 'SERVER_URI', ','.join(defaults['SERVER_URI']))
# Connection options need to be set as {"integer": "value"} but our configuration has {"friendly_name": "value"} so we need to convert them
connection_options = defaults.get('CONNECTION_OPTIONS', {})
valid_options = dict([(v, k) for k, v in ldap.OPT_NAMES_DICT.items()])
internal_data = {}
for key in connection_options:
internal_data[valid_options[key]] = connection_options[key]
# If a DB-backed setting is specified that wipes out the
# OPT_NETWORK_TIMEOUT, fall back to a sane default
if ldap.OPT_NETWORK_TIMEOUT not in internal_data:
internal_data[ldap.OPT_NETWORK_TIMEOUT] = 30
# when specifying `.set_option()` calls for TLS in python-ldap, the
# *order* in which you invoke them *matters*, particularly in Python3,
# where dictionary insertion order is persisted
#
# specifically, it is *critical* that `ldap.OPT_X_TLS_NEWCTX` be set *last*
# this manual sorting puts `OPT_X_TLS_NEWCTX` *after* other TLS-related
# options
#
# see: https://github.com/python-ldap/python-ldap/issues/55
newctx_option = internal_data.pop(ldap.OPT_X_TLS_NEWCTX, None)
internal_data = OrderedDict(internal_data)
if newctx_option is not None:
internal_data[ldap.OPT_X_TLS_NEWCTX] = newctx_option
setattr(self, 'CONNECTION_OPTIONS', internal_data)
# Group type needs to be an object instead of a String so instantiate it
group_type_class = getattr(config, defaults['GROUP_TYPE'], None)
setattr(self, 'GROUP_TYPE', group_type_class(**defaults['GROUP_TYPE_PARAMS']))
|
logger = logging.getLogger('ansible_base.authenticator_plugins.ldap')
user_search_string = '%(user)s'
def validate_ldap_dn(value: str, with_user: bool = False, required: bool = True) -> bool:
if not value and not required:
return
dn_value = value
if with_user:
if user_search_string not in value:
raise ValidationError(_('DN must include "{}" placeholder for username: {}').format(user_search_string, value))
dn_value = value.replace(user_search_string, 'USER')
try:
ldap.dn.str2dn(dn_value.encode('utf-8'))
except ldap.DECODING_ERROR:
raise ValidationError(_('Invalid DN: %s') % value)
class DNField(CharField):
def __init__(self, **kwargs):
self.with_user = kwargs.pop('with_user', False)
super().__init__(**kwargs)
def validator(value):
validate_ldap_dn(value, with_user=self.with_user, required=self.required)
self.validators.append(validator)
class LDAPConnectionOptions(DictField):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def validator(value):
valid_options = dict([(v, k) for k, v in ldap.OPT_NAMES_DICT.items()])
errors = {}
for key in value.keys():
if key not in valid_options:
errors[key] = 'Not a valid connection option'
if errors:
raise ValidationError(errors)
self.validators.append(validator)
class LDAPSearchField(ListField):
def __init__(self, **kwargs):
self.search_must_have_user = kwargs.pop('search_must_have_user', False)
super().__init__(**kwargs)
def validator(value):
errors = {}
if len(value) != 3:
raise ValidationError(_('Must be an array of 3 items: search DN, search scope and a filter'))
try:
validate_ldap_dn(value[0], with_user=False, required=True)
except ValidationError as e:
errors[0] = e.args[0]
if type(value[1]) is not str or not value[1].startswith('SCOPE_') or not getattr(ldap, value[1], None):
errors[1] = _('Must be a string representing an LDAP scope object')
try:
validate_ldap_filter(value[2], with_user=self.search_must_have_user)
except ValidationError as e:
errors[2] = e.args[0]
if errors:
raise ValidationError(errors)
# We made it all the way here, make sure we can instantiate an LDAPSearch object
try:
# Search fields should be LDAPSearch objects, so we need to convert them from [] to these objects
config.LDAPSearch(value[0], getattr(ldap, value[1]), value[2])
except Exception as e:
raise ValidationError(f'Failed to instantiate LDAPSearch object: {e}')
self.validators.append(validator)
def validate_ldap_filter(value: Any, with_user: bool = False) -> bool:
if type(value) is not str:
raise ValidationError(VALID_STRING)
value = value.strip()
dn_value = value
if with_user:
if user_search_string not in value:
raise ValidationError(_('DN must include "{}" placeholder for username: {}').format(user_search_string, value))
dn_value = value.replace(user_search_string, 'USER')
if re.match(r'^\([A-Za-z0-9-]+?=[^()]+?\)$', dn_value):
return
elif re.match(r'^\([&|!]\(.*?\)\)$', dn_value):
for sub_filter in dn_value[3:-2].split(')('):
# We only need to check with_user at the top of the recursion stack
validate_ldap_filter(f'({sub_filter})', with_user=False)
return
raise ValidationError(_('Invalid filter: %s') % value)
def get_all_sub_classes(cls):
# This function can get the names of all subclasses... maybe we want to move this into utils
# We use it to find all of the parent classes for LDAPGroup
sub_classes = []
for sub_cls in cls.__subclasses__():
sub_classes.append(sub_cls.__name__)
sub_classes.extend(get_all_sub_classes(sub_cls))
return sub_classes
class LDAPConfiguration(BaseAuthenticatorConfiguration):
# We add group type params to our list of valid settings
defaults = dict(list(BaseLDAPSettings.defaults.items()) + list({'GROUP_TYPE_PARAMS': {}}.items()))
documentation_url = "https://django-auth-ldap.readthedocs.io/en/latest/"
SERVER_URI = URLListField(
help_text=_('A list of URIs to connect to LDAP server, such as "ldap://ldap.example.com:389" ' '(non-SSL) or "ldaps://ldap.example.com:636" (SSL).'),
allow_null=False,
required=True,
schemes=['ldap', 'ldaps'],
ui_field_label=_('LDAP Server URI'),
)
BIND_DN = DNField(
help_text=_(
'DN (Distinguished Name) of user to bind for all search queries. This'
' is the system user account we will use to login to query LDAP for other'
' user information. Refer to the documentation for example syntax.'
),
allow_null=False,
required=False,
with_user=False,
ui_field_label=_('LDAP Bind DN'),
)
BIND_PASSWORD = CharField(
help_text=_("The password used for BIND_DN."),
allow_null=False,
required=False,
ui_field_label=_('LDAP Bind Password'),
)
CONNECTION_OPTIONS = LDAPConnectionOptions(
help_text=_(
'Additional options to set for the LDAP connection. LDAP '
'referrals are disabled by default (to prevent certain LDAP '
'queries from hanging with AD). Option names should be strings '
'(e.g. "OPT_REFERRALS"). Refer to '
'https://www.python-ldap.org/doc/html/ldap.html#options for '
'possible options and values that can be set.'
),
default={},
allow_null=False,
required=False,
ui_field_label=_('LDAP Connection Options'),
)
GROUP_TYPE = ChoiceField(
help_text=_(
'The group type may need to be changed based on the type of the '
'LDAP server. Values are listed at: '
'https://django-auth-ldap.readthedocs.io/en/stable/groups.html#types-of-groups'
),
allow_null=False,
required=True,
choices=get_all_sub_classes(LDAPGroupType),
ui_field_label=_('LDAP Group Type'),
)
GROUP_TYPE_PARAMS = DictField(
help_text=_('Key value parameters to send the chosen group type init method.'),
allow_null=False,
required=True,
ui_field_label=_('LDAP Group Type Parameters'),
)
GROUP_SEARCH = LDAPSearchField(
help_text=_(
'Users are mapped to organizations based on their membership in LDAP'
' groups. This setting defines the LDAP search query to find groups. '
'Unlike the user search, group search does not support LDAPSearchUnion.'
),
allow_null=True,
required=False,
search_must_have_user=False,
ui_field_label=_('LDAP Group Search'),
)
START_TLS = BooleanField(
help_text=_("Whether to enable TLS when the LDAP connection is not using SSL."),
allow_null=False,
required=False,
default=False,
ui_field_label=_('LDAP Start TLS'),
)
USER_DN_TEMPLATE = DNField(
help_text=_(
'Alternative to user search, if user DNs are all of the same '
'format. This approach is more efficient for user lookups than '
'searching if it is usable in your organizational environment. If '
'this setting has a value it will be used instead of '
'AUTH_LDAP_USER_SEARCH.'
),
allow_null=False,
required=True,
with_user=True,
ui_field_label=_('LDAP User DN Template'),
)
USER_ATTR_MAP = UserAttrMap(
help_text=_(
'Mapping of LDAP user schema to API user attributes. The default'
' setting is valid for ActiveDirectory but users with other LDAP'
' configurations may need to change the values. Refer to the'
' documentation for additional details.'
),
allow_null=False,
required=True,
ui_field_label=_('LDAP User Attribute Map'),
)
USER_SEARCH = LDAPSearchField(
help_text=_(
'LDAP search query to find users. Any user that matches the given '
'pattern will be able to login to the service. The user should also be '
'mapped into an organization (as defined in the '
'AUTH_LDAP_ORGANIZATION_MAP setting). If multiple search queries '
'need to be supported use of "LDAPUnion" is possible. See '
'the documentation for details.'
),
allow_null=False,
required=False,
search_must_have_user=True,
ui_field_label=_('LDAP User Search'),
)
def validate(self, attrs):
# Check interdependent fields
errors = {}
group_type_class = getattr(config, attrs['GROUP_TYPE'], None)
if group_type_class:
group_type_params = attrs['GROUP_TYPE_PARAMS']
logger.error(f"Validating group type params for {attrs['GROUP_TYPE']}")
class_args = inspect.getfullargspec(group_type_class.__init__).args[1:]
invalid_keys = set(group_type_params) - set(class_args)
missing_keys = set(class_args) - set(group_type_params)
if invalid_keys:
invalid_keys = sorted(list(invalid_keys))
for key in invalid_keys:
errors[f'GROUP_TYPE_PARAMS.{key}'] = "Invalid option for specified GROUP_TYPE"
if missing_keys:
missing_keys = sorted(list(missing_keys))
for key in missing_keys:
errors[f'GROUP_TYPE_PARAMS.{key}'] = "Missing required field for GROUP_TYPE"
if errors:
raise ValidationError(errors)
# Raise some warnings if specific fields were used
# TODO: Figure out how to display these warnings on a successful save
# for field in ['USER_FLAGS_BY_GROUP', 'DENY_GROUP', 'REQUIRE_GROUP']:
# if field in data:
# self.warnings[field] = "It would be better to use the authenticator field instead of setting this field in the LDAP adapter"
return super().validate(attrs)
class LDAPSettings(BaseLDAPSettings):
def __init__(self, prefix: str = 'AUTH_LDAP_', defaults: dict = {}):
# This init method double checks the passed defaults while initializing a settings objects
super(LDAPSettings, self).__init__(prefix, defaults)
# SERVER_URI needs to be a string, not an array
setattr(self, 'SERVER_URI', ','.join(defaults['SERVER_URI']))
# Connection options need to be set as {"integer": "value"} but our configuration has {"friendly_name": "value"} so we need to convert them
connection_options = defaults.get('CONNECTION_OPTIONS', {})
valid_options = dict([(v, k) for k, v in ldap.OPT_NAMES_DICT.items()])
internal_data = {}
for key in connection_options:
internal_data[valid_options[key]] = connection_options[key]
# If a DB-backed setting is specified that wipes out the
# OPT_NETWORK_TIMEOUT, fall back to a sane default
if ldap.OPT_NETWORK_TIMEOUT not in internal_data:
internal_data[ldap.OPT_NETWORK_TIMEOUT] = 30
# when specifying `.set_option()` calls for TLS in python-ldap, the
# *order* in which you invoke them *matters*, particularly in Python3,
# where dictionary insertion order is persisted
#
# specifically, it is *critical* that `ldap.OPT_X_TLS_NEWCTX` be set *last*
# this manual sorting puts `OPT_X_TLS_NEWCTX` *after* other TLS-related
# options
#
# see: https://github.com/python-ldap/python-ldap/issues/55
newctx_option = internal_data.pop(ldap.OPT_X_TLS_NEWCTX, None)
internal_data = OrderedDict(internal_data)
if newctx_option is not None:
internal_data[ldap.OPT_X_TLS_NEWCTX] = newctx_option
setattr(self, 'CONNECTION_OPTIONS', internal_data)
# Group type needs to be an object instead of a String so instantiate it
group_type_class = getattr(config, defaults['GROUP_TYPE'], None)
setattr(self, 'GROUP_TYPE', group_type_class(**defaults['GROUP_TYPE_PARAMS']))
| class AuthenticatorPlugin(LDAPBackend, AbstractAuthenticatorPlugin): | 2 | 2023-10-20 13:20:12+00:00 | 8k |
violet-sto/HN-GFN | oracle/scorer/seh_scorer.py | [
{
"identifier": "MolMDPExtended",
"path": "mol_mdp_ext.py",
"snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in ... | import pickle
import torch
import gzip
import numpy as np
from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended
from generator.gfn import make_model
from rdkit import Chem | 3,760 |
models = {}
bpath = "./data/blocks_105.json"
proxy_path = "oracle/scorer/seh"
class seh_model:
def __init__(self, bpath, device):
eargs = pickle.load(gzip.open(f'{proxy_path}/info.pkl.gz'))['args']
params = pickle.load(gzip.open(f'{proxy_path}/best_params.pkl.gz'))
|
models = {}
bpath = "./data/blocks_105.json"
proxy_path = "oracle/scorer/seh"
class seh_model:
def __init__(self, bpath, device):
eargs = pickle.load(gzip.open(f'{proxy_path}/info.pkl.gz'))['args']
params = pickle.load(gzip.open(f'{proxy_path}/best_params.pkl.gz')) | self.mdp = MolMDPExtended(bpath) | 0 | 2023-10-24 14:10:35+00:00 | 8k |
line/Skeleton-Temporal-Action-Localization | train.py | [
{
"identifier": "getClassificationMAP",
"path": "evaluation/classificationMAP.py",
"snippet": "def getClassificationMAP(confidence, labels):\n \"\"\" confidence and labels are of dimension n_samples x n_label \"\"\"\n\n AP = []\n for i in range(np.shape(labels)[1]):\n AP.append(getAP(con... | import argparse
import inspect
import os
import pdb
import pickle
import random
import re
import shutil
import time
import ipdb
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import yaml
from collections import *
from einops import rearrange, reduce, repeat
from evaluation.classificationMAP import getClassificationMAP as cmAP
from evaluation.detectionMAP import getSingleStreamDetectionMAP as dsmAP
from feeders.tools import collate_with_padding_multi_joint
from model.losses import cross_entropy_loss, mvl_loss
from sklearn.metrics import f1_score
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim.lr_scheduler import _LRScheduler
from tqdm import tqdm
from utils.logger import Logger | 3,762 | self.model.parameters(),
lr=self.arg.base_lr,
momentum=0.9,
nesterov=self.arg.nesterov,
weight_decay=self.arg.weight_decay,
)
elif self.arg.optimizer == "Adam":
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay,
)
else:
raise ValueError()
def save_arg(self):
# save arg
arg_dict = vars(self.arg)
if not os.path.exists(self.arg.work_dir):
os.makedirs(self.arg.work_dir)
with open("{}/config.yaml".format(self.arg.work_dir), "w") as f:
yaml.dump(arg_dict, f)
def adjust_learning_rate(self, epoch):
if self.arg.optimizer == "SGD" or self.arg.optimizer == "Adam":
if epoch < self.arg.warm_up_epoch:
lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
else:
lr = self.arg.base_lr * (
0.1 ** np.sum(epoch >= np.array(self.arg.step))
)
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
return lr
else:
raise ValueError()
def print_time(self):
localtime = time.asctime(time.localtime(time.time()))
self.print_log("Local current time : " + localtime)
def print_log(self, str, print_time=True):
if print_time:
localtime = time.asctime(time.localtime(time.time()))
str = "[ " + localtime + " ] " + str
print(str)
if self.arg.print_log:
with open("{}/print_log.txt".format(self.arg.work_dir), "a") as f:
print(str, file=f)
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
def train(self, epoch, wb_dict, save_model=False):
self.model.train()
self.print_log("Training epoch: {}".format(epoch + 1))
loader = self.data_loader["train"]
self.adjust_learning_rate(epoch)
loss_value, batch_acc = [], []
self.train_writer.add_scalar("epoch", epoch, self.global_step)
self.record_time()
timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
process = tqdm(loader)
if self.arg.only_train_part:
if epoch > self.arg.only_train_epoch:
print("only train part, require grad")
for key, value in self.model.named_parameters():
if "PA" in key:
value.requires_grad = True
else:
print("only train part, do not require grad")
for key, value in self.model.named_parameters():
if "PA" in key:
value.requires_grad = False
vid_preds = []
frm_preds = []
vid_lens = []
labels = []
results = []
indexs = []
for batch_idx, (data, label, target, mask, index, soft_label) in enumerate(
process
):
self.global_step += 1
# get data
data = data.float().cuda(self.output_device)
label = label.cuda(self.output_device)
mask = mask.cuda(self.output_device)
soft_label = soft_label.cuda(self.output_device)
timer["dataloader"] += self.split_time()
indexs.extend(index.cpu().numpy().tolist())
ab_labels = torch.cat([label, torch.ones(label.size(0), 1).cuda()], -1)
# forward
mil_pred, frm_scrs, mil_pred_2, frm_scrs_2 = self.model(data)
cls_mil_loss = self.loss_nce(mil_pred, ab_labels.float()) + self.loss_nce(
mil_pred_2, ab_labels.float()
)
if epoch > 10:
frm_scrs_re = rearrange(frm_scrs, "n t c -> (n t) c")
frm_scrs_2_re = rearrange(frm_scrs_2, "n t c -> (n t) c")
soft_label = rearrange(soft_label, "n t c -> (n t) c")
| """
Copyright 2023 LINE Corporation
LINE Corporation licenses this file to you under the Apache License,
version 2.0 (the "License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at:
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from __future__ import print_function
# torch
# Custom
def init_seed(seed):
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_parser():
    """Build the command-line argument parser.

    Parameter priority elsewhere in the program: command line > config file >
    the defaults declared here.

    Fixes relative to the previous version:
    - ``---beta`` had a triple dash, so the documented ``--beta`` flag did not
      exist; ``--beta`` is now the primary flag (``---beta`` kept as an alias
      for backward compatibility — both resolve to dest ``beta``).
    - ``--only_train_part`` now parses as a boolean via ``str2bool``; a CLI
      value of ``"False"`` previously evaluated truthy.
    - ``--only_train_epoch`` / ``--warm_up_epoch`` now parse as ints and
      ``--lambda-mil`` as float; CLI values previously stayed strings.
    """
    # parameter priority: command line > config > default
    parser = argparse.ArgumentParser(
        description="Spatial Temporal Graph Convolution Network"
    )
    parser.add_argument(
        "--work-dir",
        default="./work_dir/temp",
        help="the work folder for storing results",
    )
    parser.add_argument("-model_saved_name", default="")
    parser.add_argument(
        "--config",
        default="./config/nturgbd-cross-view/test_bone.yaml",
        help="path to the configuration file",
    )
    # processor
    parser.add_argument("--phase", default="train", help="must be train or test")
    # visulize and debug
    parser.add_argument("--seed", type=int, default=5, help="random seed for pytorch")
    parser.add_argument(
        "--log-interval",
        type=int,
        default=100,
        help="the interval for printing messages (#iteration)",
    )
    parser.add_argument(
        "--save-interval",
        type=int,
        default=2,
        help="the interval for storing models (#iteration)",
    )
    parser.add_argument(
        "--eval-interval",
        type=int,
        default=5,
        help="the interval for evaluating models (#iteration)",
    )
    parser.add_argument(
        "--print-log", type=str2bool, default=True, help="print logging or not"
    )
    parser.add_argument(
        "--show-topk",
        type=int,
        default=[1, 5],
        nargs="+",
        help="which Top K accuracy will be shown",
    )
    # feeder
    parser.add_argument(
        "--feeder", default="feeder.feeder", help="data loader will be used"
    )
    parser.add_argument(
        "--num-worker",
        type=int,
        default=32,
        help="the number of worker for data loader",
    )
    parser.add_argument(
        "--train-feeder-args",
        default=dict(),
        help="the arguments of data loader for training",
    )
    parser.add_argument(
        "--test-feeder-args",
        default=dict(),
        help="the arguments of data loader for test",
    )
    # model
    parser.add_argument("--model", default=None, help="the model will be used")
    parser.add_argument(
        "--model-args", type=dict, default=dict(), help="the arguments of model"
    )
    parser.add_argument(
        "--weights", default=None, help="the weights for network initialization"
    )
    parser.add_argument(
        "--ignore-weights",
        type=str,
        default=[],
        nargs="+",
        help="the name of weights which will be ignored in the initialization",
    )
    # optim
    parser.add_argument(
        "--base-lr", type=float, default=0.01, help="initial learning rate"
    )
    parser.add_argument(
        "--step",
        type=int,
        default=[200],
        nargs="+",
        help="the epoch where optimizer reduce the learning rate",
    )
    # training
    parser.add_argument(
        "--device",
        type=int,
        default=0,
        nargs="+",
        help="the indexes of GPUs for training or testing",
    )
    parser.add_argument("--optimizer", default="SGD", help="type of optimizer")
    parser.add_argument(
        "--nesterov", type=str2bool, default=False, help="use nesterov or not"
    )
    parser.add_argument(
        "--batch-size", type=int, default=256, help="training batch size"
    )
    parser.add_argument(
        "--test-batch-size", type=int, default=256, help="test batch size"
    )
    parser.add_argument(
        "--start-epoch", type=int, default=0, help="start training from which epoch"
    )
    parser.add_argument(
        "--num-epoch", type=int, default=80, help="stop training in which epoch"
    )
    parser.add_argument(
        "--weight-decay", type=float, default=0.0005, help="weight decay for optimizer"
    )
    # loss
    parser.add_argument("--loss", type=str, default="CE", help="loss type(CE or focal)")
    parser.add_argument(
        "--label_count_path",
        default=None,
        type=str,
        help="Path to label counts (used in loss weighting)",
    )
    parser.add_argument(
        "--beta",
        "---beta",  # legacy triple-dash spelling kept as an alias
        type=float,
        default=0.9999,
        help="Hyperparameter for Class balanced loss",
    )
    parser.add_argument(
        "--gamma", type=float, default=2.0, help="Hyperparameter for Focal loss"
    )
    parser.add_argument("--only_train_part", type=str2bool, default=False)
    parser.add_argument("--only_train_epoch", type=int, default=0)
    parser.add_argument("--warm_up_epoch", type=int, default=0)
    parser.add_argument(
        "--lambda-mil",
        type=float,
        default=1.0,
        help="balancing hyper-parameter of mil branch",
    )
    parser.add_argument(
        "--class-threshold",
        type=float,
        default=0.1,
        help="class threshold for rejection",
    )
    parser.add_argument(
        "--start-threshold",
        type=float,
        default=0.03,
        help="start threshold for action localization",
    )
    parser.add_argument(
        "--end-threshold",
        type=float,
        default=0.055,
        help="end threshold for action localization",
    )
    parser.add_argument(
        "--threshold-interval",
        type=float,
        default=0.005,
        help="threshold interval for action localization",
    )
    return parser
class Processor:
"""
Processor for Skeleton-based Action Recgnition
"""
    def __init__(self, arg):
        """Set up the training environment from parsed arguments.

        Side effects: persists the config to disk, (re)creates TensorBoard
        writers, builds the model/optimizer/data loaders, and opens a text
        logger under ``arg.model_saved_name``.
        """
        self.arg = arg
        self.save_arg()  # snapshot the full config before anything can fail
        if arg.phase == "train":
            if not arg.train_feeder_args["debug"]:
                if os.path.isdir(arg.model_saved_name):
                    print("log_dir: ", arg.model_saved_name, "already exist")
                    # answer = input('delete it? y/n:')
                    # NOTE: answer is hard-coded to "y", so an existing log dir
                    # is ALWAYS deleted without prompting.
                    answer = "y"
                    if answer == "y":
                        print("Deleting dir...")
                        shutil.rmtree(arg.model_saved_name)
                        print("Dir removed: ", arg.model_saved_name)
                        # input('Refresh the website of tensorboard by pressing any keys')
                    else:
                        print("Dir not removed: ", arg.model_saved_name)
                self.train_writer = SummaryWriter(
                    os.path.join(arg.model_saved_name, "train"), "train"
                )
                self.val_writer = SummaryWriter(
                    os.path.join(arg.model_saved_name, "val"), "val"
                )
            else:
                # Debug runs share a single writer for train and val.
                self.train_writer = self.val_writer = SummaryWriter(
                    os.path.join(arg.model_saved_name, "test"), "test"
                )
        self.global_step = 0
        # Order matters: the optimizer needs the model's parameters.
        self.load_model()
        self.load_optimizer()
        self.load_data()
        self.lr = self.arg.base_lr
        self.best_acc = 0
        self.best_per_class_acc = 0
        self.loss_nce = torch.nn.BCELoss()
        self.my_logger = Logger(
            os.path.join(arg.model_saved_name, "log.txt"), title="SWTAL"
        )
        # Columns: training step, class-wise mAP, then mAP at IoU 0.1..0.7.
        self.my_logger.set_names(["Step", "cmap"] + [f"map_0.{i}" for i in range(1, 8)])
    def load_data(self):
        """Instantiate the feeder class and build the DataLoaders.

        The train loader is only created in the "train" phase; the test loader
        is always created so evaluation works in either phase.
        """
        Feeder = import_class(self.arg.feeder)
        self.data_loader = dict()
        if self.arg.phase == "train":
            self.data_loader["train"] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.train_feeder_args),
                batch_size=self.arg.batch_size,
                shuffle=True,
                num_workers=self.arg.num_worker,
                drop_last=True,  # keep every training batch full-sized
                collate_fn=collate_with_padding_multi_joint,
            )
        self.data_loader["test"] = torch.utils.data.DataLoader(
            dataset=Feeder(**self.arg.test_feeder_args),
            batch_size=self.arg.test_batch_size,
            shuffle=False,
            num_workers=self.arg.num_worker,
            drop_last=False,
            collate_fn=collate_with_padding_multi_joint,
        )
def load_model(self):
output_device = (
self.arg.device[0] if type(self.arg.device) is list else self.arg.device
)
self.output_device = output_device
Model = import_class(self.arg.model)
shutil.copy2(inspect.getfile(Model), self.arg.work_dir)
print(Model)
self.model = Model(**self.arg.model_args).cuda(output_device)
print(self.model)
self.loss_type = arg.loss
if self.arg.weights:
# self.global_step = int(arg.weights[:-3].split("-")[-1])
self.print_log("Load weights from {}.".format(self.arg.weights))
if ".pkl" in self.arg.weights:
with open(self.arg.weights, "r") as f:
weights = pickle.load(f)
else:
weights = torch.load(self.arg.weights)
weights = OrderedDict(
[
[k.split("module.")[-1], v.cuda(output_device)]
for k, v in weights.items()
]
)
keys = list(weights.keys())
for w in self.arg.ignore_weights:
for key in keys:
if w in key:
if weights.pop(key, None) is not None:
self.print_log(
"Sucessfully Remove Weights: {}.".format(key)
)
else:
self.print_log("Can Not Remove Weights: {}.".format(key))
try:
self.model.load_state_dict(weights)
except:
state = self.model.state_dict()
diff = list(set(state.keys()).difference(set(weights.keys())))
print("Can not find these weights:")
for d in diff:
print(" " + d)
state.update(weights)
self.model.load_state_dict(state)
if type(self.arg.device) is list:
if len(self.arg.device) > 1:
self.model = nn.DataParallel(
self.model, device_ids=self.arg.device, output_device=output_device
)
def load_optimizer(self):
if self.arg.optimizer == "SGD":
self.optimizer = optim.SGD(
self.model.parameters(),
lr=self.arg.base_lr,
momentum=0.9,
nesterov=self.arg.nesterov,
weight_decay=self.arg.weight_decay,
)
elif self.arg.optimizer == "Adam":
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay,
)
else:
raise ValueError()
def save_arg(self):
# save arg
arg_dict = vars(self.arg)
if not os.path.exists(self.arg.work_dir):
os.makedirs(self.arg.work_dir)
with open("{}/config.yaml".format(self.arg.work_dir), "w") as f:
yaml.dump(arg_dict, f)
def adjust_learning_rate(self, epoch):
if self.arg.optimizer == "SGD" or self.arg.optimizer == "Adam":
if epoch < self.arg.warm_up_epoch:
lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
else:
lr = self.arg.base_lr * (
0.1 ** np.sum(epoch >= np.array(self.arg.step))
)
for param_group in self.optimizer.param_groups:
param_group["lr"] = lr
return lr
else:
raise ValueError()
def print_time(self):
localtime = time.asctime(time.localtime(time.time()))
self.print_log("Local current time : " + localtime)
def print_log(self, str, print_time=True):
if print_time:
localtime = time.asctime(time.localtime(time.time()))
str = "[ " + localtime + " ] " + str
print(str)
if self.arg.print_log:
with open("{}/print_log.txt".format(self.arg.work_dir), "a") as f:
print(str, file=f)
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
def train(self, epoch, wb_dict, save_model=False):
self.model.train()
self.print_log("Training epoch: {}".format(epoch + 1))
loader = self.data_loader["train"]
self.adjust_learning_rate(epoch)
loss_value, batch_acc = [], []
self.train_writer.add_scalar("epoch", epoch, self.global_step)
self.record_time()
timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
process = tqdm(loader)
if self.arg.only_train_part:
if epoch > self.arg.only_train_epoch:
print("only train part, require grad")
for key, value in self.model.named_parameters():
if "PA" in key:
value.requires_grad = True
else:
print("only train part, do not require grad")
for key, value in self.model.named_parameters():
if "PA" in key:
value.requires_grad = False
vid_preds = []
frm_preds = []
vid_lens = []
labels = []
results = []
indexs = []
for batch_idx, (data, label, target, mask, index, soft_label) in enumerate(
process
):
self.global_step += 1
# get data
data = data.float().cuda(self.output_device)
label = label.cuda(self.output_device)
mask = mask.cuda(self.output_device)
soft_label = soft_label.cuda(self.output_device)
timer["dataloader"] += self.split_time()
indexs.extend(index.cpu().numpy().tolist())
ab_labels = torch.cat([label, torch.ones(label.size(0), 1).cuda()], -1)
# forward
mil_pred, frm_scrs, mil_pred_2, frm_scrs_2 = self.model(data)
cls_mil_loss = self.loss_nce(mil_pred, ab_labels.float()) + self.loss_nce(
mil_pred_2, ab_labels.float()
)
if epoch > 10:
frm_scrs_re = rearrange(frm_scrs, "n t c -> (n t) c")
frm_scrs_2_re = rearrange(frm_scrs_2, "n t c -> (n t) c")
soft_label = rearrange(soft_label, "n t c -> (n t) c")
| loss = cls_mil_loss * 0.1 + mvl_loss( | 4 | 2023-10-20 05:38:16+00:00 | 8k |
SALT-NLP/Efficient_Unlearning | src/models/transformers/parameter-efficient-finetuning/models/auto/adapter_model.py | [
{
"identifier": "_BaseAutoModelClass",
"path": "src/models/transformers/models/auto/auto_factory.py",
"snippet": "class _BaseAutoModelClass:\n # Base class for auto models.\n _model_mapping = None\n\n def __init__(self, *args, **kwargs):\n raise EnvironmentError(\n f\"{self.__... | import warnings
from collections import OrderedDict
from ....models.auto.auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from ....models.auto.configuration_auto import CONFIG_MAPPING_NAMES | 4,497 |
# Make sure that children are placed before parents!
ADAPTER_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # model_type (config key) -> adapter-model class name. Ordering
        # matters: children precede parents (e.g. "xlm-roberta" before
        # "roberta", "deberta-v2" before "deberta").
        ("xlm-roberta", "XLMRobertaAdapterModel"),
        ("roberta", "RobertaAdapterModel"),
        ("beit", "BeitAdapterModel"),
        ("bert", "BertAdapterModel"),
        ("distilbert", "DistilBertAdapterModel"),
        ("deberta-v2", "DebertaV2AdapterModel"),
        ("deberta", "DebertaAdapterModel"),
        ("bart", "BartAdapterModel"),
        ("mbart", "MBartAdapterModel"),
        ("gpt2", "GPT2AdapterModel"),
        ("gptj", "GPTJAdapterModel"),
        ("t5", "T5AdapterModel"),
        ("vit", "ViTAdapterModel"),
    ]
)
# model_type -> legacy "with heads" class name (same ordering rule).
MODEL_WITH_HEADS_MAPPING_NAMES = OrderedDict(
    [
        ("xlm-roberta", "XLMRobertaModelWithHeads"),
        ("roberta", "RobertaModelWithHeads"),
        ("bert", "BertModelWithHeads"),
        ("distilbert", "DistilBertModelWithHeads"),
        ("bart", "BartModelWithHeads"),
        ("mbart", "MBartModelWithHeads"),
        ("gpt2", "GPT2ModelWithHeads"),
        ("t5", "T5ModelWithHeads"),
    ]
)
# Lazy mappings: class names are resolved to classes on first access.
ADAPTER_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, ADAPTER_MODEL_MAPPING_NAMES)
MODEL_WITH_HEADS_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_HEADS_MAPPING_NAMES)
|
# Make sure that children are placed before parents!
ADAPTER_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # model_type (config key) -> adapter-model class name. Ordering
        # matters: children precede parents (e.g. "xlm-roberta" before
        # "roberta", "deberta-v2" before "deberta").
        ("xlm-roberta", "XLMRobertaAdapterModel"),
        ("roberta", "RobertaAdapterModel"),
        ("beit", "BeitAdapterModel"),
        ("bert", "BertAdapterModel"),
        ("distilbert", "DistilBertAdapterModel"),
        ("deberta-v2", "DebertaV2AdapterModel"),
        ("deberta", "DebertaAdapterModel"),
        ("bart", "BartAdapterModel"),
        ("mbart", "MBartAdapterModel"),
        ("gpt2", "GPT2AdapterModel"),
        ("gptj", "GPTJAdapterModel"),
        ("t5", "T5AdapterModel"),
        ("vit", "ViTAdapterModel"),
    ]
)
# model_type -> legacy "with heads" class name (same ordering rule).
MODEL_WITH_HEADS_MAPPING_NAMES = OrderedDict(
    [
        ("xlm-roberta", "XLMRobertaModelWithHeads"),
        ("roberta", "RobertaModelWithHeads"),
        ("bert", "BertModelWithHeads"),
        ("distilbert", "DistilBertModelWithHeads"),
        ("bart", "BartModelWithHeads"),
        ("mbart", "MBartModelWithHeads"),
        ("gpt2", "GPT2ModelWithHeads"),
        ("t5", "T5ModelWithHeads"),
    ]
)
# Lazy mappings: class names are resolved to classes on first access.
ADAPTER_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, ADAPTER_MODEL_MAPPING_NAMES)
MODEL_WITH_HEADS_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_HEADS_MAPPING_NAMES)
| class AutoAdapterModel(_BaseAutoModelClass): | 0 | 2023-10-18 18:05:54+00:00 | 8k |
exists-forall/striped_attention | llamabpt/llama.py | [
{
"identifier": "blockwise_ffn",
"path": "bpt.py",
"snippet": "def blockwise_ffn(remat_ffn, inputs, chunk_size, deterministic):\n # remat_ffn: a rematerialized ffn with policy jax.checkpoint_policies.nothing_saveable()\n # inputs: (batch, seq_len, dim)\n # chunk_size: the chunk size to split th... | import os
import json
import tempfile
import numpy as np
import jax
import jax.numpy as jnp
import flax.linen as nn
import einops
import sentencepiece as spm
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
from functools import partial
from jax import lax
from jax.sharding import PartitionSpec as PS
from jax.experimental.shard_map import shard_map
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from flax.linen import partitioning as nn_partitioning
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
from transformers.modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
from ml_collections import ConfigDict
from ml_collections.config_dict import config_dict
from tux import function_args_to_config, load_pickle, open_file, with_sharding_constraint, get_jax_mesh, get_gradient_checkpoint_policy
from bpt import blockwise_ffn, blockwise_attn, ring_attention, permute_tokens, unpermute_outputs | 4,405 | )
if self.config.scan_layers:
initializing = self.is_mutable_collection('params')
params_spec = (
self.config.param_scan_axis if initializing else
nn_partitioning.ScanIn(self.config.param_scan_axis))
cache_spec = 0
hidden_states, _ = nn.scan(
block,
variable_axes={
'params': params_spec,
'cache': cache_spec,
'intermediates': 0
},
split_rngs={
'params': True,
'dropout': True
},
in_axes=(nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast),
length=self.config.num_hidden_layers,
metadata_params={nn.PARTITION_NAME: 'scan_decoder_layer'},
)(self.config, name='scan_decoder', dtype=self.dtype, param_dtype=self.param_dtype,)(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
else:
blocks = [
block(
self.config,
name=str(i),
dtype=self.dtype,
param_dtype=self.param_dtype,
) for i in range(self.config.num_hidden_layers)
]
for block in blocks:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = block(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
hidden_states = layer_outputs
if output_attentions:
all_attentions += (layer_outputs[1],)
# this contains possible `None` values - `FlaxGPTJModule` will filter them out
outputs = (hidden_states, all_hidden_states, all_attentions)
return outputs
class FlaxLLaMAModule(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype = jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
    def setup(self):
        """Create the token embedding, dropout, transformer stack and final norm."""
        self.embed_dim = self.config.hidden_size
        # Token embedding table: vocab_size x hidden_size.
        self.wte = nn.Embed(
            self.config.vocab_size,
            self.config.hidden_size,
            embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
            dtype=self.dtype,
            param_dtype=self.param_dtype,
        )
        self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
        # The stack of decoder blocks (optionally scanned over layers).
        self.h = FlaxLLaMABlockCollection(self.config, dtype=self.dtype, param_dtype=self.param_dtype, precision=self.precision)
        # Final RMSNorm applied before the LM head.
        self.ln_f = RMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps, dtype=self.dtype, param_dtype=self.param_dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
deterministic=True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
allow_permuted_outputs: bool = False,
):
seq_mesh_dim = self.config.get_mesh_dim_sizes()[-1]
input_ids = permute_tokens(self.config.attention_type, input_ids, seq_mesh_dim)
position_ids = permute_tokens(self.config.attention_type, position_ids, seq_mesh_dim)
attention_mask = permute_tokens(self.config.attention_type, attention_mask, seq_mesh_dim)
input_embeds = self.wte(input_ids.astype("i4"))
hidden_states = self.dropout(input_embeds, deterministic=deterministic)
outputs = self.h(
hidden_states,
attention_mask,
position_ids=position_ids,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.ln_f(hidden_states)
if not allow_permuted_outputs:
|
# Canonical hyper-parameter presets keyed by model size, consumed by
# LLaMAConfig.load_config. The 'debug' preset is a tiny model for tests.
LLAMA_STANDARD_CONFIGS = {
    '1b': {
        'vocab_size': 32000,
        'hidden_size': 2048,
        'intermediate_size': 5504,
        'num_hidden_layers': 22,
        'num_attention_heads': 16,
        'max_sequence_length': 2048,
        'initializer_range': 0.02,
        'rms_norm_eps': 1e-6,
        'use_cache': True,
        'tie_word_embeddings': False,
    },
    '3b': {
        'vocab_size': 32000,
        'hidden_size': 3200,
        'intermediate_size': 8640,
        'num_hidden_layers': 26,
        'num_attention_heads': 32,
        'max_sequence_length': 2048,
        'initializer_range': 0.02,
        'rms_norm_eps': 1e-6,
        'use_cache': True,
        'tie_word_embeddings': False,
    },
    '7b': {
        'vocab_size': 32000,
        'hidden_size': 4096,
        'intermediate_size': 11008,
        'num_hidden_layers': 32,
        'num_attention_heads': 32,
        'max_sequence_length': 2048,
        'initializer_range': 0.02,
        'rms_norm_eps': 1e-6,
        'use_cache': True,
        'tie_word_embeddings': False,
    },
    '13b': {
        'vocab_size': 32000,
        'hidden_size': 5120,
        'intermediate_size': 13824,
        'num_hidden_layers': 40,
        'num_attention_heads': 40,
        'max_sequence_length': 2048,
        'initializer_range': 0.02,
        'rms_norm_eps': 1e-6,
        'use_cache': True,
        'tie_word_embeddings': False,
    },
    '30b': {
        'vocab_size': 32000,
        'hidden_size': 6656,
        'intermediate_size': 17920,
        'num_hidden_layers': 60,
        'num_attention_heads': 52,
        'max_sequence_length': 2048,
        'initializer_range': 0.02,
        'rms_norm_eps': 1e-6,
        'use_cache': True,
        'tie_word_embeddings': False,
    },
    '65b': {
        'vocab_size': 32000,
        'hidden_size': 8192,
        'intermediate_size': 22016,
        'num_hidden_layers': 80,
        'num_attention_heads': 64,
        'max_sequence_length': 2048,
        'initializer_range': 0.02,
        'rms_norm_eps': 1e-5,  # note: 65b uses a looser epsilon than the others
        'use_cache': True,
        'tie_word_embeddings': False,
    },
    'debug': { # A small model for debugging
        'vocab_size': 32000,
        'hidden_size': 128,
        'intermediate_size': 256,
        'num_hidden_layers': 2,
        'num_attention_heads': 4,
        'max_sequence_length': 2048,
        'initializer_range': 0.02,
        'rms_norm_eps': 1e-6,
        'use_cache': True,
        'tie_word_embeddings': False,
    },
}
class LLaMAConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`~LLaMAModel`]. It is used to instantiate an LLaMA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the LLaMA-7B.
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`~LLaMAModel`] or [`~TFLLaMAModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_sequence_length (`int`, *optional*, defaults to 2048):
            Max sequence length for model (for RoPE computation)
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings(`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
    Example:
    ```python
    >>> from transformers import LLaMAModel, LLaMAConfig
    >>> # Initializing a LLaMA llama-7b style configuration
    >>> configuration = LLaMAConfig()
    >>> # Initializing a model from the llama-7b style configuration
    >>> model = LLaMAModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""
    model_type = "llama"
    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        max_sequence_length=2048,
        rms_norm_eps=1e-6,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=1,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        tie_word_embeddings=False,
        remat_block='',
        remat_attention='',
        remat_mlp='',
        scan_attention=False,
        attention_type=None,
        scan_mlp=False,
        scan_query_chunk_size=1024,
        scan_key_chunk_size=1024,
        scan_mlp_chunk_size=1024,
        fcm_min_ratio=0.0,
        fcm_max_ratio=0.0,
        scan_layers=True,
        param_scan_axis=0,
        mesh_dim=None,
        **kwargs,
    ):
        """Store architecture, rematerialization/scan, and sharding options."""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_sequence_length = max_sequence_length
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        # Gradient-checkpointing policies (names resolved elsewhere).
        self.remat_block = remat_block
        self.remat_attention = remat_attention
        self.remat_mlp = remat_mlp
        # Blockwise/ring attention and blockwise-MLP options.
        self.scan_attention = scan_attention
        self.attention_type = attention_type
        self.scan_mlp = scan_mlp
        self.scan_query_chunk_size = scan_query_chunk_size
        self.scan_key_chunk_size = scan_key_chunk_size
        self.scan_mlp_chunk_size = scan_mlp_chunk_size
        # Forgetful causal masking ratios.
        self.fcm_min_ratio = fcm_min_ratio
        self.fcm_max_ratio = fcm_max_ratio
        # Whether layers are stacked via nn.scan, and along which param axis.
        self.scan_layers = scan_layers
        self.param_scan_axis = param_scan_axis
        # Comma-separated device-mesh axis sizes: "dp,fsdp,tp,sp".
        self.mesh_dim = mesh_dim
        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def get_mesh_dim_sizes(self):
        # Parse "mesh_dim" into integer axis sizes, e.g. "1,2,4,8" -> [1,2,4,8].
        return [int(size) for size in self.mesh_dim.split(",")]
    @classmethod
    def get_default_config(cls, updates=None):
        """Return the __init__ defaults as a ConfigDict, optionally updated."""
        config = function_args_to_config(cls.__init__)
        if updates is not None:
            config.update(ConfigDict(updates).copy_and_resolve_references())
        return config
    @staticmethod
    def get_jax_mesh(axis_dims):
        # Axes: data, fully-sharded data, tensor, and sequence parallelism.
        return get_jax_mesh(axis_dims, ('dp', 'fsdp', 'tp', 'sp'))
    @staticmethod
    def get_partition_rules(scan_layers=False, scan_axis=0):
        """ Parition rules for LLaMA. Note that these rules are orderd, so that
            the beginning rules match first. It is important to use
            PartitionSpec() instead of None here because JAX does not treat
            None as a pytree leaf.

            When layers are scanned, parameters gain a leading (axis 0) or
            inner (axis 1) layer dimension, hence the extra `None` entry.
        """
        if scan_layers:
            if scan_axis == 0:
                return (
                    # embeddings
                    ("transformer/wte/embedding", PS("tp", ("fsdp", "sp"))),
                    # atention
                    ("attention/(wq|wk|wv)/kernel", PS(None, ("fsdp", "sp"), "tp")),
                    ("attention/wo/kernel", PS(None, "tp", ("fsdp", "sp"))),
                    # mlp
                    ("feed_forward/w1/kernel", PS(None, ("fsdp", "sp"), "tp")),
                    ("feed_forward/w2/kernel", PS(None, "tp", ("fsdp", "sp"))),
                    ("feed_forward/w3/kernel", PS(None, ("fsdp", "sp"), "tp")),
                    # layer norms
                    ("attention_norm/kernel", PS(None, None)),
                    ("ffn_norm/kernel", PS(None, None)),
                    # output head
                    ("transformer/ln_f/kernel", PS(None)),
                    ("lm_head/kernel", PS(("fsdp", "sp"), "tp")),
                    ('.*', PS(None)),
                )
            elif scan_axis == 1:
                return (
                    # embeddings
                    ("transformer/wte/embedding", PS("tp", ("fsdp", "sp"))),
                    # atention
                    ("attention/(wq|wk|wv)/kernel", PS(("fsdp", "sp"), None, "tp")),
                    ("attention/wo/kernel", PS("tp", None, ("fsdp", "sp"))),
                    # mlp
                    ("feed_forward/w1/kernel", PS(("fsdp", "sp"), None, "tp")),
                    ("feed_forward/w2/kernel", PS("tp", None, ("fsdp", "sp"))),
                    ("feed_forward/w3/kernel", PS(("fsdp", "sp"), None, "tp")),
                    # layer norms
                    ("attention_norm/kernel", PS(None, None)),
                    ("ffn_norm/kernel", PS(None, None)),
                    # output head
                    ("transformer/ln_f/kernel", PS(None)),
                    ("lm_head/kernel", PS(("fsdp", "sp"), "tp")),
                    ('.*', PS(None)),
                )
            else:
                raise ValueError(f"Invalid scan_axis {scan_axis}")
        else:
            return (
                # embeddings
                ("transformer/wte/embedding", PS("tp", ("fsdp", "sp"))),
                # atention
                ("attention/(wq|wk|wv)/kernel", PS(("fsdp", "sp"), "tp")),
                ("attention/wo/kernel", PS("tp", ("fsdp", "sp"))),
                # mlp
                ("feed_forward/w1/kernel", PS(("fsdp", "sp"), "tp")),
                ("feed_forward/w2/kernel", PS("tp", ("fsdp", "sp"))),
                ("feed_forward/w3/kernel", PS(("fsdp", "sp"), "tp")),
                # layer norms
                ("attention_norm/kernel", PS(None)),
                ("ffn_norm/kernel", PS(None)),
                # output head
                ("transformer/ln_f/kernel", PS(None)),
                ("lm_head/kernel", PS(("fsdp", "sp"), "tp")),
                ('.*', PS(None)),
            )
    @staticmethod
    def get_weight_decay_exclusions():
        # No parameters are excluded from weight decay.
        return tuple()
    @staticmethod
    def rng_keys():
        # PRNG streams used by the model (fcm = forgetful causal masking).
        return ('params', 'dropout', 'fcm')
    @staticmethod
    def get_tokenizer_config(updates=None):
        """Default sentencepiece tokenizer config; `vocab_file` must be set."""
        config = ConfigDict()
        config.vocab_file = ''
        config.add_bos_token = False
        config.add_eos_token = False
        if updates is not None:
            config.update(ConfigDict(updates).copy_and_resolve_references())
        return config
    @classmethod
    def get_tokenizer(cls, config, padding_side='left', truncation_side='right'):
        """Build an LLaMATokenizer from a tokenizer config."""
        config = cls.get_tokenizer_config(config)
        assert config.vocab_file != '', 'vocab_file must be specified'
        tokenizer = LLaMATokenizer(
            vocab_file=config.vocab_file,
            add_bos_token=config.add_bos_token,
            add_eos_token=config.add_eos_token,
            padding_side=padding_side,
            truncation_side=truncation_side,
        )
        return tokenizer
    @classmethod
    def load_config(cls, path):
        """Load a config from a preset name, 'pickle::<path>' or 'json::<path>'."""
        if path in LLAMA_STANDARD_CONFIGS:
            return cls.from_dict(LLAMA_STANDARD_CONFIGS[path])
        load_type, load_path = path.split('::', 1)
        if load_type == 'pickle':
            return cls.from_dict(load_pickle(load_path)['llama_config'])
        elif load_type == 'json':
            with open_file(load_path, 'r') as fin:
                raw_config = fin.read()
            return cls.from_dict(json.loads(raw_config))
        else:
            raise ValueError(f'Unsupported load config type: {load_type}')
# Shorthand for flax's partitioning-aware rematerialization wrapper.
remat = nn_partitioning.remat
logger = logging.get_logger(__name__)
class RMSNorm(nn.Module):
    """Root-mean-square layer norm (no mean-centering, no bias).

    Normalizes by rsqrt(mean(x^2) + eps) and applies a learned per-feature
    scale stored under the flax param name 'kernel'.
    """
    dim: int  # feature dimension to normalize over
    eps: float=1e-6
    dtype: jnp.dtype=jnp.float32
    param_dtype: jnp.dtype=jnp.float32
    def setup(self) -> None:
        self.weight = self.param(
            'kernel',
            nn.initializers.ones,
            (self.dim,),
            self.param_dtype,
        )
    def _norm(self, x: jnp.ndarray) -> jnp.ndarray:
        # rsqrt of the mean square over the last axis; eps avoids div-by-zero.
        return x * jax.lax.rsqrt(jnp.square(x).mean(-1, keepdims=True) + self.eps)
    def __call__(self, x: jnp.ndarray) -> jnp.ndarray:
        # Compute in at least float32 for numerical stability, cast back after.
        x = x.astype(jnp.promote_types(self.dtype, jnp.float32))
        output = self._norm(x).astype(self.dtype)
        weight = jnp.asarray(self.weight, self.dtype)
        return output * weight
def precompute_freqs_cis(dim: int, end: int, theta: float=10000.0, dtype: jnp.dtype=jnp.float32) -> jnp.ndarray:
    """Precompute rotary-embedding phases exp(i * pos * freq).

    Returns a complex64 array of shape (end, dim // 2), where row `pos`
    holds the unit phasors for each frequency at that position.
    """
    exponents = np.arange(0, dim, 2)[: (dim // 2)].astype(dtype) / dim
    inv_freq = 1.0 / (theta ** exponents)
    positions = np.arange(end)
    angles = np.outer(positions, inv_freq).astype(dtype)
    cos, sin = np.cos(angles), np.sin(angles)
    return jnp.asarray(np.complex64(cos + 1j * sin))
def apply_rotary_emb(
    xq: jnp.ndarray,
    xk: jnp.ndarray,
    freqs_cis: jnp.ndarray,
    dtype: jnp.dtype=jnp.float32,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
    """Apply rotary position embeddings to queries and keys.

    Adjacent pairs of the last (head) dimension are treated as complex
    numbers and rotated by the precomputed phases in `freqs_cis`, which is
    broadcast over the head axis. Returns the rotated (xq, xk) in `dtype`.
    """
    def _as_complex(x):
        pairs = x.astype(jnp.float32).reshape(*x.shape[:-1], -1, 2)
        return jax.lax.complex(pairs[..., 0], pairs[..., 1])
    def _as_real(x):
        return jnp.stack((jnp.real(x), jnp.imag(x)), axis=-1).reshape(*x.shape[:-1], -1)
    q_complex = _as_complex(xq)
    k_complex = _as_complex(xk)
    # Insert a singleton head axis so phases broadcast across heads.
    phases = jnp.reshape(freqs_cis, (*freqs_cis.shape[:2], 1, *freqs_cis.shape[2:]))
    q_out = _as_real(q_complex * phases)
    k_out = _as_real(k_complex * phases)
    return q_out.astype(dtype), k_out.astype(dtype)
class FlaxLLaMAAttention(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
    def setup(self):
        """Create q/k/v/output projections, dropout, causal mask and rotary phases."""
        config = self.config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        # Bias-free projections, LLaMA-style.
        self.wq = nn.Dense(
            config.num_attention_heads*self.head_dim,
            dtype=self.dtype,
            param_dtype=self.param_dtype,
            use_bias=False,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            precision=self.precision,
        )
        self.wk = nn.Dense(
            config.num_attention_heads*self.head_dim,
            dtype=self.dtype,
            param_dtype=self.param_dtype,
            use_bias=False,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            precision=self.precision,
        )
        self.wv = nn.Dense(
            config.num_attention_heads*self.head_dim,
            dtype=self.dtype,
            param_dtype=self.param_dtype,
            use_bias=False,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            precision=self.precision,
        )
        self.wo = nn.Dense(
            config.hidden_size,
            dtype=self.dtype,
            param_dtype=self.param_dtype,
            use_bias=False,
            kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
            precision=self.precision,
        )
        self.resid_dropout = nn.Dropout(rate=config.resid_pdrop)
        self.causal_mask = make_causal_mask(jnp.ones((1, config.max_sequence_length), dtype="bool"), dtype="bool")
        # Rotary phases precomputed for 2x max length (headroom for permuted
        # / extended position ids).
        self.freqs_cis = precompute_freqs_cis(
            self.head_dim,
            config.max_sequence_length * 2,
            dtype=self.dtype,
        )
def _split_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
def _merge_heads(self, hidden_states):
return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
    @nn.compact
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to cached
        states from previous steps. This function is slighly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252

        Returns the (possibly cache-extended) key, value and attention mask.
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = lax.dynamic_update_slice(cached_key.value, key, indices)
            value = lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            # advance the write cursor by the number of new positions
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)
        return key, value, attention_mask
def __call__(
self,
hidden_states,
attention_mask,
position_ids,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
fcm_mask=None,
):
xq, xk, xv = self.wq(hidden_states), self.wk(hidden_states), self.wv(hidden_states)
xq = with_sharding_constraint(xq, PS(("dp", "fsdp"), "sp", "tp"))
xk = with_sharding_constraint(xk, PS(("dp", "fsdp"), "sp", "tp"))
xv = with_sharding_constraint(xv, PS(("dp", "fsdp"), "sp", "tp"))
xq = self._split_heads(xq)
xk = self._split_heads(xk)
xv = self._split_heads(xv)
freqs_cis = jnp.take(self.freqs_cis, position_ids, axis=0)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis, dtype=self.dtype)
dropout_rng = None
if not deterministic and self.config.attn_pdrop > 0.0:
dropout_rng = self.make_rng("dropout")
# if self.config.scan_attention and not (self.has_variable("cache", "cached_key") or init_cache):
if self.config.attention_type in ['ring_blockwise', 'blockwise', 'striped']:
# doesn't need blockwise attention if we are doing autoregressive decoding since no quadratic memory
# attention mask without nxn materlization, blockwise_attn will handle the rest
attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
attn_weights = None
# transform boolean mask into float mask
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
)
attn_weights = None
if self.config.attention_type in ['ring_blockwise', 'striped']:
causal_layout = "striped" if self.config.attention_type == "striped" else "normal"
seq_length = hidden_states.shape[1]
num_devices = self.config.get_mesh_dim_sizes()[-1]
assert seq_length % num_devices == 0
block_size = seq_length // num_devices
ring_attention_sharded = shard_map(
partial(
ring_attention,
axis_name="sp",
float32_logits=True,
blockwise_kwargs=dict(
deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.config.attn_pdrop,
causal_layout=causal_layout,
query_chunk_size=self.config.scan_query_chunk_size,
key_chunk_size=self.config.scan_key_chunk_size,
block_size=block_size,
dtype=self.dtype,
policy=get_gradient_checkpoint_policy('nothing_saveable'),
precision=self.precision,
prevent_cse=not self.config.scan_layers,
),
),
mesh=LLaMAConfig.get_jax_mesh(self.config.mesh_dim),
in_specs=(
PS(("dp", "fsdp"), "sp", "tp", None),
PS(("dp", "fsdp"), "sp", "tp", None),
PS(("dp", "fsdp"), "sp", "tp", None),
PS(("dp", "fsdp"), None, None, None)
),
out_specs=PS(("dp", "fsdp"), "sp", "tp", None),
check_rep=False
)
attn_output = ring_attention_sharded(xq, xk, xv, attention_bias)
elif self.config.attention_type == 'blockwise':
attn_output = blockwise_attn(
xq, xk, xv, attention_bias,
deterministic=deterministic,
dropout_rng=dropout_rng,
attn_pdrop=self.config.attn_pdrop,
causal=True,
query_chunk_size=self.config.scan_query_chunk_size,
key_chunk_size=self.config.scan_key_chunk_size,
dtype=self.dtype,
policy=get_gradient_checkpoint_policy('nothing_saveable'),
precision=self.precision,
float32_logits=True,
prevent_cse=not self.config.scan_layers,
)
else:
raise Exception(self.config.attention_type)
attn_output = with_sharding_constraint(attn_output, PS(("dp", "fsdp"), "sp", "tp", None))
else:
query_length, key_length = xq.shape[1], xk.shape[1]
if self.has_variable("cache", "cached_key"):
mask_shift = self.variables["cache"]["cache_index"]
max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
causal_mask = lax.dynamic_slice(
self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
)
else:
causal_mask = self.causal_mask[:, :, :query_length, :key_length]
batch_size = hidden_states.shape[0]
causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
attention_mask = combine_masks(attention_mask, causal_mask, fcm_mask)
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
if self.has_variable("cache", "cached_key") or init_cache:
xk, xv, attention_mask = self._concatenate_to_cache(xk, xv, xq, attention_mask)
if self.config.attention_type == 'standard':
attention_bias = lax.select(
attention_mask > 0,
jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
)
attn_weights = dot_product_attention_weights(
xq,
xk,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.config.attn_pdrop,
deterministic=deterministic,
dtype=jnp.promote_types(self.dtype, jnp.float32),
precision=self.precision,
)
attn_weights = with_sharding_constraint(attn_weights, PS(("dp", "fsdp"), "tp", "sp", None))
attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, xv, precision=self.precision)
else:
raise Exception(self.config.attention_type)
attn_output = self._merge_heads(attn_output)
attn_output = self.wo(attn_output)
attn_output = self.resid_dropout(attn_output, deterministic=deterministic)
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
class FlaxLLaMAMLP(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self) -> None:
config = self.config
self.w1 = nn.Dense(
config.intermediate_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.w2 = nn.Dense(
config.hidden_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.w3 = nn.Dense(
config.intermediate_size,
dtype=self.dtype,
param_dtype=self.param_dtype,
use_bias=False,
kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
precision=self.precision,
)
self.dropout = nn.Dropout(rate=self.config.resid_pdrop)
def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
x = self.w2(nn.silu(self.w1(x)) * self.w3(x))
x = self.dropout(x, deterministic=deterministic)
return x
class FlaxLLaMABlock(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype=jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self) -> None:
attention_module = FlaxLLaMAAttention
mlp_module = FlaxLLaMAMLP
if self.config.remat_attention != '':
attention_module = remat(
FlaxLLaMAAttention, static_argnums=(3, 4, 5),
policy=get_gradient_checkpoint_policy(self.config.remat_attention),
prevent_cse=not self.config.scan_layers,
)
if self.config.remat_mlp != '':
mlp_module = remat(
FlaxLLaMAMLP, static_argnums=(1,),
policy=get_gradient_checkpoint_policy(self.config.remat_mlp),
prevent_cse=not self.config.scan_layers,
)
self.attention = attention_module(
self.config,
dtype=self.dtype,
param_dtype=self.param_dtype,
precision=self.precision,
)
self.feed_forward = mlp_module(
self.config,
dtype=self.dtype,
param_dtype=self.param_dtype,
precision=self.precision,
)
self.attention_norm = RMSNorm(
self.config.hidden_size,
eps=self.config.rms_norm_eps,
dtype=self.dtype,
param_dtype=self.param_dtype,
)
self.ffn_norm = RMSNorm(
self.config.hidden_size,
eps=self.config.rms_norm_eps,
dtype=self.dtype,
param_dtype=self.param_dtype,
)
def __call__(
self,
hidden_states,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
fcm_mask: Optional[jnp.ndarray] = None,
):
attn_outputs = self.attention(
self.attention_norm(hidden_states),
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
attn_output = attn_outputs[0]
hidden_states = hidden_states + attn_output
feed_forward_input = self.ffn_norm(hidden_states)
if self.config.scan_mlp:
feed_forward_hidden_states = blockwise_ffn(
self.feed_forward,
feed_forward_input,
self.config.scan_mlp_chunk_size,
deterministic,
)
else:
feed_forward_hidden_states = self.feed_forward(
feed_forward_input,
deterministic,
)
feed_forward_hidden_states = with_sharding_constraint(feed_forward_hidden_states, PS(("dp", "fsdp"), None, "tp"))
hidden_states = hidden_states + feed_forward_hidden_states
# return (hidden_states,) + attn_outputs[1:]
outputs = hidden_states
if self.config.scan_layers:
outputs = (outputs, None)
return outputs
class FlaxLLaMAPreTrainedModel(FlaxPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LLaMAConfig
base_model_prefix = "transformer"
module_class: nn.Module = None
def __init__(
self,
config: LLaMAConfig,
input_shape: Tuple = (1, 1),
seed: int = 0,
dtype: jnp.dtype = jnp.float32,
_do_init: bool = True,
**kwargs,
):
module = self.module_class(config=config, dtype=dtype, **kwargs)
super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
# init input tensors
input_ids = jnp.zeros(input_shape, dtype="i4")
attention_mask = jnp.ones_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
params_rng, dropout_rng = jax.random.split(rng)
rngs = {"params": params_rng, "dropout": dropout_rng}
if self.config.add_cross_attention:
encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
encoder_attention_mask = attention_mask
module_init_outputs = self.module.init(
rngs,
input_ids,
attention_mask,
position_ids,
encoder_hidden_states,
encoder_attention_mask,
return_dict=False,
)
else:
module_init_outputs = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)
random_params = module_init_outputs["params"]
if params is not None:
random_params = flatten_dict(unfreeze(random_params))
params = flatten_dict(unfreeze(params))
for missing_key in self._missing_keys:
params[missing_key] = random_params[missing_key]
self._missing_keys = set()
return freeze(unflatten_dict(params))
else:
return random_params
def init_cache(self, batch_size, max_length):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
"""
# init input variables to retrieve cache
input_ids = jnp.ones((batch_size, max_length))
attention_mask = jnp.ones_like(input_ids)
position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
init_variables = self.module.init(
jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
)
return init_variables["cache"]
@add_start_docstrings_to_model_forward("")
def __call__(
self,
input_ids,
attention_mask=None,
position_ids=None,
params: dict = None,
past_key_values: dict = None,
dropout_rng: jax.random.PRNGKey = None,
train: bool = False,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
batch_size, sequence_length = input_ids.shape
if position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.")
position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
if attention_mask is None:
attention_mask = jnp.ones((batch_size, sequence_length))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
# if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that it can be changed by FlaxGPTJAttention module
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
outputs = self.module.apply(
inputs,
jnp.array(input_ids, dtype="i4"),
jnp.array(attention_mask, dtype="i4"),
jnp.array(position_ids, dtype="i4"),
not train,
False,
output_attentions,
output_hidden_states,
return_dict,
rngs=rngs,
mutable=mutable,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past_key_values = outputs
outputs["past_key_values"] = unfreeze(past_key_values["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past_key_values = outputs
outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
return outputs
class FlaxLLaMABlockCollection(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype = jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
@nn.compact
def __call__(
self,
hidden_states,
attention_mask=None,
position_ids=None,
deterministic: bool = True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
):
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
if not deterministic and self.config.fcm_max_ratio > 0:
# Apply forgetful causal mask
batch_size, seq_length = hidden_states.shape[0], hidden_states.shape[1]
fcm_ratio = jax.random.uniform(
self.make_rng('fcm'), shape=(batch_size, 1, 1, 1),
minval=self.config.fcm_min_ratio,
maxval=self.config.fcm_max_ratio
)
fcm_mask = jax.random.uniform(
self.make_rng('fcm'),
shape=(batch_size, 1, seq_length, seq_length)
) > fcm_ratio
fcm_mask = fcm_mask.at[:, :, :, 0].set(True)
fcm_mask = fcm_mask.astype('bool')
else:
fcm_mask = None
block = FlaxLLaMABlock
if self.config.remat_block != '':
block = remat(
FlaxLLaMABlock, static_argnums=(3, 4, 5),
prevent_cse=not self.config.scan_layers,
policy=get_gradient_checkpoint_policy(self.config.remat_block)
)
if self.config.scan_layers:
initializing = self.is_mutable_collection('params')
params_spec = (
self.config.param_scan_axis if initializing else
nn_partitioning.ScanIn(self.config.param_scan_axis))
cache_spec = 0
hidden_states, _ = nn.scan(
block,
variable_axes={
'params': params_spec,
'cache': cache_spec,
'intermediates': 0
},
split_rngs={
'params': True,
'dropout': True
},
in_axes=(nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast),
length=self.config.num_hidden_layers,
metadata_params={nn.PARTITION_NAME: 'scan_decoder_layer'},
)(self.config, name='scan_decoder', dtype=self.dtype, param_dtype=self.param_dtype,)(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
else:
blocks = [
block(
self.config,
name=str(i),
dtype=self.dtype,
param_dtype=self.param_dtype,
) for i in range(self.config.num_hidden_layers)
]
for block in blocks:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = block(
hidden_states,
attention_mask,
position_ids,
deterministic,
init_cache,
output_attentions,
fcm_mask,
)
hidden_states = layer_outputs
if output_attentions:
all_attentions += (layer_outputs[1],)
# this contains possible `None` values - `FlaxGPTJModule` will filter them out
outputs = (hidden_states, all_hidden_states, all_attentions)
return outputs
class FlaxLLaMAModule(nn.Module):
config: LLaMAConfig
dtype: jnp.dtype = jnp.float32
param_dtype: jnp.dtype=jnp.float32
precision: Optional[Union[jax.lax.Precision, str]]=None
def setup(self):
self.embed_dim = self.config.hidden_size
self.wte = nn.Embed(
self.config.vocab_size,
self.config.hidden_size,
embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
dtype=self.dtype,
param_dtype=self.param_dtype,
)
self.dropout = nn.Dropout(rate=self.config.embd_pdrop)
self.h = FlaxLLaMABlockCollection(self.config, dtype=self.dtype, param_dtype=self.param_dtype, precision=self.precision)
self.ln_f = RMSNorm(self.config.hidden_size, eps=self.config.rms_norm_eps, dtype=self.dtype, param_dtype=self.param_dtype)
def __call__(
self,
input_ids,
attention_mask,
position_ids,
deterministic=True,
init_cache: bool = False,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
allow_permuted_outputs: bool = False,
):
seq_mesh_dim = self.config.get_mesh_dim_sizes()[-1]
input_ids = permute_tokens(self.config.attention_type, input_ids, seq_mesh_dim)
position_ids = permute_tokens(self.config.attention_type, position_ids, seq_mesh_dim)
attention_mask = permute_tokens(self.config.attention_type, attention_mask, seq_mesh_dim)
input_embeds = self.wte(input_ids.astype("i4"))
hidden_states = self.dropout(input_embeds, deterministic=deterministic)
outputs = self.h(
hidden_states,
attention_mask,
position_ids=position_ids,
deterministic=deterministic,
init_cache=init_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.ln_f(hidden_states)
if not allow_permuted_outputs: | hidden_states = unpermute_outputs(self.config.attention_type, hidden_states, seq_mesh_dim) | 4 | 2023-10-24 02:01:18+00:00 | 8k |
brandonrobertz/reason-act-sqlite-py | benchmark_runner.py | [
{
"identifier": "get_keyword_matches",
"path": "metrics.py",
"snippet": "def get_keyword_matches(result, correct_keywords, return_texts=False):\n match_texts = []\n matches = 0\n if not result:\n if return_texts:\n return matches, match_texts\n return matches\n for k... | from datetime import datetime
from nltk.corpus import stopwords
from nltk import download
from yaml import load, dump
from yaml import CLoader as Loader, CDumper as Dumper
from yaml import Loader, Dumper
from metrics import get_keyword_matches
from llm_sql_queries import execute
from llm_openai_sql_queries import execute as execute_openai
import copy
import multiprocessing
import json
import os
import re
import sys
import time
import numpy as np
import pymeteor.pymeteor as pymeteor
import spacy | 4,589 | q2 = " ".join(preprocess(injectable["question"]))
inj_question_vec = nlp(q2)
sim = inj_question_vec.similarity(question_vec)
print(sim, "Q:", q1, "Q2:", q2)
if sim > best[0]:
best = [sim, injectable["prompt"]]
return best[1]
def maybe_inject_prompts(prompt_data, question, injectables=None):
new_prompt_data = copy.deepcopy(prompt_data)
if not USE_EXAMPLE_INJECTION:
return new_prompt_data
if not injectables:
return new_prompt_data
if not nlp:
return new_prompt_data
similar_injectable = best_matching_injectable(question, injectables)
# first: truncate the examples by looking for the inject_before: True
# on the prompt items
truncate_at = None
for i, item in enumerate(new_prompt_data):
if item.get("inject_before"):
truncate_at = i
break
if truncate_at is None:
return new_prompt_data
# This also cuts off the final part, we need to fix that
truncated_prompt_data = new_prompt_data[:i] + similar_injectable
# append the question now
truncated_prompt_data.append(new_prompt_data[-1])
return truncated_prompt_data
def prompt_data_to_openai(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(
prompt_data, question, injectables=injectables
)
prompt_completed[-1]["content"] = prompt_completed[-1]["content"].format(
question=question
)
print("Final instruction in prepared prompt:", prompt_completed[-1])
# clean up the prompt because openAI explodes if any unexpected keys
# are supplied
openai_allowed_keys = ["role", "content"]
finalized_prompt = [
{k: v for k, v in item.items() if k in openai_allowed_keys}
for item in prompt_completed
]
return finalized_prompt
def prompt_data_to_raw(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(prompt_data, question, injectables=injectables)
prompt_raw = ""
for item in prompt_completed:
line = item["content"].format(question=question)
prompt_raw += line
prompt_raw += "\n"
if "Final Answer:" in line:
prompt_raw += "\n"
return prompt_raw.strip()
def prompt_data_to_chatml(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(prompt_data, question, injectables=injectables)
prompt_raw = ""
last_item = len(prompt_completed) - 1
for i, item in enumerate(prompt_completed):
line = item["content"].format(question=question).strip()
if item["role"] == "system":
prompt_raw += "<|im_start|>system\n"
prompt_raw += f"{line}\n<|im_end|>\n"
if item["role"] == "assistant":
prompt_raw += "<|im_start|>system name=example_assistant\n"
prompt_raw += f"{line}\n<|im_end|>\n"
if "Final Answer: " in line:
prompt_raw += "\n"
if item["role"] == "user" and i != (last_item):
prompt_raw += "<|im_start|>system name=example_user\n"
prompt_raw += f"{line}\n<|im_end|>\n"
# the final one is the question with the lead out for completion
if item["role"] == "user" and i == (last_item):
prompt_raw += "<|im_start|>user\n"
prompt_raw += f"{line}\n<|im_end|>\n"
prompt_raw += "<|im_start|>assistant\n"
prompt_raw += "Thought: "
return prompt_raw.strip()
def get_model_name(model_file):
model_name=re.sub('[^A-Za-z0-9\-_]+', "_", os.path.basename(model_file))
return model_name
def get_tracefile(model_file):
model_name = get_model_name(model_file)
now=datetime.now().strftime("%Y-%m-%d_%H:%M:%S.%f")
tracefile = f"./traces/experiment_{model_name}_{now}.log"
return tracefile
def run_llm(*args, timeout=30*60, **kwargs):
# shared dict for transferring results back from the proc
manager = multiprocessing.Manager()
return_dict = manager.dict()
kwargs["return_dict"] = return_dict
execute_fn = execute
if args[0].startswith("openai:"):
| #!/usr/bin/env python
try:
except ImportError:
USE_EXAMPLE_INJECTION = True
# HACK: globals
nlp = None
stop_words = None
def load_yml_file(filename):
with open(filename, "r") as f:
return load(f, Loader=Loader)
def preprocess(sentence):
return [w for w in sentence.lower().split() if w not in stop_words]
def best_matching_injectable(question, injectables):
best = [0.0, injectables[0]["prompt"]]
q1 = " ".join(preprocess(question))
question_vec = nlp(q1)
for injectable in injectables:
q2 = " ".join(preprocess(injectable["question"]))
inj_question_vec = nlp(q2)
sim = inj_question_vec.similarity(question_vec)
print(sim, "Q:", q1, "Q2:", q2)
if sim > best[0]:
best = [sim, injectable["prompt"]]
return best[1]
def maybe_inject_prompts(prompt_data, question, injectables=None):
new_prompt_data = copy.deepcopy(prompt_data)
if not USE_EXAMPLE_INJECTION:
return new_prompt_data
if not injectables:
return new_prompt_data
if not nlp:
return new_prompt_data
similar_injectable = best_matching_injectable(question, injectables)
# first: truncate the examples by looking for the inject_before: True
# on the prompt items
truncate_at = None
for i, item in enumerate(new_prompt_data):
if item.get("inject_before"):
truncate_at = i
break
if truncate_at is None:
return new_prompt_data
# This also cuts off the final part, we need to fix that
truncated_prompt_data = new_prompt_data[:i] + similar_injectable
# append the question now
truncated_prompt_data.append(new_prompt_data[-1])
return truncated_prompt_data
def prompt_data_to_openai(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(
prompt_data, question, injectables=injectables
)
prompt_completed[-1]["content"] = prompt_completed[-1]["content"].format(
question=question
)
print("Final instruction in prepared prompt:", prompt_completed[-1])
# clean up the prompt because openAI explodes if any unexpected keys
# are supplied
openai_allowed_keys = ["role", "content"]
finalized_prompt = [
{k: v for k, v in item.items() if k in openai_allowed_keys}
for item in prompt_completed
]
return finalized_prompt
def prompt_data_to_raw(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(prompt_data, question, injectables=injectables)
prompt_raw = ""
for item in prompt_completed:
line = item["content"].format(question=question)
prompt_raw += line
prompt_raw += "\n"
if "Final Answer:" in line:
prompt_raw += "\n"
return prompt_raw.strip()
def prompt_data_to_chatml(prompt_data, question, injectables=None):
prompt_completed = maybe_inject_prompts(prompt_data, question, injectables=injectables)
prompt_raw = ""
last_item = len(prompt_completed) - 1
for i, item in enumerate(prompt_completed):
line = item["content"].format(question=question).strip()
if item["role"] == "system":
prompt_raw += "<|im_start|>system\n"
prompt_raw += f"{line}\n<|im_end|>\n"
if item["role"] == "assistant":
prompt_raw += "<|im_start|>system name=example_assistant\n"
prompt_raw += f"{line}\n<|im_end|>\n"
if "Final Answer: " in line:
prompt_raw += "\n"
if item["role"] == "user" and i != (last_item):
prompt_raw += "<|im_start|>system name=example_user\n"
prompt_raw += f"{line}\n<|im_end|>\n"
# the final one is the question with the lead out for completion
if item["role"] == "user" and i == (last_item):
prompt_raw += "<|im_start|>user\n"
prompt_raw += f"{line}\n<|im_end|>\n"
prompt_raw += "<|im_start|>assistant\n"
prompt_raw += "Thought: "
return prompt_raw.strip()
def get_model_name(model_file):
model_name=re.sub('[^A-Za-z0-9\-_]+', "_", os.path.basename(model_file))
return model_name
def get_tracefile(model_file):
model_name = get_model_name(model_file)
now=datetime.now().strftime("%Y-%m-%d_%H:%M:%S.%f")
tracefile = f"./traces/experiment_{model_name}_{now}.log"
return tracefile
def run_llm(*args, timeout=30*60, **kwargs):
# shared dict for transferring results back from the proc
manager = multiprocessing.Manager()
return_dict = manager.dict()
kwargs["return_dict"] = return_dict
execute_fn = execute
if args[0].startswith("openai:"): | execute_fn = execute_openai | 2 | 2023-10-15 04:30:30+00:00 | 8k |
sehyun03/MulActSeg | trainer/active_onlinewplblonly_multi_predignore.py | [
{
"identifier": "active_onlineplbl_multi_predignore",
"path": "trainer/active_onlineplbl_multi_predignore.py",
"snippet": "class LocalProtoCE(nn.Module):\nclass ActiveTrainer(active_joint_multi_predignore.ActiveTrainer):\n def __init__(self, args, num_superpixel, temperature=1.0, reduction='mean'):\n... | import torch
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm
from torch import nn
from torch_scatter import scatter, scatter_max
from trainer import active_onlineplbl_multi_predignore
from trainer.active_joint_multi_predignore import MultiChoiceCE_
from trainer.active_onlineplbl_multi_predignore import LocalProtoCE | 4,581 |
    def generate_plbl(self, inputs_plbl, feats_plbl, targets, superpixels, spmasks):
        r"""Generate prototype-based pseudo labels and per-pixel confidence weights.

        For every superpixel whose annotation is multi-hot (more than one
        candidate class), the max-probability valid pixel of each candidate
        class is taken as a class prototype; every valid pixel is then assigned
        the class of its most similar prototype (feature inner product), and the
        model's softmax probability for that class is recorded as a confidence
        weight. Pixels outside multi-hot superpixels keep the ignore label 255
        and weight 0.

        Args::
            inputs_plbl: NxCxHxW  logits used both for prototype selection and weighting
            feats_plbl: NxChannelxHxW  feature map used for prototype similarity
            targets: N x self.num_superpixel x C+1  multi-hot superpixel annotations
            superpixels: NxHxW  per-pixel superpixel id map
            spmasks: NxHxW  boolean mask of pixels eligible for pseudo labeling

        Returns::
            weight: NxHxW  softmax probability of the assigned pseudo label (0 where unlabeled)
            nn_plbl: NxHxW  pseudo label map (255 marks unlabeled / ignored pixels)
        """
        N, C, H, W = inputs_plbl.shape
        outputs = F.softmax(inputs_plbl / self.temp, dim=1) ### N x C x H x W
        outputs = outputs.permute(0,2,3,1).reshape(N, -1, C) ### N x HW x C
        _, Ch, _, _ = feats_plbl.shape
        feats_plbl = feats_plbl.permute(0,2,3,1).reshape(N, -1, Ch) ### N x HW x Ch
        superpixels = superpixels.reshape(N, -1, 1) ### N x HW x 1
        spmasks = spmasks.reshape(N, -1) ### N x HW
        is_trg_multi = (1 < targets.sum(dim=2)) ### N x self.num_superpixel
        r''' goal: generate pseudo label for multi-hot superpixels '''
        nn_plbl = torch.ones_like(superpixels).squeeze(dim=2) * 255 ### N x HW, initialized to the ignore label 255
        weight = torch.zeros_like(feats_plbl[..., 0]) ### N x HW
        for i in range(N):
            '''
            outputs[i] ### HW x C
            feats_plbl[i] : HW x Ch
            superpixels[i] ### HW x 1
            targets[i] : self.num_superpiexl x C
            spmasks[i] ### HW x 1
            '''
            multi_hot_target = targets[i] ### self.num_superpixel x C
            r''' valid mask (== spmasks && multi_mask) filtering outputs '''
            ### superpixels[i] is not filtered by spmasks here, so it may still contain invalid superpixel ids.
            if not torch.any(spmasks[i]):
                continue
            multi_mask = is_trg_multi[i][superpixels[i].squeeze(dim=1)[spmasks[i]]].detach()
            valid_mask = spmasks[i].clone()
            valid_mask[spmasks[i]] = multi_mask
            valid_output = outputs[i][valid_mask] ### HW' x C : class-wise predictions restricted to the valid region
            vpx_superpixel = superpixels[i][valid_mask] ### HW' x 1 : superpixel ids restricted to the valid region
            valid_feat = feats_plbl[i][valid_mask] ### HW' x Ch
            r''' get max pixel for each class within superpixel '''
            _, vdx_sup_mxpool = scatter_max(valid_output, vpx_superpixel, dim=0, dim_size=self.args.nseg)
            ### ㄴ self.num_superpixel x C: index of the max-scoring valid pixel for each (superpixel, class) pair
            r''' filter invalid && single superpixels '''
            is_spx_valid = vdx_sup_mxpool[:,0] < valid_output.shape[0]
            ### ㄴ for superpixel ids that never appear in vpx_superpixel, scatter_max fills the argmax with the
            ### out-of-range sentinel valid_output.shape[0]; use that sentinel to drop those useless superpixels.
            vdx_vsup_mxpool = vdx_sup_mxpool[is_spx_valid]
            ### ㄴ nvalidseg x C : index of max pixel for each class (for valid spx)
            trg_vsup_mxpool = multi_hot_target[is_spx_valid]
            ### ㄴ nvalidseg x C : multi-hot label (for valid spx)
            r''' Index conversion (valid pixel -> pixel) '''
            validex_to_pixdex = valid_mask.nonzero().squeeze(dim=1)
            ### ㄴ translate valid_pixel -> pixel space
            vspxdex, vcdex = trg_vsup_mxpool.nonzero(as_tuple=True)
            ### ㄴ valid superpixel index && valid class index
            top1_vdx = vdx_vsup_mxpool[vspxdex, vcdex]
            ### ㄴ entries of vdx_vsup_mxpool taken at the valid (superpixel, class) positions of the target
            # top1_pdx = validex_to_pixdex[top1_vdx]
            # ### ㄴ convert the max indices into pixel space
            r''' Inner product between prototype features & superpixel features '''
            prototypes = valid_feat[top1_vdx]
            ### ㄴ nproto x Ch
            similarity = torch.mm(prototypes, valid_feat.T)
            ### ㄴ nproto x nvalid_pixels: similarity between each prototype and every valid pixel feature
            r''' Nearest prototype selection '''
            _, idx_mxproto_pxl = scatter_max(similarity, vspxdex, dim=0)
            ### ㄴ nvalidspx x nvalid_pixels: per pixel, the id of its most similar prototype (per superpixel)
            r''' Assign pseudo label of according prototype
            - idx_mxproto_pxl 중에서 각 pixel 이 해당하는 superpixel superpixel 의 값을 얻기
            - 이를 위해 우선 (superpixel -> valid superpixel)로 index conversion 을 만듦
            - pixel 별 superpixel id 를 pixel 별 valid superpixel id 로 변환 (=nearest_vspdex)
            - 각 valid superpixel 의 label 로 pseudo label assign (=plbl_vdx)
            - pseudo label map 의 해당 pixel 에 valid pixel 별 pseudo label 할당 (nn_plbl)
            '''
            ### Translation of the note above: from idx_mxproto_pxl pick, for each pixel, the row belonging to
            ### its own superpixel. To do so, first build a (superpixel -> valid superpixel) index conversion,
            ### map each pixel's superpixel id into valid-superpixel space (=nearest_vspdex), read off the
            ### class of its nearest prototype (=plbl_vdx), and write that pseudo label into nn_plbl at the
            ### corresponding pixel positions.
            spdex_to_vspdex = torch.ones_like(is_spx_valid) * -1
            spdex_to_vspdex[is_spx_valid] = torch.unique(vspxdex)
            vspdex_superpixel = spdex_to_vspdex[vpx_superpixel.squeeze(dim=1)]
            ### ㄴ HW': vpx_superpixel holds raw superpixel ids; this maps them into valid-superpixel index space
            nearest_vspdex = idx_mxproto_pxl.T[torch.arange(vspdex_superpixel.shape[0]), vspdex_superpixel]
            plbl_vdx = vcdex[nearest_vspdex]
            nn_plbl[i, validex_to_pixdex] = plbl_vdx
            weight[i, validex_to_pixdex] = outputs[i, validex_to_pixdex][torch.arange(plbl_vdx.shape[0]), plbl_vdx]
        nn_plbl = nn_plbl.reshape(N, H, W)
        weight = weight.reshape(N, H, W)
        return weight, nn_plbl
def forward(self, inputs_plbl, feats_plbl, inputs, targets, superpixels, spmasks):
r"""
Args::
inputs: N x C x H x W
nn_plbl: N x H x W
"""
with torch.no_grad():
weight, nn_plbl = self.generate_plbl(inputs_plbl, feats_plbl, targets, superpixels, spmasks)
r''' CE loss between plbl and prediction '''
loss = weight * self.cross_entropy(inputs / self.temp, nn_plbl)
loss = torch.masked_select(loss, loss != 0).mean()
return loss
|
r""" online pseudo labeling with local prototype-based pseudo labeling.
- Additional weighting on the pseudo label using model predicted probability.
"""
class JointLocalProtoCE(LocalProtoCE):
def __init__(self, args, num_superpixel, temperature=1.0, reduction='mean'):
super().__init__(args, num_superpixel, temperature, reduction)
self.cross_entropy = nn.CrossEntropyLoss(ignore_index=255, reduction='none')
def generate_plbl(self, inputs_plbl, feats_plbl, targets, superpixels, spmasks):
r"""
Args::
inputs_plbl: NxCxHxW
feats_plbl: NxChannelxHxW
targets: N x self.num_superpixel x C+1
superpixels: NxHxW
spmasks: NxHxW
Returns::
nn_plbl: N x HW x1
"""
N, C, H, W = inputs_plbl.shape
outputs = F.softmax(inputs_plbl / self.temp, dim=1) ### N x C x H x W
outputs = outputs.permute(0,2,3,1).reshape(N, -1, C) ### N x HW x C
_, Ch, _, _ = feats_plbl.shape
feats_plbl = feats_plbl.permute(0,2,3,1).reshape(N, -1, Ch) ### N x HW x Ch
superpixels = superpixels.reshape(N, -1, 1) ### N x HW x 1
spmasks = spmasks.reshape(N, -1) ### N x HW
is_trg_multi = (1 < targets.sum(dim=2)) ### N x self.num_superpixel
r''' goal: generate pseudo label for multi-hot superpixels '''
nn_plbl = torch.ones_like(superpixels).squeeze(dim=2) * 255 ### N x HW x 1
weight = torch.zeros_like(feats_plbl[..., 0]) ### N x HW
for i in range(N):
'''
outputs[i] ### HW x C
feats_plbl[i] : HW x Ch
superpixels[i] ### HW x 1
targets[i] : self.num_superpiexl x C
spmasks[i] ### HW x 1
'''
multi_hot_target = targets[i] ### self.num_superpixel x C
r''' valid mask (== spmasks && multi_mask) filtering outputs '''
### spmasks 에 안걸러졌기 때문에 superpixels[i] 는 invalid spx id 를 포함할 수 있음.
if not torch.any(spmasks[i]):
continue
multi_mask = is_trg_multi[i][superpixels[i].squeeze(dim=1)[spmasks[i]]].detach()
valid_mask = spmasks[i].clone()
valid_mask[spmasks[i]] = multi_mask
valid_output = outputs[i][valid_mask] ### HW' x C : class-wise prediction 중 valid 한 영역
vpx_superpixel = superpixels[i][valid_mask] ### HW' x 1 : superpixel id 중 valid 한 ID
valid_feat = feats_plbl[i][valid_mask] ### HW' x Ch
r''' get max pixel for each class within superpixel '''
_, vdx_sup_mxpool = scatter_max(valid_output, vpx_superpixel, dim=0, dim_size=self.args.nseg)
### ㄴ self.num_superpixel x C: 각 (superpixel, class) pair 의 max 값을 가지는 index
r''' filter invalid && single superpixels '''
is_spx_valid = vdx_sup_mxpool[:,0] < valid_output.shape[0]
### ㄴ vpx_superpixel 에 포함되지 않은 superpixel id 에 대해서는 max index 가
### valid_output index 최대값 (==크기)로 잡힘. 이 값을 통해 쓸모없는 spx filtering
vdx_vsup_mxpool = vdx_sup_mxpool[is_spx_valid]
### ㄴ nvalidseg x C : index of max pixel for each class (for valid spx)
trg_vsup_mxpool = multi_hot_target[is_spx_valid]
### ㄴ nvalidseg x C : multi-hot label (for valid spx)
r''' Index conversion (valid pixel -> pixel) '''
validex_to_pixdex = valid_mask.nonzero().squeeze(dim=1)
### ㄴ translate valid_pixel -> pixel space
vspxdex, vcdex = trg_vsup_mxpool.nonzero(as_tuple=True)
### ㄴ valid superpixel index && valid class index
top1_vdx = vdx_vsup_mxpool[vspxdex, vcdex]
### ㄴ vdx_sup_mxpool 중에서 valid 한 superpixel 과 target 에서의 valid index
# top1_pdx = validex_to_pixdex[top1_vdx]
# ### ㄴ max index 들을 pixel space 로 변환
r''' Inner product between prototype features & superpixel features '''
prototypes = valid_feat[top1_vdx]
### ㄴ nproto x Ch
similarity = torch.mm(prototypes, valid_feat.T)
### ㄴ nproto x nvalid_pixels: 각 prototype 과 모든 valid pixel feature 사이의 유사도
r''' Nearest prototype selection '''
_, idx_mxproto_pxl = scatter_max(similarity, vspxdex, dim=0)
### ㄴ nvalidspx x nvalid_pixels: pixel 별 가장 유사한 prototype id
r''' Assign pseudo label of according prototype
- idx_mxproto_pxl 중에서 각 pixel 이 해당하는 superpixel superpixel 의 값을 얻기
- 이를 위해 우선 (superpixel -> valid superpixel)로 index conversion 을 만듦
- pixel 별 superpixel id 를 pixel 별 valid superpixel id 로 변환 (=nearest_vspdex)
- 각 valid superpixel 의 label 로 pseudo label assign (=plbl_vdx)
- pseudo label map 의 해당 pixel 에 valid pixel 별 pseudo label 할당 (nn_plbl)
'''
spdex_to_vspdex = torch.ones_like(is_spx_valid) * -1
spdex_to_vspdex[is_spx_valid] = torch.unique(vspxdex)
vspdex_superpixel = spdex_to_vspdex[vpx_superpixel.squeeze(dim=1)]
### ㄴ HW': 여기 vpx_superpixel 의 id value 는 superpixel 의 id 이다. 이를 통해 valid superpixel idex conversion
nearest_vspdex = idx_mxproto_pxl.T[torch.arange(vspdex_superpixel.shape[0]), vspdex_superpixel]
plbl_vdx = vcdex[nearest_vspdex]
nn_plbl[i, validex_to_pixdex] = plbl_vdx
weight[i, validex_to_pixdex] = outputs[i, validex_to_pixdex][torch.arange(plbl_vdx.shape[0]), plbl_vdx]
nn_plbl = nn_plbl.reshape(N, H, W)
weight = weight.reshape(N, H, W)
return weight, nn_plbl
def forward(self, inputs_plbl, feats_plbl, inputs, targets, superpixels, spmasks):
r"""
Args::
inputs: N x C x H x W
nn_plbl: N x H x W
"""
with torch.no_grad():
weight, nn_plbl = self.generate_plbl(inputs_plbl, feats_plbl, targets, superpixels, spmasks)
r''' CE loss between plbl and prediction '''
loss = weight * self.cross_entropy(inputs / self.temp, nn_plbl)
loss = torch.masked_select(loss, loss != 0).mean()
return loss
| class ActiveTrainer(active_onlineplbl_multi_predignore.ActiveTrainer): | 0 | 2023-10-24 09:19:58+00:00 | 8k |
justincui03/tesla | distill.py | [
{
"identifier": "augment",
"path": "utils.py",
"snippet": "def augment(images, dc_aug_param, device):\n # This can be sped up in the future.\n\n if dc_aug_param != None and dc_aug_param['strategy'] != 'none':\n scale = dc_aug_param['scale']\n crop = dc_aug_param['crop']\n rota... | import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.utils
import wandb
import copy
import random
import warnings
from tqdm import tqdm
from utils import augment, get_dataset, get_network, get_eval_pool, evaluate_synset, get_time, DiffAugment, DiffAugmentList, ParamDiffAug
from reparam_module import ReparamModule
from torch.utils.data import Subset
from torch.utils.data import DataLoader
from PIL import PngImagePlugin | 6,513 |
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def main(args):
if args.zca and args.texture:
raise AssertionError("Cannot use zca and texture together")
if args.texture and args.pix_init == "real":
print("WARNING: Using texture with real initialization will take a very long time to smooth out the boundaries between images.")
if args.max_experts is not None and args.max_files is not None:
args.total_experts = args.max_experts * args.max_files
print("CUDNN STATUS: {}".format(torch.backends.cudnn.enabled))
args.dsa = True if args.dsa == 'True' else False
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
eval_it_pool = np.arange(0, args.Iteration + 1, args.eval_it).tolist()
|
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024**2)
warnings.filterwarnings("ignore", category=DeprecationWarning)
def main(args):
if args.zca and args.texture:
raise AssertionError("Cannot use zca and texture together")
if args.texture and args.pix_init == "real":
print("WARNING: Using texture with real initialization will take a very long time to smooth out the boundaries between images.")
if args.max_experts is not None and args.max_files is not None:
args.total_experts = args.max_experts * args.max_files
print("CUDNN STATUS: {}".format(torch.backends.cudnn.enabled))
args.dsa = True if args.dsa == 'True' else False
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
eval_it_pool = np.arange(0, args.Iteration + 1, args.eval_it).tolist() | channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader, loader_train_dict, class_map, class_map_inv = get_dataset(args.dataset, args.data_path, args.batch_real, args=args) | 1 | 2023-10-17 23:11:36+00:00 | 8k |
biggzlar/plausible-uncertainties | train_multivariate.py | [
{
"identifier": "get_device",
"path": "utils.py",
"snippet": "def get_device():\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
},
{
"identifier": "MultivariateDummyData",
"path": "utils.py",
"snippet": "class MultivariateDummyData:\n\tdef __init__(self, N, ... | import tqdm
import torch
import pickle
import numpy as np
import mpl_toolkits.mplot3d.art3d as art3d
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Rectangle
from utils import get_device, MultivariateDummyData, get_predicted_cdf
from evidential_regression.networks import MultivariateDerNet
from evidential_regression.losses import MultivariateEvidentialRegressionLoss
from mle_mc_dropout.networks import MultivariateKenNet
from mle_mc_dropout.losses import MultivariateGaussianNLL | 3,881 | """
lambda_, v = np.linalg.eig(cov)
lambda_ = np.minimum(np.sqrt(lambda_), [10.])
ellipse = Ellipse((y, z), width=lambda_[0] * 3 * 2, height=lambda_[1] * 3 * 2,
angle=np.rad2deg(np.arccos(v[0, 0])), **kwargs)
ax.add_patch(ellipse)
art3d.pathpatch_2d_to_3d(ellipse, z=x, zdir="x")
return
if __name__ == "__main__":
device = get_device()
print(f"Working on {device}!")
cmap = plt.cm.bone_r
EPOCHS = 200
in_lower = -10.0
in_upper = 4.0
out_lower = -20.0
out_upper = 10.0
train_data = MultivariateDummyData(N=8000, X_range=(in_lower, in_upper))
test_data = MultivariateDummyData(N=200, X_range=(out_lower, out_upper))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32)
test_YZ = np.concatenate([np.expand_dims(test_data.Y, axis=1), np.expand_dims(test_data.Z, axis=1)], axis=-1)
optimizer_params = {
"lr": 1e-03,
"betas": (0.9, 0.999),
"eps": 1e-8,
"weight_decay": 1e-2,
"amsgrad": False}
# choice of model/method
net = MultivariateDerNet(p=2)
net.to(device)
criterion = MultivariateEvidentialRegressionLoss()
# net = MultivariateKenNet(p=2)
# criterion = MultivariateGaussianNLL()
optimizer = torch.optim.AdamW(net.parameters(), **optimizer_params)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=optimizer_params["lr"], steps_per_epoch=len(train_loader), epochs=EPOCHS)
losses = []
t = tqdm.trange(EPOCHS)
for i in t:
net.train()
for (x_batch, y_batch, z_batch) in train_loader:
inputs = x_batch.to(device)
labels = torch.concat([y_batch, z_batch], dim=-1).to(device)
optimizer.zero_grad()
outs = net(inputs)
loss = criterion(labels, *outs)
loss.backward()
optimizer.step()
scheduler.step()
net.eval()
mu, aleatoric, epistemic, meta_aleatoric, output_params = net.get_prediction(torch.Tensor(np.expand_dims(test_data.X, axis=1)).to(device))
t.set_description(f"val. loss: {loss.detach().cpu().numpy():.2f}")
t.refresh()
losses += [loss.detach().cpu().numpy()]
""" Visualizing the experiment
"""
ax = plt.axes(projection="3d")
ax.scatter3D(test_data.X, test_data.Y, test_data.Z, marker="+", color="black")
# plot in-distribution limits
rect0 = Rectangle((-20, -20), 40, 40, fill=False, hatch="X")
ax.add_patch(rect0)
art3d.pathpatch_2d_to_3d(rect0, z=in_lower, zdir="x")
rect1 = Rectangle((-20, -20), 40, 40, fill=False, hatch="X")
ax.add_patch(rect1)
art3d.pathpatch_2d_to_3d(rect1, z=in_upper, zdir="x")
# plot aleatoric (and epistemic) uncertainty
for j in range(len(test_data)):
confidence_ellipse(test_data.X[j], mu[j, 0], mu[j, 1], aleatoric[j], ax,
facecolor=cmap(j / len(test_data)), edgecolor=None, alpha=0.3)
# plot predicted function
plt.plot(test_data.X, mu[:, 0], mu[:, 1], color="black", label="$\hat \mu$")
# plot ground truth function
plt.plot(test_data.X, test_data.X * np.sin(test_data.X), test_data.X * np.cos(test_data.X), color="#88888880", label="true mean")
# # plot ground truth aleatoric uncertainty
# for x in test_data.X:
# confidence_ellipse(x, x * np.sin(x), x * np.cos(x), x * 0.3 * np.array([[0.8, -0.3], [-0.3, 0.8]]), ax,
# fill=None, edgecolor="black", linestyle="--")
fig = plt.gcf()
ax.set_xlim(out_lower, out_upper)
ax.set_ylim(-20, 20)
ax.set_zlim(-20, 20)
ax.locator_params(axis="x", nbins=5)
ax.locator_params(axis="y", nbins=5)
ax.locator_params(axis="z", nbins=5)
plt.tight_layout()
# plt.legend()
pickle.dump(fig, open("mv_der.fig.pickle", "wb"))
plt.show()
plt.clf()
""" Creating and plotting calibration plots
"""
in_YZ = test_YZ[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)]
in_mu = mu[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)]
in_al = aleatoric[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)]
|
# plot settings
plt.rcParams.update(
{
"font.size": 12,
"text.usetex": False,
"font.family": "stixgeneral",
"mathtext.fontset": "stix",
}
)
def confidence_ellipse(x, y, z, cov, ax, n_std=1.0, **kwargs):
""" Method to draw 2d ellipses in 3d plots.
"""
lambda_, v = np.linalg.eig(cov)
lambda_ = np.minimum(np.sqrt(lambda_), [10.])
ellipse = Ellipse((y, z), width=lambda_[0] * 3 * 2, height=lambda_[1] * 3 * 2,
angle=np.rad2deg(np.arccos(v[0, 0])), **kwargs)
ax.add_patch(ellipse)
art3d.pathpatch_2d_to_3d(ellipse, z=x, zdir="x")
return
if __name__ == "__main__":
device = get_device()
print(f"Working on {device}!")
cmap = plt.cm.bone_r
EPOCHS = 200
in_lower = -10.0
in_upper = 4.0
out_lower = -20.0
out_upper = 10.0
train_data = MultivariateDummyData(N=8000, X_range=(in_lower, in_upper))
test_data = MultivariateDummyData(N=200, X_range=(out_lower, out_upper))
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=32)
test_YZ = np.concatenate([np.expand_dims(test_data.Y, axis=1), np.expand_dims(test_data.Z, axis=1)], axis=-1)
optimizer_params = {
"lr": 1e-03,
"betas": (0.9, 0.999),
"eps": 1e-8,
"weight_decay": 1e-2,
"amsgrad": False}
# choice of model/method
net = MultivariateDerNet(p=2)
net.to(device)
criterion = MultivariateEvidentialRegressionLoss()
# net = MultivariateKenNet(p=2)
# criterion = MultivariateGaussianNLL()
optimizer = torch.optim.AdamW(net.parameters(), **optimizer_params)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=optimizer_params["lr"], steps_per_epoch=len(train_loader), epochs=EPOCHS)
losses = []
t = tqdm.trange(EPOCHS)
for i in t:
net.train()
for (x_batch, y_batch, z_batch) in train_loader:
inputs = x_batch.to(device)
labels = torch.concat([y_batch, z_batch], dim=-1).to(device)
optimizer.zero_grad()
outs = net(inputs)
loss = criterion(labels, *outs)
loss.backward()
optimizer.step()
scheduler.step()
net.eval()
mu, aleatoric, epistemic, meta_aleatoric, output_params = net.get_prediction(torch.Tensor(np.expand_dims(test_data.X, axis=1)).to(device))
t.set_description(f"val. loss: {loss.detach().cpu().numpy():.2f}")
t.refresh()
losses += [loss.detach().cpu().numpy()]
""" Visualizing the experiment
"""
ax = plt.axes(projection="3d")
ax.scatter3D(test_data.X, test_data.Y, test_data.Z, marker="+", color="black")
# plot in-distribution limits
rect0 = Rectangle((-20, -20), 40, 40, fill=False, hatch="X")
ax.add_patch(rect0)
art3d.pathpatch_2d_to_3d(rect0, z=in_lower, zdir="x")
rect1 = Rectangle((-20, -20), 40, 40, fill=False, hatch="X")
ax.add_patch(rect1)
art3d.pathpatch_2d_to_3d(rect1, z=in_upper, zdir="x")
# plot aleatoric (and epistemic) uncertainty
for j in range(len(test_data)):
confidence_ellipse(test_data.X[j], mu[j, 0], mu[j, 1], aleatoric[j], ax,
facecolor=cmap(j / len(test_data)), edgecolor=None, alpha=0.3)
# plot predicted function
plt.plot(test_data.X, mu[:, 0], mu[:, 1], color="black", label="$\hat \mu$")
# plot ground truth function
plt.plot(test_data.X, test_data.X * np.sin(test_data.X), test_data.X * np.cos(test_data.X), color="#88888880", label="true mean")
# # plot ground truth aleatoric uncertainty
# for x in test_data.X:
# confidence_ellipse(x, x * np.sin(x), x * np.cos(x), x * 0.3 * np.array([[0.8, -0.3], [-0.3, 0.8]]), ax,
# fill=None, edgecolor="black", linestyle="--")
fig = plt.gcf()
ax.set_xlim(out_lower, out_upper)
ax.set_ylim(-20, 20)
ax.set_zlim(-20, 20)
ax.locator_params(axis="x", nbins=5)
ax.locator_params(axis="y", nbins=5)
ax.locator_params(axis="z", nbins=5)
plt.tight_layout()
# plt.legend()
pickle.dump(fig, open("mv_der.fig.pickle", "wb"))
plt.show()
plt.clf()
""" Creating and plotting calibration plots
"""
in_YZ = test_YZ[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)]
in_mu = mu[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)]
in_al = aleatoric[np.logical_and(test_data.X > in_lower, test_data.X < in_upper)] | pcdf = get_predicted_cdf(residuals=in_mu - in_YZ, sigma=np.diagonal(in_al, axis1=-2, axis2=-1)) | 2 | 2023-10-19 08:44:08+00:00 | 8k |
avilliai/Bert_Vits2_Sever | modules.py | [
{
"identifier": "init_weights",
"path": "commons.py",
"snippet": "def init_weights(m, mean=0.0, std=0.01):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n m.weight.data.normal_(mean, std)"
},
{
"identifier": "get_padding",
"path": "commons.py",
"snippet": "... | import copy
import math
import numpy as np
import scipy
import torch
import commons
from torch import nn
from torch.nn import functional as F
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm
from commons import init_weights, get_padding
from transforms import piecewise_rational_quadratic_transform
from attentions import Encoder | 3,963 | remove_weight_norm(l)
class Log(nn.Module):
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
logdet = torch.sum(-y, [1, 2])
return y, logdet
else:
x = torch.exp(x) * x_mask
return x
class Flip(nn.Module):
def forward(self, x, *args, reverse=False, **kwargs):
x = torch.flip(x, [1])
if not reverse:
logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
return x, logdet
else:
return x
class ElementwiseAffine(nn.Module):
def __init__(self, channels):
super().__init__()
self.channels = channels
self.m = nn.Parameter(torch.zeros(channels,1))
self.logs = nn.Parameter(torch.zeros(channels,1))
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = self.m + torch.exp(self.logs) * x
y = y * x_mask
logdet = torch.sum(self.logs * x_mask, [1,2])
return y, logdet
else:
x = (x - self.m) * torch.exp(-self.logs) * x_mask
return x
class ResidualCouplingLayer(nn.Module):
def __init__(self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels]*2, 1)
h = self.pre(x0) * x_mask
h = self.enc(h, x_mask, g=g)
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels]*2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1,2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
class ConvFlow(nn.Module):
def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.num_bins = num_bins
self.tail_bound = tail_bound
self.half_channels = in_channels // 2
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels]*2, 1)
h = self.pre(x0)
h = self.convs(h, x_mask, g=g)
h = self.proj(h) * x_mask
b, c, t = x0.shape
h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_derivatives = h[..., 2 * self.num_bins:]
|
LRELU_SLOPE = 0.1
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-5):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
x = x.transpose(1, -1)
x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
return x.transpose(1, -1)
class ConvReluNorm(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(
nn.ReLU(),
nn.Dropout(p_dropout))
for _ in range(n_layers-1):
self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class DDSConv(nn.Module):
"""
Dialted and Depth-Separable Convolution
"""
def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
super().__init__()
self.channels = channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
self.drop = nn.Dropout(p_dropout)
self.convs_sep = nn.ModuleList()
self.convs_1x1 = nn.ModuleList()
self.norms_1 = nn.ModuleList()
self.norms_2 = nn.ModuleList()
for i in range(n_layers):
dilation = kernel_size ** i
padding = (kernel_size * dilation - dilation) // 2
self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
groups=channels, dilation=dilation, padding=padding
))
self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
self.norms_1.append(LayerNorm(channels))
self.norms_2.append(LayerNorm(channels))
def forward(self, x, x_mask, g=None):
if g is not None:
x = x + g
for i in range(self.n_layers):
y = self.convs_sep[i](x * x_mask)
y = self.norms_1[i](y)
y = F.gelu(y)
y = self.convs_1x1[i](y)
y = self.norms_2[i](y)
y = F.gelu(y)
y = self.drop(y)
x = x + y
return x * x_mask
class WN(torch.nn.Module):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
self.hidden_channels =hidden_channels
self.kernel_size = kernel_size,
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0:
cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
else:
g_l = torch.zeros_like(x_in)
acts = commons.fused_add_tanh_sigmoid_multiply(
x_in,
g_l,
n_channels_tensor)
acts = self.drop(acts)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
res_acts = res_skip_acts[:,:self.hidden_channels,:]
x = (x + res_acts) * x_mask
output = output + res_skip_acts[:,self.hidden_channels:,:]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
if self.gin_channels != 0:
torch.nn.utils.remove_weight_norm(self.cond_layer)
for l in self.in_layers:
torch.nn.utils.remove_weight_norm(l)
for l in self.res_skip_layers:
torch.nn.utils.remove_weight_norm(l)
class ResBlock1(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x, x_mask=None):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c2(xt)
x = xt + x
if x_mask is not None:
x = x * x_mask
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
def forward(self, x, x_mask=None):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
if x_mask is not None:
xt = xt * x_mask
xt = c(xt)
x = xt + x
if x_mask is not None:
x = x * x_mask
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Log(nn.Module):
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
logdet = torch.sum(-y, [1, 2])
return y, logdet
else:
x = torch.exp(x) * x_mask
return x
class Flip(nn.Module):
def forward(self, x, *args, reverse=False, **kwargs):
x = torch.flip(x, [1])
if not reverse:
logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
return x, logdet
else:
return x
class ElementwiseAffine(nn.Module):
def __init__(self, channels):
super().__init__()
self.channels = channels
self.m = nn.Parameter(torch.zeros(channels,1))
self.logs = nn.Parameter(torch.zeros(channels,1))
def forward(self, x, x_mask, reverse=False, **kwargs):
if not reverse:
y = self.m + torch.exp(self.logs) * x
y = y * x_mask
logdet = torch.sum(self.logs * x_mask, [1,2])
return y, logdet
else:
x = (x - self.m) * torch.exp(-self.logs) * x_mask
return x
class ResidualCouplingLayer(nn.Module):
def __init__(self,
channels,
hidden_channels,
kernel_size,
dilation_rate,
n_layers,
p_dropout=0,
gin_channels=0,
mean_only=False):
assert channels % 2 == 0, "channels should be divisible by 2"
super().__init__()
self.channels = channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.half_channels = channels // 2
self.mean_only = mean_only
self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
self.post.weight.data.zero_()
self.post.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels]*2, 1)
h = self.pre(x0) * x_mask
h = self.enc(h, x_mask, g=g)
stats = self.post(h) * x_mask
if not self.mean_only:
m, logs = torch.split(stats, [self.half_channels]*2, 1)
else:
m = stats
logs = torch.zeros_like(m)
if not reverse:
x1 = m + x1 * torch.exp(logs) * x_mask
x = torch.cat([x0, x1], 1)
logdet = torch.sum(logs, [1,2])
return x, logdet
else:
x1 = (x1 - m) * torch.exp(-logs) * x_mask
x = torch.cat([x0, x1], 1)
return x
class ConvFlow(nn.Module):
def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
super().__init__()
self.in_channels = in_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.num_bins = num_bins
self.tail_bound = tail_bound
self.half_channels = in_channels // 2
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask, g=None, reverse=False):
x0, x1 = torch.split(x, [self.half_channels]*2, 1)
h = self.pre(x0)
h = self.convs(h, x_mask, g=g)
h = self.proj(h) * x_mask
b, c, t = x0.shape
h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
unnormalized_derivatives = h[..., 2 * self.num_bins:]
| x1, logabsdet = piecewise_rational_quadratic_transform(x1, | 2 | 2023-10-23 08:24:12+00:00 | 8k |
t-ega/whatsapp-cloud-sdk | whatsapp_cloud_sdk/wamanager.py | [
{
"identifier": "Message",
"path": "whatsapp_cloud_sdk/_files/message.py",
"snippet": "class Message(File):\n \"\"\"Represents an actual message instance\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n __slots__ = (\n \"business_id\",\n \"display_phone_number\",\n ... | import json
import os
from typing import Callable, Any
from fastapi import FastAPI, APIRouter
from dotenv import load_dotenv
from uvicorn import Config, Server
from starlette.requests import Request
from starlette.responses import Response
from whatsapp_cloud_sdk._files.message import Message
from whatsapp_cloud_sdk._validators.server import Webhook
from whatsapp_cloud_sdk.bot import Bot | 6,852 | """ This module Represents a WhatsApp bot manager that provides an entry point
for external users to interact with the WhatsApp API.
"""
load_dotenv()
class WAManager:
# pylint: disable=line-too-long
"""
Represents a WhatsApp bot server that provides an entry point for external
users to interact with the WhatsApp API.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
Attributes:
verify_token (str): Verification token for webhook authentication.
__app (FastAPI): FastAPI instance for handling incoming requests.
__router (APIRouter): APIRouter for defining routes.
bot (Bot): Instance of the Bot class for WhatsApp API communication.
Methods:
- __callback_func(callback: Callable[[[Message]], None]): Set the callback
function for handling incoming
messages.
- __server(request: Request): Internal method to process incoming requests and messages.
- run_server(callback: Callable[[Request, Message], Union[Response, None],
webhook_url: str = "/webhook",port: int = 8000, verify_token: str = None): Start the FastAPI server to
handle incoming webhooks.
Usage Example:
```
from your_library import Whatsapp
# Initialize the Whatsapp manager
whatsapp = Whatsapp(cloud_api_access_token="your_access_token",
wa_phone_number_id="your_phone_number_id",
version="v17.0")
# Define a callback function to handle incoming messages
def handle_message(request, message):
# Your message handling logic here...
# Run the FastAPI server
whatsapp.run_server(callback=handle_message, webhook_url="/webhook", port=8000, verify_token="your_verify_token")
```
"""
def __init__(
self,
cloud_api_access_token: str = None,
wa_phone_number_id: str = None,
version: str = None,
):
"""
Initialize a Whatsapp instance for managing WhatsApp bot interactions.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
"""
self.verify_token: str = ""
self.__app = FastAPI()
self.__router = APIRouter()
self.bot = Bot(
cloud_api_access_token=cloud_api_access_token,
wa_phone_number_id=wa_phone_number_id,
version=version,
)
self.__server: Server = Server(
config=Config(host="0.0.0.0", port=8000, app=self.__app)
)
self.__callback_func = None
| """ This module Represents a WhatsApp bot manager that provides an entry point
for external users to interact with the WhatsApp API.
"""
load_dotenv()
class WAManager:
# pylint: disable=line-too-long
"""
Represents a WhatsApp bot server that provides an entry point for external
users to interact with the WhatsApp API.
Args:
cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
wa_phone_number_id (str, optional): The WhatsApp phone number ID.
version (str, optional): The WhatsApp API version to use.
Attributes:
verify_token (str): Verification token for webhook authentication.
__app (FastAPI): FastAPI instance for handling incoming requests.
__router (APIRouter): APIRouter for defining routes.
bot (Bot): Instance of the Bot class for WhatsApp API communication.
Methods:
- __callback_func(callback: Callable[[[Message]], None]): Set the callback
function for handling incoming
messages.
- __server(request: Request): Internal method to process incoming requests and messages.
- run_server(callback: Callable[[Request, Message], Union[Response, None],
webhook_url: str = "/webhook",port: int = 8000, verify_token: str = None): Start the FastAPI server to
handle incoming webhooks.
Usage Example:
```
from your_library import Whatsapp
# Initialize the Whatsapp manager
whatsapp = Whatsapp(cloud_api_access_token="your_access_token",
wa_phone_number_id="your_phone_number_id",
version="v17.0")
# Define a callback function to handle incoming messages
def handle_message(request, message):
# Your message handling logic here...
# Run the FastAPI server
whatsapp.run_server(callback=handle_message, webhook_url="/webhook", port=8000, verify_token="your_verify_token")
```
"""
    def __init__(
        self,
        cloud_api_access_token: str = None,
        wa_phone_number_id: str = None,
        version: str = None,
    ):
        """
        Initialize a Whatsapp instance for managing WhatsApp bot interactions.
        Args:
            cloud_api_access_token (str, optional): The Cloud API access token used for authentication.
            wa_phone_number_id (str, optional): The WhatsApp phone number ID.
            version (str, optional): The WhatsApp API version to use.
        """
        # Webhook verification token; empty until configured elsewhere
        # (presumably by run_server(verify_token=...) — see class docstring).
        self.verify_token: str = ""
        # FastAPI app + router that receive the incoming webhook requests.
        self.__app = FastAPI()
        self.__router = APIRouter()
        # Bot performs the outgoing WhatsApp Cloud API calls.
        self.bot = Bot(
            cloud_api_access_token=cloud_api_access_token,
            wa_phone_number_id=wa_phone_number_id,
            version=version,
        )
        # Uvicorn server bound to all interfaces; note the port is fixed at
        # 8000 here regardless of the `port` argument documented on run_server.
        self.__server: Server = Server(
            config=Config(host="0.0.0.0", port=8000, app=self.__app)
        )
        # User-supplied message handler; registered later.
        self.__callback_func = None
| def __set_callback_func(self, callback: Callable[[[Message]], None]): | 0 | 2023-10-15 21:12:45+00:00 | 8k |
caglarkucuk/earthformer-satellite-to-radar | ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py | [
{
"identifier": "CuboidSelfAttentionPatterns",
"path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_patterns.py",
"snippet": "def full_attention(input_shape):\ndef self_axial(input_shape):\ndef self_video_swin(input_shape, P=2, M=4):\ndef self_divided_space_time(input_shape):\ndef self_... | from typing import Sequence, Union
from functools import lru_cache
from collections import OrderedDict
from torch import nn
from einops import rearrange
from .cuboid_transformer_patterns import CuboidSelfAttentionPatterns, CuboidCrossAttentionPatterns
from .utils import (
get_activation, get_norm_layer,
_generalize_padding, _generalize_unpadding,
apply_initialization, round_to)
import warnings
import torch
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint | 4,724 | ffn_drop
ffn_activation
gated_ffn
norm_layer
use_inter_ffn
hierarchical_pos_embed
Whether to add pos embedding for each hierarchy.
max_temporal_relative
padding_type
checkpoint_level
"""
super(CuboidTransformerDecoder, self).__init__()
# initialization mode
self.attn_linear_init_mode = attn_linear_init_mode
self.ffn_linear_init_mode = ffn_linear_init_mode
self.conv_init_mode = conv_init_mode
self.up_linear_init_mode = up_linear_init_mode
self.norm_init_mode = norm_init_mode
assert len(depth) == len(mem_shapes)
self.target_temporal_length = target_temporal_length
self.num_blocks = len(mem_shapes)
self.cross_start = cross_start
self.mem_shapes = mem_shapes
self.depth = depth
self.upsample_type = upsample_type
self.hierarchical_pos_embed = hierarchical_pos_embed
self.checkpoint_level = checkpoint_level
self.use_self_global = use_self_global
self.self_update_global = self_update_global
self.use_cross_global = use_cross_global
self.use_global_vector_ffn = use_global_vector_ffn
self.use_first_self_attn = use_first_self_attn
if block_self_attn_patterns is not None:
if isinstance(block_self_attn_patterns, (tuple, list)):
assert len(block_self_attn_patterns) == self.num_blocks
else:
block_self_attn_patterns = [block_self_attn_patterns for _ in range(self.num_blocks)]
block_self_cuboid_size = []
block_self_cuboid_strategy = []
block_self_shift_size = []
for idx, key in enumerate(block_self_attn_patterns):
func = CuboidSelfAttentionPatterns.get(key)
cuboid_size, strategy, shift_size = func(mem_shapes[idx])
block_self_cuboid_size.append(cuboid_size)
block_self_cuboid_strategy.append(strategy)
block_self_shift_size.append(shift_size)
else:
if not isinstance(block_self_cuboid_size[0][0], (list, tuple)):
block_self_cuboid_size = [block_self_cuboid_size for _ in range(self.num_blocks)]
else:
assert len(block_self_cuboid_size) == self.num_blocks,\
f'Incorrect input format! Received block_self_cuboid_size={block_self_cuboid_size}'
if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)):
block_self_cuboid_strategy = [block_self_cuboid_strategy for _ in range(self.num_blocks)]
else:
assert len(block_self_cuboid_strategy) == self.num_blocks,\
f'Incorrect input format! Received block_self_cuboid_strategy={block_self_cuboid_strategy}'
if not isinstance(block_self_shift_size[0][0], (list, tuple)):
block_self_shift_size = [block_self_shift_size for _ in range(self.num_blocks)]
else:
assert len(block_self_shift_size) == self.num_blocks,\
f'Incorrect input format! Received block_self_shift_size={block_self_shift_size}'
self_blocks = []
for i in range(self.num_blocks):
if not self.use_first_self_attn and i == self.num_blocks - 1:
# For the top block, we won't use an additional self attention layer.
ele_depth = depth[i] - 1
else:
ele_depth = depth[i]
stack_cuboid_blocks =\
[StackCuboidSelfAttentionBlock(
dim=self.mem_shapes[i][-1],
num_heads=num_heads,
block_cuboid_size=block_self_cuboid_size[i],
block_strategy=block_self_cuboid_strategy[i],
block_shift_size=block_self_shift_size[i],
attn_drop=attn_drop,
proj_drop=proj_drop,
ffn_drop=ffn_drop,
activation=ffn_activation,
gated_ffn=gated_ffn,
norm_layer=norm_layer,
use_inter_ffn=use_inter_ffn,
padding_type=padding_type,
use_global_vector=use_self_global,
use_global_vector_ffn=use_global_vector_ffn,
use_global_self_attn=use_global_self_attn,
separate_global_qkv=separate_global_qkv,
global_dim_ratio=global_dim_ratio,
checkpoint_level=checkpoint_level,
use_relative_pos=use_relative_pos,
use_final_proj=self_attn_use_final_proj,
# initialization
attn_linear_init_mode=attn_linear_init_mode,
ffn_linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,
) for _ in range(ele_depth)]
self_blocks.append(nn.ModuleList(stack_cuboid_blocks))
self.self_blocks = nn.ModuleList(self_blocks)
if block_cross_attn_patterns is not None:
if isinstance(block_cross_attn_patterns, (tuple, list)):
assert len(block_cross_attn_patterns) == self.num_blocks
else:
block_cross_attn_patterns = [block_cross_attn_patterns for _ in range(self.num_blocks)]
block_cross_cuboid_hw = []
block_cross_cuboid_strategy = []
block_cross_shift_hw = []
block_cross_n_temporal = []
for idx, key in enumerate(block_cross_attn_patterns):
if key == "last_frame_dst":
cuboid_hw = None
shift_hw = None
strategy = None
n_temporal = None
else:
"""The only change made in this file is the added upsampling layer in the CuboidTransformerModel,
which increases the `h` and `w` dimensions of the input tensor by 2x to match the dimensions of the output tensor.
The rest is the same as the original file from the EarthFormer repo!
"""
"""A space-time Transformer with Cuboid Attention"""
class PosEmbed(nn.Module):
    """Learned absolute spatiotemporal positional embedding added to (B, T, H, W, C) inputs."""
    def __init__(self, embed_dim, maxT, maxH, maxW, typ='t+h+w'):
        r"""
        Parameters
        ----------
        embed_dim
            Channel dimension of the embeddings (must match the input's C).
        maxT
            Maximum supported temporal length.
        maxH
            Maximum supported height.
        maxW
            Maximum supported width.
        typ
            The type of the positional embedding.
            - t+h+w:
                Separate learned embedding tables for the T, H and W axes.
            - t+hw:
                One table for T and one table for the flattened H*W positions.
        """
        super(PosEmbed, self).__init__()
        self.typ = typ
        assert self.typ in ['t+h+w', 't+hw']
        self.maxT = maxT
        self.maxH = maxH
        self.maxW = maxW
        self.embed_dim = embed_dim
        # spatiotemporal learned positional embedding
        if self.typ == 't+h+w':
            self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)
            self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim)
            self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim)
            # nn.init.trunc_normal_(self.T_embed.weight, std=0.02)
            # nn.init.trunc_normal_(self.H_embed.weight, std=0.02)
            # nn.init.trunc_normal_(self.W_embed.weight, std=0.02)
        elif self.typ == 't+hw':
            self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)
            self.HW_embed = nn.Embedding(num_embeddings=maxH * maxW, embedding_dim=embed_dim)
            # nn.init.trunc_normal_(self.T_embed.weight, std=0.02)
            # nn.init.trunc_normal_(self.HW_embed.weight, std=0.02)
        else:
            raise NotImplementedError
        self.reset_parameters()
    def reset_parameters(self):
        # Delegate to the project-wide initialization helper for embeddings.
        for m in self.children():
            apply_initialization(m, embed_mode="0")
    def forward(self, x):
        """
        Parameters
        ----------
        x
            Shape (B, T, H, W, C)
        Returns
        -------
        out
            Return the x + positional embeddings
        """
        _, T, H, W, _ = x.shape
        t_idx = torch.arange(T, device=x.device)  # (T,)
        h_idx = torch.arange(H, device=x.device)  # (H,)
        w_idx = torch.arange(W, device=x.device)  # (W,)
        if self.typ == 't+h+w':
            # Broadcast the three 1-D embeddings across the other axes.
            return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim)\
                + self.H_embed(h_idx).reshape(1, H, 1, self.embed_dim)\
                + self.W_embed(w_idx).reshape(1, 1, W, self.embed_dim)
        elif self.typ == 't+hw':
            # Row-major flattening of the (H, W) grid into a single index.
            spatial_idx = h_idx.unsqueeze(-1) * self.maxW + w_idx
            return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim) + self.HW_embed(spatial_idx)
        else:
            raise NotImplementedError
class PositionwiseFFN(nn.Module):
    """The Position-wise FFN layer used in Transformer-like architectures
    If pre_norm is True:
        norm(data) -> fc1 -> act -> act_dropout -> fc2 -> dropout -> res(+data)
    Else:
        data -> fc1 -> act -> act_dropout -> fc2 -> dropout -> norm(res(+data))
    If gated projection is used, the hidden activation is computed as
        act(fc1_gate(data)) * fc1(data)
    """
    def __init__(self,
                 units: int = 512,
                 hidden_size: int = 2048,
                 activation_dropout: float = 0.0,
                 dropout: float = 0.1,
                 gated_proj: bool = False,
                 activation='relu',
                 normalization: str = 'layer_norm',
                 layer_norm_eps: float = 1E-5,
                 pre_norm: bool = False,
                 linear_init_mode="0",
                 norm_init_mode="0",
                 ):
        """
        Parameters
        ----------
        units
            Input/output channel size (residual connection requires them equal).
        hidden_size
            Hidden channel size of the FFN.
        activation_dropout
            Dropout applied right after the activation.
        dropout
            Dropout applied after the second linear layer.
        gated_proj
            Whether to use the gated projection act(fc1_gate(x)) * fc1(x).
        activation
            Name of the activation, resolved via `get_activation`.
        normalization
            layer_norm or no_norm
        layer_norm_eps
            Epsilon of the normalization layer.
        pre_norm
            Pre-layer normalization as proposed in the paper:
            "[ACL2018] The Best of Both Worlds: Combining Recent Advances in
            Neural Machine Translation"
            This will stabilize the training of Transformers.
            You may also refer to
            "[Arxiv2020] Understanding the Difficulty of Training Transformers"
        linear_init_mode
            Initialization mode for the linear layers.
        norm_init_mode
            Initialization mode for the normalization layer.
        """
        super().__init__()
        # initialization
        self.linear_init_mode = linear_init_mode
        self.norm_init_mode = norm_init_mode
        self._pre_norm = pre_norm
        self._gated_proj = gated_proj
        # Keep the constructor arguments around for introspection/serialization.
        self._kwargs = OrderedDict([
            ('units', units),
            ('hidden_size', hidden_size),
            ('activation_dropout', activation_dropout),
            ('activation', activation),
            ('dropout', dropout),
            ('normalization', normalization),
            ('layer_norm_eps', layer_norm_eps),
            ('gated_proj', gated_proj),
            ('pre_norm', pre_norm)
        ])
        self.dropout_layer = nn.Dropout(dropout)
        self.activation_dropout_layer = nn.Dropout(activation_dropout)
        self.ffn_1 = nn.Linear(in_features=units, out_features=hidden_size,
                               bias=True)
        if self._gated_proj:
            self.ffn_1_gate = nn.Linear(in_features=units,
                                        out_features=hidden_size,
                                        bias=True)
        self.activation = get_activation(activation)
        self.ffn_2 = nn.Linear(in_features=hidden_size, out_features=units,
                               bias=True)
        self.layer_norm = get_norm_layer(normalization=normalization,
                                         in_channels=units,
                                         epsilon=layer_norm_eps)
        self.reset_parameters()
    def reset_parameters(self):
        # Re-initialize all sub-layers with the project-wide helper.
        apply_initialization(self.ffn_1,
                             linear_mode=self.linear_init_mode)
        if self._gated_proj:
            apply_initialization(self.ffn_1_gate,
                                 linear_mode=self.linear_init_mode)
        apply_initialization(self.ffn_2,
                             linear_mode=self.linear_init_mode)
        apply_initialization(self.layer_norm,
                             norm_mode=self.norm_init_mode)
    def forward(self, data):
        """
        Parameters
        ----------
        data :
            Shape (B, seq_length, C_in) where C_in == `units`
        Returns
        -------
        out :
            Shape (B, seq_length, C_out) with C_out == C_in (residual add)
        """
        residual = data
        if self._pre_norm:
            data = self.layer_norm(data)
        if self._gated_proj:
            out = self.activation(self.ffn_1_gate(data)) * self.ffn_1(data)
        else:
            out = self.activation(self.ffn_1(data))
        out = self.activation_dropout_layer(out)
        out = self.ffn_2(out)
        out = self.dropout_layer(out)
        out = out + residual
        if not self._pre_norm:
            out = self.layer_norm(out)
        return out
class PatchMerging3D(nn.Module):
    """Patch merging layer: folds each (dT, dH, dW) patch into the channel axis
    and linearly projects the result to `out_dim` channels."""
    def __init__(self,
                 dim,
                 out_dim=None,
                 downsample=(1, 2, 2),
                 norm_layer='layer_norm',
                 padding_type='nearest',
                 linear_init_mode="0",
                 norm_init_mode="0",
                 ):
        """
        Parameters
        ----------
        dim
            Number of input channels.
        out_dim
            Number of output channels; defaults to ``max(downsample) * dim``.
        downsample
            downsample factor along (T, H, W)
        norm_layer
            The normalization layer
        padding_type
            Padding mode forwarded to `_generalize_padding` when the input is
            not divisible by the downsample factors.
        linear_init_mode
            Initialization mode for the reduction linear layer.
        norm_init_mode
            Initialization mode for the normalization layer.
        """
        super().__init__()
        self.linear_init_mode = linear_init_mode
        self.norm_init_mode = norm_init_mode
        self.dim = dim
        if out_dim is None:
            out_dim = max(downsample) * dim
        self.out_dim = out_dim
        self.downsample = downsample
        self.padding_type = padding_type
        self.reduction = nn.Linear(downsample[0] * downsample[1] * downsample[2] * dim,
                                   out_dim, bias=False)
        self.norm = get_norm_layer(norm_layer, in_channels=downsample[0] * downsample[1] * downsample[2] * dim)
        self.reset_parameters()

    def reset_parameters(self):
        for m in self.children():
            apply_initialization(m,
                                 linear_mode=self.linear_init_mode,
                                 norm_mode=self.norm_init_mode)

    def get_out_shape(self, data_shape):
        """Return the (T, H, W, C) output shape for an input of `data_shape` = (T, H, W, C_in)."""
        T, H, W, C_in = data_shape
        pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]
        pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]
        pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]
        return (T + pad_t) // self.downsample[0], (H + pad_h) // self.downsample[1], (W + pad_w) // self.downsample[2],\
               self.out_dim

    def forward(self, x):
        """
        Parameters
        ----------
        x
            Input feature, tensor size (B, T, H, W, C).

        Returns
        -------
        out
            Shape (B, T // downsample[0], H // downsample[1], W // downsample[2], out_dim)
        """
        B, T, H, W, C = x.shape
        # Pad T/H/W up to multiples of the downsample factors.
        pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]
        pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]
        pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]
        # BUGFIX: the original condition read `pad_h or pad_h or pad_w`, so a
        # temporal-only padding requirement (pad_t > 0, pad_h == pad_w == 0)
        # skipped both the padding and the size update, breaking the reshape.
        if pad_t or pad_h or pad_w:
            T += pad_t
            H += pad_h
            W += pad_w
            x = _generalize_padding(x, pad_t, pad_w, pad_h, padding_type=self.padding_type)
        # Fold each (dT, dH, dW) patch into the channel dimension.
        x = x.reshape((B,
                       T // self.downsample[0], self.downsample[0],
                       H // self.downsample[1], self.downsample[1],
                       W // self.downsample[2], self.downsample[2], C)) \
            .permute(0, 1, 3, 5, 2, 4, 6, 7) \
            .reshape(B, T // self.downsample[0], H // self.downsample[1], W // self.downsample[2],
                     self.downsample[0] * self.downsample[1] * self.downsample[2] * C)
        x = self.norm(x)
        x = self.reduction(x)
        return x
class Upsample3DLayer(nn.Module):
    """Upsampling based on nn.Upsample and Conv3x3.

    If the temporal dimension remains the same:
        x --> interpolation-2d (nearest) --> conv3x3(dim, out_dim)
    Else:
        x --> interpolation-3d (nearest) --> conv(dim, out_dim)
    """
    def __init__(self,
                 dim,
                 out_dim,
                 target_size,
                 temporal_upsample=False,
                 kernel_size=3,
                 layout='THWC',
                 conv_init_mode="0",
                 ):
        """
        Parameters
        ----------
        dim
            Number of input channels.
        out_dim
            Number of output channels.
        target_size
            Size of the output tensor. Will be a tuple/list that contains T_new, H_new, W_new
        temporal_upsample
            Whether the temporal axis will go through upsampling.
        kernel_size
            The kernel size of the Conv2D layer
        layout
            The layout of the inputs
        conv_init_mode
            Initialization mode for the conv layer.
        """
        super(Upsample3DLayer, self).__init__()
        self.conv_init_mode = conv_init_mode
        self.target_size = target_size
        self.out_dim = out_dim
        self.temporal_upsample = temporal_upsample
        if temporal_upsample:
            self.up = nn.Upsample(size=target_size, mode='nearest')  # 3D upsampling
        else:
            self.up = nn.Upsample(size=(target_size[1], target_size[2]), mode='nearest')  # 2D upsampling
        # NOTE(review): the conv is always a Conv2d, even when
        # temporal_upsample=True feeds it a 5-D tensor — confirm those paths
        # are actually exercised before relying on them.
        self.conv = nn.Conv2d(in_channels=dim, out_channels=out_dim, kernel_size=(kernel_size, kernel_size),
                              padding=(kernel_size // 2, kernel_size // 2))
        assert layout in ['THWC', 'CTHW']
        self.layout = layout

        self.reset_parameters()

    def reset_parameters(self):
        for m in self.children():
            apply_initialization(m,
                                 conv_mode=self.conv_init_mode)

    def forward(self, x):
        """
        Parameters
        ----------
        x
            Shape (B, T, H, W, C) or (B, C, T, H, W)

        Returns
        -------
        out
            Shape (B, T, H_new, W_new, C_out) or (B, C_out, T, H_new, W_new)
        """
        if self.layout == 'THWC':
            B, T, H, W, C = x.shape
            if self.temporal_upsample:
                x = x.permute(0, 4, 1, 2, 3)  # (B, C, T, H, W)
                return self.conv(self.up(x)).permute(0, 2, 3, 4, 1)
            else:
                assert self.target_size[0] == T
                x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)  # (B * T, C, H, W)
                x = self.up(x)
                return self.conv(x).permute(0, 2, 3, 1).reshape((B,) + self.target_size + (self.out_dim,))
        elif self.layout == 'CTHW':
            B, C, T, H, W = x.shape
            if self.temporal_upsample:
                return self.conv(self.up(x))
            else:
                # BUGFIX: the original referenced the non-existent attribute
                # `self.output_size` (AttributeError); the stored attribute is
                # `self.target_size`.
                assert self.target_size[0] == T
                x = x.permute(0, 2, 1, 3, 4)  # (B, T, C, H, W)
                x = x.reshape(B * T, C, H, W)
                return self.conv(self.up(x)).reshape(B, self.target_size[0], self.out_dim, self.target_size[1],
                                                     self.target_size[2]).permute(0, 2, 1, 3, 4)
def cuboid_reorder(data, cuboid_size, strategy):
    """Reorder a (B, T, H, W, C) tensor into (B, num_cuboids, bT * bH * bW, C).

    Each of the T/H/W axes is factored into (num_blocks, block_size) according
    to `strategy`: 'l' takes local contiguous blocks, 'd' takes dilated
    (strided) samples. All axes are assumed divisible by the cuboid sizes.

    Parameters
    ----------
    data
        Input of shape (B, T, H, W, C).
    cuboid_size
        (bT, bH, bW) size of one cuboid.
    strategy
        Per-axis decomposition strategy, each element 'l' or 'd'.

    Returns
    -------
    reordered_data
        Shape (B, num_cuboids, bT * bH * bW, C), where
        num_cuboids = T / bT * H / bH * W / bW.
    """
    B, T, H, W, C = data.shape
    total_sizes = (T, H, W)
    inner_shape = []
    outer_axes = []  # axes enumerating the cuboids
    block_axes = []  # axes enumerating positions inside one cuboid
    for axis_idx, (blk, tot, mode) in enumerate(zip(cuboid_size, total_sizes, strategy)):
        if mode == 'l':
            # local: split into (num_blocks, block_size)
            inner_shape += [tot // blk, blk]
            outer_axes.append(2 * axis_idx + 1)
            block_axes.append(2 * axis_idx + 2)
        elif mode == 'd':
            # dilated: split into (block_size, num_blocks)
            inner_shape += [blk, tot // blk]
            outer_axes.append(2 * axis_idx + 2)
            block_axes.append(2 * axis_idx + 1)
        else:
            raise NotImplementedError
    num_cuboids = T // cuboid_size[0] * H // cuboid_size[1] * W // cuboid_size[2]
    cuboid_volume = cuboid_size[0] * cuboid_size[1] * cuboid_size[2]
    reshaped = data.reshape((B, *inner_shape, C))
    permuted = reshaped.permute((0, *outer_axes, *block_axes, 7))
    return permuted.reshape((B, num_cuboids, cuboid_volume, C))
def cuboid_reorder_reverse(data, cuboid_size, strategy, orig_data_shape):
    """Invert `cuboid_reorder`, recovering the original (B, T, H, W, C) layout.

    Parameters
    ----------
    data
        Reordered tensor of shape (B, num_cuboids, cuboid_volume, C).
    cuboid_size
        (bT, bH, bW) size of one cuboid used in the forward reorder.
    strategy
        Per-axis decomposition strategy, each element 'l' or 'd'.
    orig_data_shape
        The original (T, H, W) sizes.

    Returns
    -------
    data
        The recovered tensor of shape (B, T, H, W, C).
    """
    B, num_cuboids, cuboid_volume, C = data.shape
    T, H, W = orig_data_shape
    # Build the permutation that interleaves cuboid-count axes (1..3) with
    # intra-cuboid axes (4..6) back into T/H/W order.
    perm = [0]
    for axis_idx, mode in enumerate(strategy):
        if mode == 'l':
            perm.extend([axis_idx + 1, axis_idx + 4])
        elif mode == 'd':
            perm.extend([axis_idx + 4, axis_idx + 1])
        else:
            raise NotImplementedError
    perm.append(7)
    blocked = data.reshape(B, T // cuboid_size[0], H // cuboid_size[1], W // cuboid_size[2],
                           cuboid_size[0], cuboid_size[1], cuboid_size[2], C)
    return blocked.permute(perm).reshape((B, T, H, W, C))
@lru_cache()
def compute_cuboid_self_attention_mask(data_shape, cuboid_size, shift_size, strategy, padding_type, device):
    """Compute the shift window attention mask
    Parameters
    ----------
    data_shape
        Should be T, H, W
    cuboid_size
        Size of the cuboid
    shift_size
        The shift size
    strategy
        The decomposition strategy
    padding_type
        Type of the padding
    device
        The device
    Returns
    -------
    attn_mask
        Mask with shape (num_cuboid, cuboid_vol, cuboid_vol)
        The padded values will always be masked. The other masks will ensure that the shifted windows
        will only attend to those in the shifted windows.
    """
    T, H, W = data_shape
    # Amount of padding needed so each axis is divisible by the cuboid size.
    pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0]
    pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1]
    pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2]
    data_mask = None
    # Prepare data mask: True marks real (non-padded) positions.
    if pad_t > 0 or pad_h > 0 or pad_w > 0:
        if padding_type == 'ignore':
            data_mask = torch.ones((1, T, H, W, 1), dtype=torch.bool, device=device)
            data_mask = F.pad(data_mask, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t))
    else:
        data_mask = torch.ones((1, T + pad_t, H + pad_h, W + pad_w, 1), dtype=torch.bool, device=device)
    if any(i > 0 for i in shift_size):
        if padding_type == 'ignore':
            # Roll the validity mask the same way the data is rolled.
            data_mask = torch.roll(data_mask,
                                   shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))
    if padding_type == 'ignore':
        # (1, num_cuboids, cuboid_volume, 1)
        data_mask = cuboid_reorder(data_mask, cuboid_size, strategy=strategy)
        data_mask = data_mask.squeeze(-1).squeeze(0)  # (num_cuboid, cuboid_volume)
    # Prepare mask based on index: label the 3x3x3 shifted regions so that only
    # positions with the same label may attend to each other (same construction
    # as the Swin Transformer shifted-window mask).
    shift_mask = torch.zeros((1, T + pad_t, H + pad_h, W + pad_w, 1), device=device)  # 1 T H W 1
    cnt = 0
    for t in slice(-cuboid_size[0]), slice(-cuboid_size[0], -shift_size[0]), slice(-shift_size[0], None):
        for h in slice(-cuboid_size[1]), slice(-cuboid_size[1], -shift_size[1]), slice(-shift_size[1], None):
            for w in slice(-cuboid_size[2]), slice(-cuboid_size[2], -shift_size[2]), slice(-shift_size[2], None):
                shift_mask[:, t, h, w, :] = cnt
                cnt += 1
    shift_mask = cuboid_reorder(shift_mask, cuboid_size, strategy=strategy)
    shift_mask = shift_mask.squeeze(-1).squeeze(0)  # num_cuboids, cuboid_volume
    # Pairs belong to the same region iff their labels are equal.
    attn_mask = (shift_mask.unsqueeze(1) - shift_mask.unsqueeze(2)) == 0  # num_cuboids, cuboid_volume, cuboid_volume
    if padding_type == 'ignore':
        # Additionally forbid attention to/from padded positions.
        attn_mask = data_mask.unsqueeze(1) * data_mask.unsqueeze(2) * attn_mask
    return attn_mask
def masked_softmax(att_score, mask, axis: int = -1):
    """Softmax that ignores masked elements.

    The mask can be broadcastable.

    Parameters
    ----------
    att_score
        Shape (..., length, ...)
    mask
        Shape (..., length, ...); 1/True keeps an element, 0/False masks it.
    axis
        The axis to calculate the softmax. att_score.shape[axis] must be the
        same as mask.shape[axis].

    Returns
    -------
    att_weights
        Shape (..., length, ...); masked positions receive exactly zero weight.
    """
    if mask is None:
        return torch.softmax(att_score, dim=axis)
    # Fill masked logits with a value small enough to vanish after softmax
    # (fp16 cannot represent -1e18, so use -1e4 there).
    fill_value = -1E4 if att_score.dtype == torch.float16 else -1E18
    att_score = att_score.masked_fill(torch.logical_not(mask), fill_value)
    # The final multiply zeroes out any residual mass on masked positions.
    return torch.softmax(att_score, dim=axis) * mask
def update_cuboid_size_shift_size(data_shape, cuboid_size, shift_size, strategy):
    """Clamp cuboid/shift sizes to the actual data shape.

    An axis whose full extent fits inside one cuboid needs neither splitting
    nor shifting; a dilated ('d') axis never uses shifting.

    Parameters
    ----------
    data_shape
        The (T, H, W) shape of the data.
    cuboid_size
        Requested size of the cuboid.
    shift_size
        Requested size of the shift.
    strategy
        Per-axis attention strategy ('l' or 'd').

    Returns
    -------
    new_cuboid_size
        Size of the cuboid
    new_shift_size
        Size of the shift
    """
    new_cuboid = []
    new_shift = []
    for size, blk, shift, mode in zip(data_shape, cuboid_size, shift_size, strategy):
        if size <= blk:
            # One cuboid covers the whole axis: clamp it and disable shifting.
            new_cuboid.append(size)
            new_shift.append(0)
        else:
            new_cuboid.append(blk)
            new_shift.append(0 if mode == 'd' else shift)
    return tuple(new_cuboid), tuple(new_shift)
class CuboidSelfAttentionLayer(nn.Module):
"""Implements the cuboid self attention.
The idea of Cuboid Self Attention is to divide the input tensor (T, H, W) into several non-overlapping cuboids.
We apply self-attention inside each cuboid and all cuboid-level self attentions are executed in parallel.
We adopt two mechanisms for decomposing the input tensor into cuboids:
1) local:
We group the tensors within a local window, e.g., X[t:(t+b_t), h:(h+b_h), w:(w+b_w)]. We can also apply the
shifted window strategy proposed in "[ICCV2021] Swin Transformer: Hierarchical Vision Transformer using Shifted Windows".
2) dilated:
Inspired by the success of dilated convolution "[ICLR2016] Multi-Scale Context Aggregation by Dilated Convolutions",
we split the tensor with dilation factors that are tied to the size of the cuboid. For example, for a cuboid that has width `b_w`,
we sample the elements starting from 0 as 0, w / b_w, 2 * w / b_w, ..., (b_w - 1) * w / b_w.
The cuboid attention can be viewed as a generalization of the attention mechanism proposed in Video Swin Transformer, https://arxiv.org/abs/2106.13230.
The computational complexity of CuboidAttention can be simply calculated as O(T H W * b_t b_h b_w). To cover multiple correlation patterns,
we are able to combine multiple CuboidAttention layers with different configurations such as cuboid size, shift size, and local / global decomposing strategy.
In addition, it is straight-forward to extend the cuboid attention to other types of spatiotemporal data that are not described
as regular tensors. We need to define alternative approaches to partition the data into "cuboids".
In addition, inspired by "[NeurIPS2021] Do Transformers Really Perform Badly for Graph Representation?",
"[NeurIPS2020] Big Bird: Transformers for Longer Sequences", "[EMNLP2021] Longformer: The Long-Document Transformer", we keep
$K$ global vectors to record the global status of the spatiotemporal system. These global vectors will attend to the whole tensor and
the vectors inside each individual cuboids will also attend to the global vectors so that they can peep into the global status of the system.
"""
    def __init__(self,
                 dim,
                 num_heads,
                 cuboid_size=(2, 7, 7),
                 shift_size=(0, 0, 0),
                 strategy=('l', 'l', 'l'),
                 padding_type='ignore',
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop=0.0,
                 proj_drop=0.0,
                 use_final_proj=True,
                 norm_layer='layer_norm',
                 use_global_vector=False,
                 use_global_self_attn=False,
                 separate_global_qkv=False,
                 global_dim_ratio=1,
                 checkpoint_level=True,
                 use_relative_pos=True,
                 attn_linear_init_mode="0",
                 ffn_linear_init_mode="0",
                 norm_init_mode="0",
                 ):
        """
        Parameters
        ----------
        dim
            The dimension of the input tensor
        num_heads
            The number of heads
        cuboid_size
            The size of each cuboid
        shift_size
            The size for shifting the windows.
        strategy
            The decomposition strategy of the tensor. 'l' stands for local and 'd' stands for dilated.
        padding_type
            The type of padding.
        qkv_bias
            Whether to enable bias in calculating qkv attention
        qk_scale
            Whether to enable scale factor when calculating the attention.
        attn_drop
            The attention dropout
        proj_drop
            The projection dropout
        use_final_proj
            Whether to use the final projection or not
        norm_layer
            The normalization layer
        use_global_vector
            Whether to use the global vector or not.
        use_global_self_attn
            Whether to do self attention among global vectors
        separate_global_qkv
            Whether to different network to calc q_global, k_global, v_global
        global_dim_ratio
            The dim (channels) of global vectors is `global_dim_ratio*dim`.
        checkpoint_level
            Whether to enable gradient checkpointing.
        use_relative_pos
            Whether to add a learned relative-position bias inside each cuboid.
        attn_linear_init_mode
            Initialization mode for the attention projection layers.
        ffn_linear_init_mode
            Initialization mode for the final projection layer.
        norm_init_mode
            Initialization mode for the normalization layers.
        """
        super(CuboidSelfAttentionLayer, self).__init__()
        # initialization
        self.attn_linear_init_mode = attn_linear_init_mode
        self.ffn_linear_init_mode = ffn_linear_init_mode
        self.norm_init_mode = norm_init_mode
        assert dim % num_heads == 0
        self.num_heads = num_heads
        self.dim = dim
        self.cuboid_size = cuboid_size
        self.shift_size = shift_size
        self.strategy = strategy
        self.padding_type = padding_type
        self.use_final_proj = use_final_proj
        self.use_relative_pos = use_relative_pos
        # global vectors
        self.use_global_vector = use_global_vector
        self.use_global_self_attn = use_global_self_attn
        self.separate_global_qkv = separate_global_qkv
        if global_dim_ratio != 1:
            assert separate_global_qkv == True, \
                f"Setting global_dim_ratio != 1 requires separate_global_qkv == True."
        self.global_dim_ratio = global_dim_ratio
        assert self.padding_type in ['ignore', 'zeros', 'nearest']
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        if use_relative_pos:
            # Learned relative-position bias table indexed by the per-axis
            # offset between two positions inside one cuboid (Swin-style).
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros((2 * cuboid_size[0] - 1) * (2 * cuboid_size[1] - 1) * (2 * cuboid_size[2] - 1), num_heads))
            nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)
            coords_t = torch.arange(self.cuboid_size[0])
            coords_h = torch.arange(self.cuboid_size[1])
            coords_w = torch.arange(self.cuboid_size[2])
            coords = torch.stack(torch.meshgrid(coords_t, coords_h, coords_w))  # 3, Bt, Bh, Bw
            coords_flatten = torch.flatten(coords, 1)  # 3, Bt*Bh*Bw
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 3, Bt*Bh*Bw, Bt*Bh*Bw
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Bt*Bh*Bw, Bt*Bh*Bw, 3
            relative_coords[:, :, 0] += self.cuboid_size[0] - 1  # shift to start from 0
            relative_coords[:, :, 1] += self.cuboid_size[1] - 1
            relative_coords[:, :, 2] += self.cuboid_size[2] - 1
            # Mixed-radix encoding of the 3 offsets into one flat table index.
            relative_coords[:, :, 0] *= (2 * self.cuboid_size[1] - 1) * (2 * self.cuboid_size[2] - 1)
            relative_coords[:, :, 1] *= (2 * self.cuboid_size[2] - 1)
            relative_position_index = relative_coords.sum(-1)  # shape is (cuboid_volume, cuboid_volume)
            self.register_buffer("relative_position_index", relative_position_index)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        if self.use_global_vector:
            if self.separate_global_qkv:
                # Separate projections for local->global (l2g) and
                # global->local (g2l) attention, plus optional global-global.
                self.l2g_q_net = nn.Linear(dim, dim, bias=qkv_bias)
                self.l2g_global_kv_net = nn.Linear(
                    in_features=global_dim_ratio * dim,
                    out_features=dim * 2,
                    bias=qkv_bias)
                self.g2l_global_q_net = nn.Linear(
                    in_features=global_dim_ratio * dim,
                    out_features=dim,
                    bias=qkv_bias)
                self.g2l_k_net = nn.Linear(
                    in_features=dim,
                    out_features=dim,
                    bias=qkv_bias)
                self.g2l_v_net = nn.Linear(
                    in_features=dim,
                    out_features=global_dim_ratio * dim,
                    bias=qkv_bias)
                if self.use_global_self_attn:
                    self.g2g_global_qkv_net = nn.Linear(
                        in_features=global_dim_ratio * dim,
                        out_features=global_dim_ratio * dim * 3,
                        bias=qkv_bias)
            else:
                self.global_qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
            self.global_attn_drop = nn.Dropout(attn_drop)
        if use_final_proj:
            self.proj = nn.Linear(dim, dim)
            self.proj_drop = nn.Dropout(proj_drop)
            if self.use_global_vector:
                self.global_proj = nn.Linear(
                    in_features=global_dim_ratio * dim,
                    out_features=global_dim_ratio * dim)
        self.norm = get_norm_layer(norm_layer, in_channels=dim)
        if self.use_global_vector:
            self.global_vec_norm = get_norm_layer(norm_layer,
                                                  in_channels=global_dim_ratio*dim)
        self.checkpoint_level = checkpoint_level
        self.reset_parameters()
def reset_parameters(self):
apply_initialization(self.qkv,
linear_mode=self.attn_linear_init_mode)
if self.use_final_proj:
apply_initialization(self.proj,
linear_mode=self.ffn_linear_init_mode)
apply_initialization(self.norm,
norm_mode=self.norm_init_mode)
if self.use_global_vector:
if self.separate_global_qkv:
apply_initialization(self.l2g_q_net,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.l2g_global_kv_net,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.g2l_global_q_net,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.g2l_k_net,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.g2l_v_net,
linear_mode=self.attn_linear_init_mode)
if self.use_global_self_attn:
apply_initialization(self.g2g_global_qkv_net,
linear_mode=self.attn_linear_init_mode)
else:
apply_initialization(self.global_qkv,
linear_mode=self.attn_linear_init_mode)
apply_initialization(self.global_vec_norm,
norm_mode=self.norm_init_mode)
    def forward(self, x, global_vectors=None):
        """Apply cuboid self-attention (optionally coupled with global vectors).

        Parameters
        ----------
        x
            Input tensor with shape (B, T, H, W, C).
        global_vectors
            Global memory vectors with shape (B, N, C_global).
            Only read when ``self.use_global_vector`` is True.

        Returns
        -------
        ``x_out`` with shape (B, T, H, W, C), or the tuple
        ``(x_out, new_global_vector)`` when ``self.use_global_vector`` is True.
        """
        x = self.norm(x)
        B, T, H, W, C_in = x.shape
        assert C_in == self.dim
        if self.use_global_vector:
            _, num_global, _ = global_vectors.shape
            global_vectors = self.global_vec_norm(global_vectors)
        # NOTE(review): assumes `update_cuboid_size_shift_size` adapts the
        # configured cuboid/shift sizes to the actual (T, H, W) -- confirm
        # against the helper's definition.
        cuboid_size, shift_size = update_cuboid_size_shift_size((T, H, W), self.cuboid_size,
                                                                self.shift_size, self.strategy)
        # Step-1: Pad the input so each axis divides evenly into cuboids.
        pad_t = (cuboid_size[0] - T % cuboid_size[0]) % cuboid_size[0]
        pad_h = (cuboid_size[1] - H % cuboid_size[1]) % cuboid_size[1]
        pad_w = (cuboid_size[2] - W % cuboid_size[2]) % cuboid_size[2]
        # We use generalized padding
        x = _generalize_padding(x, pad_t, pad_h, pad_w, self.padding_type)
        # Step-2: Shift the tensor based on shift window attention.
        if any(i > 0 for i in shift_size):
            shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))
        else:
            shifted_x = x
        # Step-3: Reorder the tensor
        # (B, num_cuboids, cuboid_volume, C)
        reordered_x = cuboid_reorder(shifted_x, cuboid_size=cuboid_size, strategy=self.strategy)
        _, num_cuboids, cuboid_volume, _ = reordered_x.shape
        # Step-4: Perform self-attention
        # (num_cuboids, cuboid_volume, cuboid_volume)
        attn_mask = compute_cuboid_self_attention_mask((T, H, W), cuboid_size,
                                                       shift_size=shift_size,
                                                       strategy=self.strategy,
                                                       padding_type=self.padding_type,
                                                       device=x.device)
        head_C = C_in // self.num_heads
        qkv = self.qkv(reordered_x).reshape(B, num_cuboids, cuboid_volume, 3, self.num_heads, head_C)\
            .permute(3, 0, 4, 1, 2, 5)  # (3, B, num_heads, num_cuboids, cuboid_volume, head_C)
        q, k, v = qkv[0], qkv[1], qkv[2]  # Each has shape (B, num_heads, num_cuboids, cuboid_volume, head_C)
        q = q * self.scale
        attn_score = q @ k.transpose(-2, -1)  # Shape (B, num_heads, num_cuboids, cuboid_volume, cuboid_volume)
        if self.use_relative_pos:
            # Gather per-pair biases from the learned table; the index was
            # precomputed in __init__ from relative (t, h, w) offsets.
            relative_position_bias = self.relative_position_bias_table[
                self.relative_position_index[:cuboid_volume, :cuboid_volume].reshape(-1)]\
                .reshape(cuboid_volume, cuboid_volume, -1)  # (cuboid_volume, cuboid_volume, num_head)
            relative_position_bias = relative_position_bias.permute(2, 0, 1)\
                .contiguous().unsqueeze(1)  # num_heads, 1, cuboid_volume, cuboid_volume
            attn_score = attn_score + relative_position_bias  # Shape (B, num_heads, num_cuboids, cuboid_volume, cuboid_volume)
        # Calculate the local to global attention
        if self.use_global_vector:
            global_head_C = self.global_dim_ratio * head_C  # take effect only separate_global_qkv = True
            if self.separate_global_qkv:
                l2g_q = self.l2g_q_net(reordered_x)\
                    .reshape(B, num_cuboids, cuboid_volume, self.num_heads, head_C)\
                    .permute(0, 3, 1, 2, 4)  # (B, num_heads, num_cuboids, cuboid_volume, head_C)
                l2g_q = l2g_q * self.scale
                l2g_global_kv = self.l2g_global_kv_net(global_vectors)\
                    .reshape(B, 1, num_global, 2, self.num_heads, head_C)\
                    .permute(3, 0, 4, 1, 2, 5)  # Shape (2, B, num_heads, 1, N, head_C)
                l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1]
                g2l_global_q = self.g2l_global_q_net(global_vectors)\
                    .reshape(B, num_global, self.num_heads, head_C)\
                    .permute(0, 2, 1, 3)  # Shape (B, num_heads, N, head_C)
                g2l_global_q = g2l_global_q * self.scale
                # g2l_kv = self.g2l_kv_net(reordered_x)\
                #     .reshape(B, num_cuboids, cuboid_volume, 2, self.num_heads, global_head_C)\
                #     .permute(3, 0, 4, 1, 2, 5)  # (2, B, num_heads, num_cuboids, cuboid_volume, head_C)
                # g2l_k, g2l_v = g2l_kv[0], g2l_kv[1]
                g2l_k = self.g2l_k_net(reordered_x)\
                    .reshape(B, num_cuboids, cuboid_volume, self.num_heads, head_C)\
                    .permute(0, 3, 1, 2, 4)  # (B, num_heads, num_cuboids, cuboid_volume, head_C)
                g2l_v = self.g2l_v_net(reordered_x) \
                    .reshape(B, num_cuboids, cuboid_volume, self.num_heads, global_head_C) \
                    .permute(0, 3, 1, 2, 4)  # (B, num_heads, num_cuboids, cuboid_volume, global_head_C)
                if self.use_global_self_attn:
                    g2g_global_qkv = self.g2g_global_qkv_net(global_vectors)\
                        .reshape(B, 1, num_global, 3, self.num_heads, global_head_C)\
                        .permute(3, 0, 4, 1, 2, 5)  # Shape (3, B, num_heads, 1, N, global_head_C)
                    g2g_global_q, g2g_global_k, g2g_global_v = g2g_global_qkv[0], g2g_global_qkv[1], g2g_global_qkv[2]
                    g2g_global_q = g2g_global_q.squeeze(2) * self.scale
            else:
                # Shared projections: the local q/k/v double as the
                # local<->global queries/keys/values.
                q_global, k_global, v_global = self.global_qkv(global_vectors)\
                    .reshape(B, 1, num_global, 3, self.num_heads, head_C)\
                    .permute(3, 0, 4, 1, 2, 5)  # Shape (3, B, num_heads, 1, N, head_C)
                q_global = q_global.squeeze(2) * self.scale
                l2g_q, g2l_k, g2l_v = q, k, v
                g2l_global_q, l2g_global_k, l2g_global_v = q_global, k_global, v_global
                if self.use_global_self_attn:
                    g2g_global_q, g2g_global_k, g2g_global_v = q_global, k_global, v_global
            l2g_attn_score = l2g_q @ l2g_global_k.transpose(-2, -1)  # Shape (B, num_heads, num_cuboids, cuboid_volume, N)
            attn_score_l2l_l2g = torch.cat((attn_score, l2g_attn_score),
                                           dim=-1)  # Shape (B, num_heads, num_cuboids, cuboid_volume, cuboid_volume + N)
            # Global tokens are always attendable (mask value 1).
            attn_mask_l2l_l2g = F.pad(attn_mask, (0, num_global), "constant", 1)
            v_l_g = torch.cat((v, l2g_global_v.expand(B, self.num_heads, num_cuboids, num_global, head_C)),
                              dim=3)
            # local to local and global attention
            attn_score_l2l_l2g = masked_softmax(attn_score_l2l_l2g, mask=attn_mask_l2l_l2g)
            attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g)  # Shape (B, num_heads, num_cuboids, x_cuboid_volume, mem_cuboid_volume + K))
            reordered_x = (attn_score_l2l_l2g @ v_l_g).permute(0, 2, 3, 1, 4) \
                .reshape(B, num_cuboids, cuboid_volume, self.dim)
            # update global vectors
            if self.padding_type == 'ignore':
                # Mask out positions introduced by padding so the global
                # vectors only aggregate real data.
                g2l_attn_mask = torch.ones((1, T, H, W, 1), device=x.device)
                if pad_t > 0 or pad_h > 0 or pad_w > 0:
                    g2l_attn_mask = F.pad(g2l_attn_mask, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t))
                if any(i > 0 for i in shift_size):
                    g2l_attn_mask = torch.roll(g2l_attn_mask, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]),
                                               dims=(1, 2, 3))
                g2l_attn_mask = g2l_attn_mask.reshape((-1,))
            else:
                g2l_attn_mask = None
            g2l_attn_score = g2l_global_q @ g2l_k.reshape(B, self.num_heads, num_cuboids * cuboid_volume, head_C).transpose(-2, -1)  # Shape (B, num_heads, N, num_cuboids * cuboid_volume)
            if self.use_global_self_attn:
                g2g_attn_score = g2g_global_q @ g2g_global_k.squeeze(2).transpose(-2, -1)
                g2all_attn_score = torch.cat((g2l_attn_score, g2g_attn_score),
                                             dim=-1)  # Shape (B, num_heads, N, num_cuboids * cuboid_volume + N)
                if g2l_attn_mask is not None:
                    g2all_attn_mask = F.pad(g2l_attn_mask, (0, num_global), "constant", 1)
                else:
                    g2all_attn_mask = None
                new_v = torch.cat((g2l_v.reshape(B, self.num_heads, num_cuboids * cuboid_volume, global_head_C),
                                   g2g_global_v.reshape(B, self.num_heads, num_global, global_head_C)),
                                  dim=2)
            else:
                g2all_attn_score = g2l_attn_score
                g2all_attn_mask = g2l_attn_mask
                new_v = g2l_v.reshape(B, self.num_heads, num_cuboids * cuboid_volume, global_head_C)
            g2all_attn_score = masked_softmax(g2all_attn_score, mask=g2all_attn_mask)
            g2all_attn_score = self.global_attn_drop(g2all_attn_score)
            new_global_vector = (g2all_attn_score @ new_v).permute(0, 2, 1, 3).\
                reshape(B, num_global, self.global_dim_ratio*self.dim)
        else:
            attn_score = masked_softmax(attn_score, mask=attn_mask)
            attn_score = self.attn_drop(attn_score)  # Shape (B, num_heads, num_cuboids, cuboid_volume, cuboid_volume (+ K))
            reordered_x = (attn_score @ v).permute(0, 2, 3, 1, 4).reshape(B, num_cuboids, cuboid_volume, self.dim)
        if self.use_final_proj:
            reordered_x = self.proj_drop(self.proj(reordered_x))
            if self.use_global_vector:
                new_global_vector = self.proj_drop(self.global_proj(new_global_vector))
        # Step-5: Shift back and slice
        shifted_x = cuboid_reorder_reverse(reordered_x, cuboid_size=cuboid_size, strategy=self.strategy,
                                           orig_data_shape=(T + pad_t, H + pad_h, W + pad_w))
        if any(i > 0 for i in shift_size):
            x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))
        else:
            x = shifted_x
        x = _generalize_unpadding(x, pad_t=pad_t, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type)
        if self.use_global_vector:
            return x, new_global_vector
        else:
            return x
class StackCuboidSelfAttentionBlock(nn.Module):
    """A stack of cuboid self-attention layers with (optionally interleaved) FFNs.

    - "use_inter_ffn" is True
        x --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out
           |             ^   |             ^
           |             |   |             |
           |-------------|   |-------------|
    - "use_inter_ffn" is False
        x --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out
           |             ^   |            ^             ^  |           ^
           |             |   |            |             |  |           |
           |-------------|   |------------|   ----------|  |-----------|
    If we have enabled global memory vectors, each attention layer also
    attends to and updates the global vectors (residually).
    """
    def __init__(self,
                 dim,
                 num_heads,
                 block_cuboid_size=[(4, 4, 4), (4, 4, 4)],
                 block_shift_size=[(0, 0, 0), (2, 2, 2)],
                 block_strategy=[('d', 'd', 'd'),
                                 ('l', 'l', 'l')],
                 padding_type='ignore',
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop=0.0,
                 proj_drop=0.0,
                 ffn_drop=0.0,
                 activation='leaky',
                 gated_ffn=False,
                 norm_layer='layer_norm',
                 use_inter_ffn=False,
                 use_global_vector=False,
                 use_global_vector_ffn=True,
                 use_global_self_attn=False,
                 separate_global_qkv=False,
                 global_dim_ratio=1,
                 checkpoint_level=True,
                 use_relative_pos=True,
                 use_final_proj=True,
                 # initialization
                 attn_linear_init_mode="0",
                 ffn_linear_init_mode="0",
                 norm_init_mode="0",
                 ):
        """Build one attention layer per entry of ``block_cuboid_size`` and
        the matching FFN(s).

        When ``use_inter_ffn`` is True, one FFN is created per attention
        layer; otherwise a single shared FFN (``self.ffn_l[0]``) is used
        after all attention layers.
        """
        super(StackCuboidSelfAttentionBlock, self).__init__()
        # initialization
        self.attn_linear_init_mode = attn_linear_init_mode
        self.ffn_linear_init_mode = ffn_linear_init_mode
        self.norm_init_mode = norm_init_mode
        # NOTE(review): the first clause checks the length of the *first
        # cuboid tuple*, not the list of cuboid sizes -- confirm this is the
        # intended validation.
        assert len(block_cuboid_size[0]) > 0 and len(block_shift_size) > 0 and len(block_strategy) > 0,\
            f'Format of the block cuboid size is not correct.' \
            f' block_cuboid_size={block_cuboid_size}'
        assert len(block_cuboid_size) == len(block_shift_size) == len(block_strategy)
        self.num_attn = len(block_cuboid_size)
        self.checkpoint_level = checkpoint_level
        self.use_inter_ffn = use_inter_ffn
        # global vectors
        self.use_global_vector = use_global_vector
        self.use_global_vector_ffn = use_global_vector_ffn
        self.use_global_self_attn = use_global_self_attn
        self.global_dim_ratio = global_dim_ratio
        if self.use_inter_ffn:
            # One FFN per attention layer.
            self.ffn_l = nn.ModuleList(
                [PositionwiseFFN(
                    units=dim,
                    hidden_size=4 * dim,
                    activation_dropout=ffn_drop,
                    dropout=ffn_drop,
                    gated_proj=gated_ffn,
                    activation=activation,
                    normalization=norm_layer,
                    pre_norm=True,
                    linear_init_mode=ffn_linear_init_mode,
                    norm_init_mode=norm_init_mode,)
                    for _ in range(self.num_attn)])
            if self.use_global_vector_ffn and self.use_global_vector:
                self.global_ffn_l = nn.ModuleList(
                    [PositionwiseFFN(
                        units=global_dim_ratio * dim,
                        hidden_size=global_dim_ratio * 4 * dim,
                        activation_dropout=ffn_drop,
                        dropout=ffn_drop,
                        gated_proj=gated_ffn,
                        activation=activation,
                        normalization=norm_layer,
                        pre_norm=True,
                        linear_init_mode=ffn_linear_init_mode,
                        norm_init_mode=norm_init_mode,)
                        for _ in range(self.num_attn)])
        else:
            # A single shared FFN applied once after all attention layers.
            self.ffn_l = nn.ModuleList(
                [PositionwiseFFN(
                    units=dim, hidden_size=4 * dim,
                    activation_dropout=ffn_drop,
                    dropout=ffn_drop,
                    gated_proj=gated_ffn, activation=activation,
                    normalization=norm_layer,
                    pre_norm=True,
                    linear_init_mode=ffn_linear_init_mode,
                    norm_init_mode=norm_init_mode,)])
            if self.use_global_vector_ffn and self.use_global_vector:
                self.global_ffn_l = nn.ModuleList(
                    [PositionwiseFFN(
                        units=global_dim_ratio * dim,
                        hidden_size=global_dim_ratio * 4 * dim,
                        activation_dropout=ffn_drop,
                        dropout=ffn_drop,
                        gated_proj=gated_ffn, activation=activation,
                        normalization=norm_layer,
                        pre_norm=True,
                        linear_init_mode=ffn_linear_init_mode,
                        norm_init_mode=norm_init_mode,)])
        self.attn_l = nn.ModuleList(
            [CuboidSelfAttentionLayer(
                dim=dim, num_heads=num_heads,
                cuboid_size=ele_cuboid_size,
                shift_size=ele_shift_size,
                strategy=ele_strategy,
                padding_type=padding_type,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                attn_drop=attn_drop,
                proj_drop=proj_drop,
                norm_layer=norm_layer,
                use_global_vector=use_global_vector,
                use_global_self_attn=use_global_self_attn,
                separate_global_qkv=separate_global_qkv,
                global_dim_ratio=global_dim_ratio,
                checkpoint_level=checkpoint_level,
                use_relative_pos=use_relative_pos,
                use_final_proj=use_final_proj,
                attn_linear_init_mode=attn_linear_init_mode,
                ffn_linear_init_mode=ffn_linear_init_mode,
                norm_init_mode=norm_init_mode,)
                for ele_cuboid_size, ele_shift_size, ele_strategy
                in zip(block_cuboid_size, block_shift_size, block_strategy)])
        # NOTE(review): unlike the sibling attention layers, __init__ does
        # not call self.reset_parameters(); sub-modules self-initialize in
        # their own constructors, but confirm whether an outer module is
        # expected to invoke reset_parameters() explicitly.
    def reset_parameters(self):
        """Re-initialize all FFNs and attention layers in the stack."""
        for m in self.ffn_l:
            m.reset_parameters()
        if self.use_global_vector_ffn and self.use_global_vector:
            for m in self.global_ffn_l:
                m.reset_parameters()
        for m in self.attn_l:
            m.reset_parameters()
    def forward(self, x, global_vectors=None):
        """Run the attention/FFN stack with residual connections.

        Parameters
        ----------
        x
            Input tensor with shape (B, T, H, W, C).
        global_vectors
            Optional global vectors; required when ``use_global_vector``.

        Returns
        -------
        ``x`` (and the updated ``global_vectors`` when ``use_global_vector``).
        ``checkpoint_level`` >= 1 checkpoints the FFNs and >= 2 additionally
        checkpoints the attention layers (training mode only).
        """
        if self.use_inter_ffn:
            if self.use_global_vector:
                for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)):
                    if self.checkpoint_level >= 2 and self.training:
                        x_out, global_vectors_out = checkpoint.checkpoint(attn, x, global_vectors)
                    else:
                        x_out, global_vectors_out = attn(x, global_vectors)
                    # Residual updates for both the local field and the globals.
                    x = x + x_out
                    global_vectors = global_vectors + global_vectors_out
                    if self.checkpoint_level >= 1 and self.training:
                        x = checkpoint.checkpoint(ffn, x)
                        if self.use_global_vector_ffn:
                            global_vectors = checkpoint.checkpoint(self.global_ffn_l[idx], global_vectors)
                    else:
                        x = ffn(x)
                        if self.use_global_vector_ffn:
                            global_vectors = self.global_ffn_l[idx](global_vectors)
                return x, global_vectors
            else:
                for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)):
                    if self.checkpoint_level >= 2 and self.training:
                        x = x + checkpoint.checkpoint(attn, x)
                    else:
                        x = x + attn(x)
                    if self.checkpoint_level >= 1 and self.training:
                        x = checkpoint.checkpoint(ffn, x)
                    else:
                        x = ffn(x)
                return x
        else:
            if self.use_global_vector:
                for idx, attn in enumerate(self.attn_l):
                    if self.checkpoint_level >= 2 and self.training:
                        x_out, global_vectors_out = checkpoint.checkpoint(attn, x, global_vectors)
                    else:
                        x_out, global_vectors_out = attn(x, global_vectors)
                    x = x + x_out
                    global_vectors = global_vectors + global_vectors_out
                if self.checkpoint_level >= 1 and self.training:
                    x = checkpoint.checkpoint(self.ffn_l[0], x)
                    if self.use_global_vector_ffn:
                        global_vectors = checkpoint.checkpoint(self.global_ffn_l[0], global_vectors)
                else:
                    x = self.ffn_l[0](x)
                    if self.use_global_vector_ffn:
                        global_vectors = self.global_ffn_l[0](global_vectors)
                return x, global_vectors
            else:
                for idx, attn in enumerate(self.attn_l):
                    if self.checkpoint_level >= 2 and self.training:
                        out = checkpoint.checkpoint(attn, x)
                    else:
                        out = attn(x)
                    x = x + out
                if self.checkpoint_level >= 1 and self.training:
                    x = checkpoint.checkpoint(self.ffn_l[0], x)
                else:
                    x = self.ffn_l[0](x)
                return x
@lru_cache()
def compute_cuboid_cross_attention_mask(T_x, T_mem, H, W, n_temporal, cuboid_hw, shift_hw, strategy,
                                        padding_type, device):
    """Compute the attention mask used by cuboid cross attention.

    Parameters
    ----------
    T_x
        Temporal length of the query tensor.
    T_mem
        Temporal length of the memory tensor.
    H
        Spatial height.
    W
        Spatial width.
    n_temporal
        Number of cuboids along the temporal axis.
    cuboid_hw
        (Bh, Bw) spatial cuboid size.
    shift_hw
        (sh, sw) spatial shift for shifted-window attention.
    strategy
        Decomposition strategy passed to ``cuboid_reorder``.
    padding_type
        'ignore' masks out padded positions; other types keep them attendable.
    device
        Device on which the mask tensors are created.

    Returns
    -------
    attn_mask
        Mask with shape (num_cuboid, x_cuboid_vol, mem_cuboid_vol).
        The padded values will always be masked. The other masks will ensure
        that the shifted windows will only attend to those in the shifted
        windows.
    """
    pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal
    pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal
    pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0]
    pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1]
    mem_cuboid_size = ((T_mem + pad_t_mem) // n_temporal,) + cuboid_hw
    x_cuboid_size = ((T_x + pad_t_x) // n_temporal,) + cuboid_hw
    # BUGFIX: `mem_mask`/`x_mask` must be defined on every path. Previously
    # they were only created inside `if pad... > 0:` blocks, so when no
    # padding was required the `cuboid_reorder(...)` calls below raised
    # NameError. The values on all previously-working paths are unchanged.
    if padding_type == 'ignore' and (pad_t_mem > 0 or pad_h > 0 or pad_w > 0):
        # Mark padded memory positions as non-attendable. The memory is
        # padded on the *left* of the temporal axis (see the cross-attention
        # layer's padding convention).
        mem_mask = torch.ones((1, T_mem, H, W, 1), dtype=torch.bool, device=device)
        mem_mask = F.pad(mem_mask, (0, 0, 0, pad_w, 0, pad_h, pad_t_mem, 0))
    else:
        # No padding needed, or padding values are treated as valid data.
        mem_mask = torch.ones((1, T_mem + pad_t_mem, H + pad_h, W + pad_w, 1), dtype=torch.bool, device=device)
    if padding_type == 'ignore' and (pad_t_x > 0 or pad_h > 0 or pad_w > 0):
        # The query is padded on the *right* of the temporal axis.
        x_mask = torch.ones((1, T_x, H, W, 1), dtype=torch.bool, device=device)
        x_mask = F.pad(x_mask, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t_x))
    else:
        x_mask = torch.ones((1, T_x + pad_t_x, H + pad_h, W + pad_w, 1), dtype=torch.bool, device=device)
    if any(i > 0 for i in shift_hw):
        if padding_type == 'ignore':
            # Keep the masks aligned with the shifted data.
            x_mask = torch.roll(x_mask, shifts=(-shift_hw[0], -shift_hw[1]), dims=(2, 3))
            mem_mask = torch.roll(mem_mask, shifts=(-shift_hw[0], -shift_hw[1]), dims=(2, 3))
    # (1, num_cuboids, cuboid_volume, 1)
    x_mask = cuboid_reorder(x_mask, x_cuboid_size, strategy=strategy)
    x_mask = x_mask.squeeze(-1).squeeze(0)  # (num_cuboid, x_cuboid_volume)
    num_cuboids, x_cuboid_volume = x_mask.shape
    mem_mask = cuboid_reorder(mem_mask, mem_cuboid_size, strategy=strategy)
    mem_mask = mem_mask.squeeze(-1).squeeze(0)  # (num_cuboid, mem_cuboid_volume)
    _, mem_cuboid_volume = mem_mask.shape
    # Prepare the Swin-style shift mask: positions that come from different
    # pre-shift regions get different region ids and must not attend to each
    # other. (With shift == 0 the final slice covers everything, so the mask
    # degenerates to all-True.)
    shift_mask = torch.zeros((1, n_temporal, H + pad_h, W + pad_w, 1), device=device)  # 1 1 H W 1
    cnt = 0
    for h in slice(-cuboid_hw[0]), slice(-cuboid_hw[0], -shift_hw[0]), slice(-shift_hw[0], None):
        for w in slice(-cuboid_hw[1]), slice(-cuboid_hw[1], -shift_hw[1]), slice(-shift_hw[1], None):
            shift_mask[:, :, h, w, :] = cnt
            cnt += 1
    shift_mask = cuboid_reorder(shift_mask, (1,) + cuboid_hw, strategy=strategy)
    shift_mask = shift_mask.squeeze(-1).squeeze(0)  # num_cuboids, bH * bW
    shift_mask = (shift_mask.unsqueeze(1) - shift_mask.unsqueeze(2)) == 0  # num_cuboids, bH * bW, bH * bW
    # Combine shift mask with the query/memory validity masks.
    bh_bw = cuboid_hw[0] * cuboid_hw[1]
    attn_mask = shift_mask.reshape((num_cuboids, 1, bh_bw, 1, bh_bw)) * x_mask.reshape((num_cuboids, -1, bh_bw, 1, 1))\
                * mem_mask.reshape(num_cuboids, 1, 1, -1, bh_bw)
    attn_mask = attn_mask.reshape(num_cuboids, x_cuboid_volume, mem_cuboid_volume)
    return attn_mask
class CuboidCrossAttentionLayer(nn.Module):
    """Implements the cuboid cross attention.
    The idea of Cuboid Cross Attention is to extend the idea of cuboid self attention to work for the
    encoder-decoder-type cross attention.
    Assume that there is a memory tensor with shape (T1, H, W, C) and another query tensor with shape (T2, H, W, C),
    Here, we decompose the query tensor and the memory tensor into the same number of cuboids and attend the cuboid in
    the query tensor with the corresponding cuboid in the memory tensor.
    For the height and width axes, we reuse the grid decomposition techniques described in the cuboid self-attention.
    For the temporal axis, the layer supports the "n_temporal" parameter, that controls the number of cuboids we can
    get after cutting the tensors. For example, if the temporal dilation is 2, both the query and
    memory will be decomposed into 2 cuboids along the temporal axis. Like in the Cuboid Self-attention,
    we support "local" and "dilated" decomposition strategy.
    The complexity of the layer is O((T2 / n_t * Bh * Bw) * (T1 / n_t * Bh * Bw) * n_t (H / Bh) (W / Bw)) = O(T2 * T1 / n_t H W Bh Bw)
    """
    def __init__(self,
                 dim,
                 num_heads,
                 n_temporal=1,
                 cuboid_hw=(7, 7),
                 shift_hw=(0, 0),
                 strategy=('d', 'l', 'l'),
                 padding_type='ignore',
                 cross_last_n_frames=None,
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop=0.0,
                 proj_drop=0.0,
                 max_temporal_relative=50,
                 norm_layer='layer_norm',
                 use_global_vector=True,
                 separate_global_qkv=False,
                 global_dim_ratio=1,
                 checkpoint_level=1,
                 use_relative_pos=True,
                 attn_linear_init_mode="0",
                 ffn_linear_init_mode="0",
                 norm_init_mode="0",
                 ):
        """
        Parameters
        ----------
        dim
            The channel dimension of both query and memory.
        num_heads
            Number of attention heads; must divide ``dim``.
        n_temporal
            Number of cuboids along the temporal axis.
        cuboid_hw
            (Bh, Bw) spatial cuboid size.
        shift_hw
            The shift window size as in shifted window attention
        strategy
            The decomposition strategy for the temporal axis, H axis and W axis
        max_temporal_relative
            The maximum temporal relative encoding difference
        cross_last_n_frames
            If provided, only cross attends to the last n frames of `mem`
        use_global_vector
            Whether the memory is coupled with global vectors
        checkpoint_level
            Level of checkpointing:
            0 --> no_checkpointing
            1 --> only checkpoint the FFN
            2 --> checkpoint both FFN and attention
        """
        super(CuboidCrossAttentionLayer, self).__init__()
        # initialization
        self.attn_linear_init_mode = attn_linear_init_mode
        self.ffn_linear_init_mode = ffn_linear_init_mode
        self.norm_init_mode = norm_init_mode
        self.dim = dim
        self.num_heads = num_heads
        self.n_temporal = n_temporal
        assert n_temporal > 0
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        # Shifting has no effect for dilated ('d') decompositions, so force
        # the shift to zero along those axes.
        shift_hw = list(shift_hw)
        if strategy[1] == 'd':
            shift_hw[0] = 0
        if strategy[2] == 'd':
            shift_hw[1] = 0
        self.cuboid_hw = cuboid_hw
        self.shift_hw = tuple(shift_hw)
        self.strategy = strategy
        self.padding_type = padding_type
        self.max_temporal_relative = max_temporal_relative
        self.cross_last_n_frames = cross_last_n_frames
        self.use_relative_pos = use_relative_pos
        # global vectors
        self.use_global_vector = use_global_vector
        self.separate_global_qkv = separate_global_qkv
        if global_dim_ratio != 1:
            assert separate_global_qkv == True, \
                f"Setting global_dim_ratio != 1 requires separate_global_qkv == True."
        self.global_dim_ratio = global_dim_ratio
        assert self.padding_type in ['ignore', 'zeros', 'nearest']
        if use_relative_pos:
            # Create relative positional embedding bias table, indexed by the
            # flattened relative (t, h, w) offset between query and memory
            # positions.
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros((2 * max_temporal_relative - 1) * (2 * cuboid_hw[0] - 1) * (2 * cuboid_hw[1] - 1), num_heads))
            nn.init.trunc_normal_(self.relative_position_bias_table, std=.02)
            coords_t = torch.arange(max_temporal_relative)
            coords_h = torch.arange(self.cuboid_hw[0])
            coords_w = torch.arange(self.cuboid_hw[1])
            coords = torch.stack(torch.meshgrid(coords_t, coords_h, coords_w))  # 3, maxT, Bh, Bw
            coords_flatten = torch.flatten(coords, 1)  # 3, maxT*Bh*Bw
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 3, maxT*Bh*Bw, maxT*Bh*Bw
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # maxT*Bh*Bw, maxT*Bh*Bw, 3
            relative_coords[:, :, 0] += max_temporal_relative - 1  # shift to start from 0
            relative_coords[:, :, 1] += self.cuboid_hw[0] - 1
            relative_coords[:, :, 2] += self.cuboid_hw[1] - 1
            # shape is (cuboid_volume, cuboid_volume)
            relative_position_index = relative_coords[:, :, 0] * (2 * self.cuboid_hw[0] - 1) * (2 * self.cuboid_hw[1] - 1)\
                                      + relative_coords[:, :, 1] * (2 * self.cuboid_hw[1] - 1) + relative_coords[:, :, 2]
            self.register_buffer("relative_position_index", relative_position_index)
        self.q_proj = nn.Linear(dim, dim, bias=qkv_bias)
        self.kv_proj = nn.Linear(dim, dim * 2, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        if self.use_global_vector:
            if self.separate_global_qkv:
                self.l2g_q_net = nn.Linear(dim, dim, bias=qkv_bias)
                self.l2g_global_kv_net = nn.Linear(
                    in_features=global_dim_ratio * dim,
                    out_features=dim * 2,
                    bias=qkv_bias)
            # When separate_global_qkv is False, `kv_proj` is reused for the
            # global vectors in forward() (requires global_dim_ratio == 1).
        self.norm = get_norm_layer(norm_layer, in_channels=dim)
        # NOTE(review): stored under a leading-underscore name and not read
        # anywhere in this class as shown (the self-attention layer uses
        # `self.checkpoint_level`) -- confirm whether this is intentional.
        self._checkpoint_level = checkpoint_level
        self.reset_parameters()
    def reset_parameters(self):
        """Re-initialize all learnable sub-modules of this layer."""
        apply_initialization(self.q_proj,
                             linear_mode=self.attn_linear_init_mode)
        apply_initialization(self.kv_proj,
                             linear_mode=self.attn_linear_init_mode)
        apply_initialization(self.proj,
                             linear_mode=self.ffn_linear_init_mode)
        apply_initialization(self.norm,
                             norm_mode=self.norm_init_mode)
        if self.use_global_vector:
            if self.separate_global_qkv:
                apply_initialization(self.l2g_q_net,
                                     linear_mode=self.attn_linear_init_mode)
                apply_initialization(self.l2g_global_kv_net,
                                     linear_mode=self.attn_linear_init_mode)
    def forward(self, x, mem, mem_global_vectors=None):
        """Calculate the forward
        Along the temporal axis, we pad the mem tensor from the left and the x tensor from the right so that the
        relative position encoding can be calculated correctly. For example:
        mem: 0, 1, 2, 3, 4                   x: 0, 1, 2, 3, 4, 5
        n_temporal = 1
        mem: 0, 1, 2, 3, 4                   x: 0, 1, 2, 3, 4, 5
        n_temporal = 2
        mem: pad, 1, 3                       x: 0, 2, 4
        mem: 0, 2, 4                         x: 1, 3, 5
        n_temporal = 3
        mem: pad, 2                          dec: 0, 3
        mem: 0, 3                            dec: 1, 4
        mem: 1, 4                            dec: 2, 5
        Parameters
        ----------
        x
            The input of the layer. It will have shape (B, T, H, W, C)
        mem
            The memory. It will have shape (B, T_mem, H, W, C)
        mem_global_vectors
            The global vectors from the memory. It will have shape (B, N, C)
        Returns
        -------
        out
            Output tensor should have shape (B, T, H, W, C_out)
        """
        if self.cross_last_n_frames is not None:
            # Only cross-attend to the trailing frames of the memory.
            cross_last_n_frames = int(min(self.cross_last_n_frames, mem.shape[1]))
            mem = mem[:, -cross_last_n_frames:, ...]
        if self.use_global_vector:
            _, num_global, _ = mem_global_vectors.shape
        x = self.norm(x)
        B, T_x, H, W, C_in = x.shape
        B_mem, T_mem, H_mem, W_mem, C_mem = mem.shape
        assert T_x < self.max_temporal_relative and T_mem < self.max_temporal_relative
        cuboid_hw = self.cuboid_hw
        n_temporal = self.n_temporal
        shift_hw = self.shift_hw
        assert B_mem == B and H == H_mem and W == W_mem and C_in == C_mem,\
            f'Shape of memory and the input tensor does not match. x.shape={x.shape}, mem.shape={mem.shape}'
        pad_t_mem = (n_temporal - T_mem % n_temporal) % n_temporal
        pad_t_x = (n_temporal - T_x % n_temporal) % n_temporal
        pad_h = (cuboid_hw[0] - H % cuboid_hw[0]) % cuboid_hw[0]
        pad_w = (cuboid_hw[1] - W % cuboid_hw[1]) % cuboid_hw[1]
        # Step-1: Pad the memory and x. The memory is padded on the left of
        # the temporal axis and the query on the right (see docstring above).
        mem = _generalize_padding(mem, pad_t_mem, pad_h, pad_w, self.padding_type, t_pad_left=True)
        x = _generalize_padding(x, pad_t_x, pad_h, pad_w, self.padding_type, t_pad_left=False)
        # Step-2: Shift the tensor based on shift window attention.
        if any(i > 0 for i in shift_hw):
            shifted_x = torch.roll(x, shifts=(-shift_hw[0], -shift_hw[1]), dims=(2, 3))
            shifted_mem = torch.roll(mem, shifts=(-shift_hw[0], -shift_hw[1]), dims=(2, 3))
        else:
            shifted_x = x
            shifted_mem = mem
        # Step-3: Reorder the tensors
        mem_cuboid_size = (mem.shape[1] // n_temporal,) + cuboid_hw
        x_cuboid_size = (x.shape[1] // n_temporal,) + cuboid_hw
        # Mem shape is (B, num_cuboids, mem_cuboid_volume, C), x shape is (B, num_cuboids, x_cuboid_volume, C)
        reordered_mem = cuboid_reorder(shifted_mem, cuboid_size=mem_cuboid_size, strategy=self.strategy)
        reordered_x = cuboid_reorder(shifted_x, cuboid_size=x_cuboid_size, strategy=self.strategy)
        _, num_cuboids_mem, mem_cuboid_volume, _ = reordered_mem.shape
        _, num_cuboids, x_cuboid_volume, _ = reordered_x.shape
        assert num_cuboids_mem == num_cuboids, f'Number of cuboids do not match. num_cuboids={num_cuboids},' \
                                               f' num_cuboids_mem={num_cuboids_mem}'
        # Step-4: Perform self-attention
        # (num_cuboids, x_cuboid_volume, mem_cuboid_volume)
        attn_mask = compute_cuboid_cross_attention_mask(T_x, T_mem, H, W, n_temporal, cuboid_hw, shift_hw,
                                                        strategy=self.strategy,
                                                        padding_type=self.padding_type,
                                                        device=x.device)
        head_C = C_in // self.num_heads
        # (2, B, num_heads, num_cuboids, mem_cuboid_volume, head_C)
        kv = self.kv_proj(reordered_mem).reshape(B, num_cuboids, mem_cuboid_volume, 2, self.num_heads, head_C).permute(3, 0, 4, 1, 2, 5)
        k, v = kv[0], kv[1]  # Each has shape (B, num_heads, num_cuboids, mem_cuboid_volume, head_C)
        q = self.q_proj(reordered_x).reshape(B, num_cuboids, x_cuboid_volume, self.num_heads, head_C).permute(0, 3, 1, 2, 4)  # Shape (B, num_heads, num_cuboids, x_cuboids_volume, head_C)
        q = q * self.scale
        attn_score = q @ k.transpose(-2, -1)  # Shape (B, num_heads, num_cuboids, x_cuboids_volume, mem_cuboid_volume)
        if self.use_relative_pos:
            relative_position_bias = self.relative_position_bias_table[
                self.relative_position_index[:x_cuboid_volume, :mem_cuboid_volume].reshape(-1)].reshape(
                x_cuboid_volume, mem_cuboid_volume, -1)  # (cuboid_volume, cuboid_volume, num_head)
            relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous().unsqueeze(1)  # num_heads, 1, x_cuboids_volume, mem_cuboid_volume
            attn_score = attn_score + relative_position_bias  # Shape (B, num_heads, num_cuboids, x_cuboids_volume, mem_cuboid_volume)
        if self.use_global_vector:
            if self.separate_global_qkv:
                l2g_q = self.l2g_q_net(reordered_x) \
                    .reshape(B, num_cuboids, x_cuboid_volume, self.num_heads, head_C) \
                    .permute(0, 3, 1, 2, 4)  # (B, num_heads, num_cuboids, cuboid_volume, head_C)
                l2g_q = l2g_q * self.scale
                l2g_global_kv = self.l2g_global_kv_net(mem_global_vectors) \
                    .reshape(B, 1, num_global, 2, self.num_heads, head_C) \
                    .permute(3, 0, 4, 1, 2, 5)  # Shape (2, B, num_heads, 1, N, head_C)
                l2g_global_k, l2g_global_v = l2g_global_kv[0], l2g_global_kv[1]
            else:
                # Reuse the memory kv projection for the global vectors.
                kv_global = self.kv_proj(mem_global_vectors).reshape(B, 1, num_global, 2, self.num_heads, head_C).permute(3, 0, 4, 1, 2, 5)
                l2g_global_k, l2g_global_v = kv_global[0], kv_global[1]  # Shape (B, num_heads, 1, num_global, head_C)
                l2g_q = q
            l2g_attn_score = l2g_q @ l2g_global_k.transpose(-2, -1)  # Shape (B, num_heads, num_cuboids, x_cuboid_volume, num_global)
            attn_score_l2l_l2g = torch.cat((attn_score, l2g_attn_score),
                                           dim=-1)
            # Global tokens are always attendable (mask value 1).
            attn_mask_l2l_l2g = F.pad(attn_mask, (0, num_global), "constant", 1)  # Shape (num_cuboids, x_cuboid_volume, mem_cuboid_volume + num_global)
            v_l_g = torch.cat((v, l2g_global_v.expand(B, self.num_heads, num_cuboids, num_global, head_C)),
                              dim=3)  # Shape (B, num_heads, num_cuboids, mem_cuboid_volume + num_global, head_C)
            # local to local and global attention
            attn_score_l2l_l2g = masked_softmax(attn_score_l2l_l2g, mask=attn_mask_l2l_l2g)
            attn_score_l2l_l2g = self.attn_drop(attn_score_l2l_l2g)  # Shape (B, num_heads, num_cuboids, x_cuboid_volume, mem_cuboid_volume + K))
            reordered_x = (attn_score_l2l_l2g @ v_l_g).permute(0, 2, 3, 1, 4) \
                .reshape(B, num_cuboids, x_cuboid_volume, self.dim)
        else:
            attn_score = masked_softmax(attn_score, mask=attn_mask)
            attn_score = self.attn_drop(attn_score)  # Shape (B, num_heads, num_cuboids, x_cuboid_volume, mem_cuboid_volume)
            reordered_x = (attn_score @ v).permute(0, 2, 3, 1, 4).reshape(B, num_cuboids, x_cuboid_volume, self.dim)
        reordered_x = self.proj_drop(self.proj(reordered_x))
        # Step-5: Shift back and slice
        shifted_x = cuboid_reorder_reverse(reordered_x, cuboid_size=x_cuboid_size, strategy=self.strategy,
                                           orig_data_shape=(x.shape[1], x.shape[2], x.shape[3]))
        if any(i > 0 for i in shift_hw):
            x = torch.roll(shifted_x, shifts=(shift_hw[0], shift_hw[1]), dims=(2, 3))
        else:
            x = shifted_x
        x = _generalize_unpadding(x, pad_t=pad_t_x, pad_h=pad_h, pad_w=pad_w, padding_type=self.padding_type)
        return x
class DownSampling3D(nn.Module):
    """The 3D down-sampling layer.

    3d_interp_2d:
        x --> conv3d_3X3X3 (mid_dim) + leaky_relu --> downsample (interp) --> conv2d_3x3
    2d_interp_2d:
        x --> conv2d_3x3 (mid_dim) + leaky_relu --> downsample (interp) --> conv2d_3x3

    We add an additional conv layer before the output.
    For any options, if the target_size is the same as the input size, we will skip the interpolation layer.
    """
    def __init__(self, original_size, target_size, in_channels, out_dim, mid_dim=16, act_type='leaky',
                 arch_type='2d_interp_2d'):
        """
        Parameters
        ----------
        original_size
            The original size of the tensor. It will be a tuple/list that contains T, H, W
        target_size
            Will be a tuple/list that contains T_new, H_new, W_new
        in_channels
            The input channels
        out_dim
            The output dimension of the layer
        mid_dim
            Dimension of the intermediate projection layer
        act_type
            Type of the activation
        arch_type
            Type of the layer.
        """
        super(DownSampling3D, self).__init__()
        self.arch_type = arch_type
        self.original_size = original_size
        self.target_size = target_size
        self.mid_dim = mid_dim
        self.out_dim = out_dim
        if self.arch_type == '3d_interp_2d':
            self.inter_conv = nn.Conv3d(in_channels=in_channels, out_channels=mid_dim, kernel_size=(3, 3, 3),
                                        padding=(1, 1, 1))
            self.act = get_activation(act_type)
        elif self.arch_type == '2d_interp_2d':
            self.inter_conv = nn.Conv2d(in_channels=in_channels, out_channels=mid_dim, kernel_size=(3, 3),
                                        padding=(1, 1))
            self.act = get_activation(act_type)
        else:
            raise NotImplementedError
        self.conv = nn.Conv2d(in_channels=mid_dim, out_channels=out_dim, kernel_size=(3, 3), padding=(1, 1))
        self.init_weights()
    def init_weights(self):
        """Initialize all child modules with the project default scheme."""
        for m in self.children():
            apply_initialization(m)
    def forward(self, x):
        """
        Parameters
        ----------
        x
            Shape (N, T, H, W, C)
        Returns
        -------
        out
            Shape (N, T_new, H_new, W_new, C_out)
        """
        B, T, H, W, C_in = x.shape
        if self.arch_type == '3d_interp_2d':
            x = self.act(self.inter_conv(x.permute(0, 4, 1, 2, 3)))  # Shape(B, mid_dim, T, H, W)
            if self.original_size[0] == self.target_size[0]:
                # Use 2D interpolation
                x = F.interpolate(x.permute(0, 2, 1, 3, 4).reshape(B * T, self.mid_dim, H, W), size=self.target_size[1:])  # Shape (B * T_new, mid_dim, H_new, W_new)
            else:
                # Use 3D interpolation
                x = F.interpolate(x, size=self.target_size)  # Shape (B, mid_dim, T_new, H_new, W_new)
                x = x.permute(0, 2, 1, 3, 4).reshape(B * self.target_size[0], self.mid_dim,
                                                     self.target_size[1], self.target_size[2])
        elif self.arch_type == '2d_interp_2d':
            x = self.act(self.inter_conv(x.permute(0, 1, 4, 2, 3).reshape(B * T, C_in, H, W)))  # (B * T, mid_dim, H, W)
            if self.original_size[0] == self.target_size[0]:
                # Use 2D interpolation
                x = F.interpolate(x, size=self.target_size[1:])  # Shape (B * T_new, mid_dim, H_new, W_new)
            else:
                # Use 3D interpolation
                # BUGFIX: the tensor here has `mid_dim` channels (output of
                # `inter_conv`, see the comment above), not C_in; reshaping
                # with C_in failed whenever in_channels != mid_dim.
                x = F.interpolate(x.reshape(B, T, self.mid_dim, H, W).permute(0, 2, 1, 3, 4), size=self.target_size)  # Shape (B, mid_dim, T_new, H_new, W_new)
                x = x.permute(0, 2, 1, 3, 4).reshape(B * self.target_size[0], self.mid_dim,
                                                     self.target_size[1], self.target_size[2])
        else:
            raise NotImplementedError
        x = self.conv(x)  # Shape (B * T_new, out_dim, H_new, W_new)
        # BUGFIX: permute back to the documented channel-last layout
        # (B, T_new, H_new, W_new, C_out) used by the rest of the model;
        # the previous permute(0, 2, 1, 3, 4) produced
        # (B, C_out, T_new, H_new, W_new), contradicting the docstring.
        x = x.reshape(B, self.target_size[0], self.out_dim, self.target_size[1], self.target_size[2])\
            .permute(0, 1, 3, 4, 2)
        return x
class CuboidTransformerEncoder(nn.Module):
    """Encoder of the CuboidTransformer

    Alternates stacks of cuboid self-attention blocks with down-sampling
    (patch-merging) layers:

    x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... --> out
    """
    def __init__(self,
                 input_shape,
                 base_units=128,
                 block_units=None,
                 scale_alpha=1.0,
                 depth=[4, 4, 4],
                 downsample=2,
                 downsample_type='patch_merge',
                 block_attn_patterns=None,
                 block_cuboid_size=[(4, 4, 4),
                                    (4, 4, 4)],
                 block_strategy=[('l', 'l', 'l'),
                                 ('d', 'd', 'd')],
                 block_shift_size=[(0, 0, 0),
                                   (0, 0, 0)],
                 num_heads=4,
                 attn_drop=0.0,
                 proj_drop=0.0,
                 ffn_drop=0.0,
                 activation="leaky",
                 ffn_activation='leaky',
                 gated_ffn=False,
                 norm_layer='layer_norm',
                 use_inter_ffn=True,
                 padding_type='ignore',
                 checkpoint_level=True,
                 use_relative_pos=True,
                 self_attn_use_final_proj=True,
                 # global vectors
                 use_global_vector=False,
                 use_global_vector_ffn=True,
                 use_global_self_attn=False,
                 separate_global_qkv=False,
                 global_dim_ratio=1,
                 # initialization
                 attn_linear_init_mode="0",
                 ffn_linear_init_mode="0",
                 conv_init_mode="0",
                 down_linear_init_mode="0",
                 norm_init_mode="0",
                 ):
        """
        Parameters
        ----------
        input_shape
            The shape of the input. Contains T, H, W, C
        base_units
            The number of units
        block_units
            Optional explicit channel count per block. If None, it is derived
            from ``base_units``, ``downsample`` and ``scale_alpha``.
        scale_alpha
            We scale up the channels based on the formula:
            - round_to(base_units * max(downsample_scale) ** units_alpha, 4)
        depth
            The number of layers for each block
        downsample
            The downsample ratio
        downsample_type
            Type of the downsampling layer
        block_attn_patterns
            Attention pattern for the cuboid attention for each block.
            When given, it overrides block_cuboid_size/block_strategy/block_shift_size.
        block_cuboid_size
            A list of cuboid size parameters
        block_strategy
            A list of cuboid strategies
        block_shift_size
            A list of shift sizes
        num_heads
            The number of heads.
        attn_drop
        proj_drop
        ffn_drop
        ffn_activation
            Activation used inside the FFNs of the attention blocks
        gated_ffn
            Whether to enable gated ffn or not
        norm_layer
            The normalization layer
        use_inter_ffn
            Whether to use intermediate FFN
        padding_type
        """
        super(CuboidTransformerEncoder, self).__init__()
        # initialization mode
        self.attn_linear_init_mode = attn_linear_init_mode
        self.ffn_linear_init_mode = ffn_linear_init_mode
        self.conv_init_mode = conv_init_mode
        self.down_linear_init_mode = down_linear_init_mode
        self.norm_init_mode = norm_init_mode
        self.input_shape = input_shape
        self.depth = depth
        self.num_blocks = len(depth)
        self.base_units = base_units
        self.scale_alpha = scale_alpha
        # Normalize a scalar downsample ratio to (T, H, W) form; by default the
        # temporal axis is not downsampled.
        if not isinstance(downsample, (tuple, list)):
            downsample = (1, downsample, downsample)
        self.downsample = downsample
        self.downsample_type = downsample_type
        self.num_heads = num_heads
        self.use_global_vector = use_global_vector
        self.checkpoint_level = checkpoint_level
        # Derive per-block channel counts by geometric growth (rounded to a
        # multiple of 4), unless they are given explicitly.
        if block_units is None:
            block_units = [round_to(base_units * int((max(downsample) ** scale_alpha) ** i), 4)
                           for i in range(self.num_blocks)]
        else:
            assert len(block_units) == self.num_blocks and block_units[0] == base_units
        self.block_units = block_units
        # One down-sampling layer between each pair of consecutive blocks.
        if self.num_blocks > 1:
            if downsample_type == 'patch_merge':
                self.down_layers = nn.ModuleList(
                    [PatchMerging3D(dim=self.block_units[i],
                                    downsample=downsample,
                                    # downsample=(1, 1, 1),
                                    padding_type=padding_type,
                                    out_dim=self.block_units[i + 1],
                                    linear_init_mode=down_linear_init_mode,
                                    norm_init_mode=norm_init_mode)
                     for i in range(self.num_blocks - 1)])
            else:
                raise NotImplementedError
            # Linear projections that grow the global-vector channels alongside
            # the feature-map channels at each down-sampling step.
            if self.use_global_vector:
                self.down_layer_global_proj = nn.ModuleList(
                    [nn.Linear(in_features=global_dim_ratio*self.block_units[i],
                               out_features=global_dim_ratio*self.block_units[i + 1])
                     for i in range(self.num_blocks - 1)])
        # Resolve the attention hyper-parameters: either generated per block
        # from a named pattern, or broadcast/validated from the explicit lists.
        if block_attn_patterns is not None:
            mem_shapes = self.get_mem_shapes()
            if isinstance(block_attn_patterns, (tuple, list)):
                assert len(block_attn_patterns) == self.num_blocks
            else:
                block_attn_patterns = [block_attn_patterns for _ in range(self.num_blocks)]
            block_cuboid_size = []
            block_strategy = []
            block_shift_size = []
            for idx, key in enumerate(block_attn_patterns):
                func = CuboidSelfAttentionPatterns.get(key)
                cuboid_size, strategy, shift_size = func(mem_shapes[idx])
                block_cuboid_size.append(cuboid_size)
                block_strategy.append(strategy)
                block_shift_size.append(shift_size)
        else:
            # A single (non-nested) spec is replicated to every block;
            # a nested spec must match the number of blocks exactly.
            if not isinstance(block_cuboid_size[0][0], (list, tuple)):
                block_cuboid_size = [block_cuboid_size for _ in range(self.num_blocks)]
            else:
                assert len(block_cuboid_size) == self.num_blocks,\
                    f'Incorrect input format! Received block_cuboid_size={block_cuboid_size}'
            if not isinstance(block_strategy[0][0], (list, tuple)):
                block_strategy = [block_strategy for _ in range(self.num_blocks)]
            else:
                assert len(block_strategy) == self.num_blocks,\
                    f'Incorrect input format! Received block_strategy={block_strategy}'
            if not isinstance(block_shift_size[0][0], (list, tuple)):
                block_shift_size = [block_shift_size for _ in range(self.num_blocks)]
            else:
                assert len(block_shift_size) == self.num_blocks,\
                    f'Incorrect input format! Received block_shift_size={block_shift_size}'
        self.block_cuboid_size = block_cuboid_size
        self.block_strategy = block_strategy
        self.block_shift_size = block_shift_size
        # blocks[i] is a stack of depth[i] self-attention blocks operating at
        # the i-th resolution level with block_units[i] channels.
        self.blocks = nn.ModuleList([nn.Sequential(
            *[StackCuboidSelfAttentionBlock(
                dim=self.block_units[i],
                num_heads=num_heads,
                block_cuboid_size=block_cuboid_size[i],
                block_strategy=block_strategy[i],
                block_shift_size=block_shift_size[i],
                attn_drop=attn_drop,
                proj_drop=proj_drop,
                ffn_drop=ffn_drop,
                activation=ffn_activation,
                gated_ffn=gated_ffn,
                norm_layer=norm_layer,
                use_inter_ffn=use_inter_ffn,
                padding_type=padding_type,
                use_global_vector=use_global_vector,
                use_global_vector_ffn=use_global_vector_ffn,
                use_global_self_attn=use_global_self_attn,
                separate_global_qkv=separate_global_qkv,
                global_dim_ratio=global_dim_ratio,
                checkpoint_level=checkpoint_level,
                use_relative_pos=use_relative_pos,
                use_final_proj=self_attn_use_final_proj,
                # initialization
                attn_linear_init_mode=attn_linear_init_mode,
                ffn_linear_init_mode=ffn_linear_init_mode,
                norm_init_mode=norm_init_mode,
            ) for _ in range(depth[i])])
            for i in range(self.num_blocks)])
        self.reset_parameters()
    def reset_parameters(self):
        # Re-initialize down-sampling layers, global projections and all
        # attention blocks according to the configured init modes.
        if self.num_blocks > 1:
            for m in self.down_layers:
                m.reset_parameters()
            if self.use_global_vector:
                apply_initialization(self.down_layer_global_proj,
                                     linear_mode=self.down_linear_init_mode)
        for ms in self.blocks:
            for m in ms:
                m.reset_parameters()
    def get_mem_shapes(self):
        """Get the shape of the output memory based on the input shape. This can be used for constructing the decoder.

        Returns
        -------
        mem_shapes
            A list of shapes of the output memory, one per resolution level,
            from the input resolution down to the coarsest level.
        """
        if self.num_blocks == 1:
            return [self.input_shape]
        else:
            mem_shapes = [self.input_shape]
            curr_shape = self.input_shape
            for down_layer in self.down_layers:
                curr_shape = down_layer.get_out_shape(curr_shape)
                mem_shapes.append(curr_shape)
            return mem_shapes
    def forward(self, x, global_vectors=None):
        """
        Parameters
        ----------
        x
            Shape (B, T, H, W, C)
        global_vectors
            Optional global vectors, only used when ``use_global_vector``;
            presumably shaped (B, N_global, C) — TODO confirm against
            StackCuboidSelfAttentionBlock.
        Returns
        -------
        out
            A list of tensors from the bottom layer to the top layer of the encoder. For example, it can have shape
            - (B, T, H, W, C1)
            - (B, T, H // 2, W // 2, 2 * C1)
            - (B, T, H // 4, W // 4, 4 * C1)
            ...
        global_mem_out
            Optional; the per-level global vectors, returned only when
            ``use_global_vector`` is enabled.
        """
        B, T, H, W, C_in = x.shape
        assert (T, H, W, C_in) == self.input_shape
        if self.use_global_vector:
            out = []
            global_mem_out = []
            for i in range(self.num_blocks):
                # With global vectors the blocks must be called one by one so
                # the (x, global_vectors) pair can be threaded through.
                for l in self.blocks[i]:
                    x, global_vectors = l(x, global_vectors)
                out.append(x)
                global_mem_out.append(global_vectors)
                # Down-sample features and project global vectors between levels.
                if self.num_blocks > 1 and i < self.num_blocks - 1:
                    x = self.down_layers[i](x)
                    global_vectors = self.down_layer_global_proj[i](global_vectors)
            return out, global_mem_out
        else:
            out = []
            for i in range(self.num_blocks):
                x = self.blocks[i](x)
                out.append(x)
                if self.num_blocks > 1 and i < self.num_blocks - 1:
                    x = self.down_layers[i](x)
            return out
class StackCuboidCrossAttentionBlock(nn.Module):
    """A stack of cuboid cross attention layers.

    The advantage of cuboid attention is that we can combine cuboid attention building blocks with different
    hyper-parameters to mimic a broad range of space-time correlation patterns.

    - "use_inter_ffn" is True
        x, mem --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out
           |             ^    |             ^
           |             |    |             |
           |-------------|----|-------------|
    - "use_inter_ffn" is False
        x, mem --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out, mem
           |             ^    |            ^             ^  |           ^
           |             |    |            |             |  |           |
           |-------------|----|------------|-- ----------|--|-----------|
    """
    def __init__(self,
                 dim,
                 num_heads,
                 block_cuboid_hw=[(4, 4), (4, 4)],
                 block_shift_hw=[(0, 0), (2, 2)],
                 block_n_temporal=[1, 2],
                 block_strategy=[('d', 'd', 'd'),
                                 ('l', 'l', 'l')],
                 padding_type='ignore',
                 cross_last_n_frames=None,
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop=0.0,
                 proj_drop=0.0,
                 ffn_drop=0.0,
                 activation='leaky',
                 gated_ffn=False,
                 norm_layer='layer_norm',
                 use_inter_ffn=True,
                 max_temporal_relative=50,
                 checkpoint_level=1,
                 use_relative_pos=True,
                 # global vectors
                 use_global_vector=False,
                 separate_global_qkv=False,
                 global_dim_ratio=1,
                 # initialization
                 attn_linear_init_mode="0",
                 ffn_linear_init_mode="0",
                 norm_init_mode="0",
                 ):
        super(StackCuboidCrossAttentionBlock, self).__init__()
        # initialization
        self.attn_linear_init_mode = attn_linear_init_mode
        self.ffn_linear_init_mode = ffn_linear_init_mode
        self.norm_init_mode = norm_init_mode
        # The per-layer hyper-parameter lists must be non-empty and of equal
        # length; one cross-attention layer is built per list entry.
        assert len(block_cuboid_hw[0]) > 0 and len(block_shift_hw) > 0 and len(block_strategy) > 0,\
            f'Incorrect format.' \
            f' block_cuboid_hw={block_cuboid_hw}, block_shift_hw={block_shift_hw}, block_strategy={block_strategy}'
        assert len(block_cuboid_hw) == len(block_shift_hw) == len(block_strategy)
        self.num_attn = len(block_cuboid_hw)
        self.checkpoint_level = checkpoint_level
        self.use_inter_ffn = use_inter_ffn
        self.use_global_vector = use_global_vector
        if self.use_inter_ffn:
            # One FFN after each attention layer.
            self.ffn_l = nn.ModuleList(
                [PositionwiseFFN(
                    units=dim,
                    hidden_size=4 * dim,
                    activation_dropout=ffn_drop,
                    dropout=ffn_drop,
                    gated_proj=gated_ffn,
                    activation=activation,
                    normalization=norm_layer,
                    pre_norm=True,
                    linear_init_mode=ffn_linear_init_mode,
                    norm_init_mode=norm_init_mode,)
                 for _ in range(self.num_attn)])
        else:
            # A single FFN applied once after all attention layers.
            self.ffn_l = nn.ModuleList(
                [PositionwiseFFN(
                    units=dim,
                    hidden_size=4 * dim,
                    activation_dropout=ffn_drop,
                    dropout=ffn_drop,
                    gated_proj=gated_ffn,
                    activation=activation,
                    normalization=norm_layer,
                    pre_norm=True,
                    linear_init_mode=ffn_linear_init_mode,
                    norm_init_mode=norm_init_mode,)])
        # One cross-attention layer per (cuboid_hw, shift_hw, strategy,
        # n_temporal) tuple.
        self.attn_l = nn.ModuleList(
            [CuboidCrossAttentionLayer(
                dim=dim,
                num_heads=num_heads,
                cuboid_hw=ele_cuboid_hw,
                shift_hw=ele_shift_hw,
                strategy=ele_strategy,
                n_temporal=ele_n_temporal,
                cross_last_n_frames=cross_last_n_frames,
                padding_type=padding_type,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                attn_drop=attn_drop,
                proj_drop=proj_drop,
                norm_layer=norm_layer,
                max_temporal_relative=max_temporal_relative,
                use_global_vector=use_global_vector,
                separate_global_qkv=separate_global_qkv,
                global_dim_ratio=global_dim_ratio,
                checkpoint_level=checkpoint_level,
                use_relative_pos=use_relative_pos,
                attn_linear_init_mode=attn_linear_init_mode,
                ffn_linear_init_mode=ffn_linear_init_mode,
                norm_init_mode=norm_init_mode,)
             for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal
             in zip(block_cuboid_hw, block_shift_hw, block_strategy, block_n_temporal)])
    def reset_parameters(self):
        for m in self.ffn_l:
            m.reset_parameters()
        for m in self.attn_l:
            m.reset_parameters()
    def forward(self, x, mem, mem_global_vector=None):
        """
        Parameters
        ----------
        x
            Shape (B, T_x, H, W, C)
        mem
            Shape (B, T_mem, H, W, C)
        mem_global_vector
            Shape (B, N_global, C)
        Returns
        -------
        out
            Shape (B, T_x, H, W, C_out)
        """
        # Attention layers are applied as residual updates (x = x + attn(...)).
        # checkpoint_level >= 2 additionally recomputes attention in backward;
        # checkpoint_level >= 1 recomputes the FFNs. Checkpointing is only
        # active during training.
        if self.use_inter_ffn:
            for attn, ffn in zip(self.attn_l, self.ffn_l):
                if self.checkpoint_level >= 2 and self.training:
                    x = x + checkpoint.checkpoint(attn, x, mem, mem_global_vector)
                else:
                    x = x + attn(x, mem, mem_global_vector)
                if self.checkpoint_level >= 1 and self.training:
                    x = checkpoint.checkpoint(ffn, x)
                else:
                    x = ffn(x)
            return x
        else:
            # All attention layers first, then the single shared FFN.
            for attn in self.attn_l:
                if self.checkpoint_level >= 2 and self.training:
                    x = x + checkpoint.checkpoint(attn, x, mem, mem_global_vector)
                else:
                    x = x + attn(x, mem, mem_global_vector)
            if self.checkpoint_level >= 1 and self.training:
                x = checkpoint.checkpoint(self.ffn_l[0], x)
            else:
                x = self.ffn_l[0](x)
            return x
class CuboidTransformerDecoder(nn.Module):
"""Decoder of the CuboidTransformer.
For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention
Repeat the following structure K times
x --> StackCuboidSelfAttention --> |
|----> StackCuboidCrossAttention (If used) --> out
mem --> |
"""
def __init__(self,
target_temporal_length,
mem_shapes,
cross_start=0,
depth=[2, 2],
upsample_type="upsample",
upsample_kernel_size=3,
block_self_attn_patterns=None,
block_self_cuboid_size=[(4, 4, 4), (4, 4, 4)],
block_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')],
block_self_shift_size=[(1, 1, 1), (0, 0, 0)],
block_cross_attn_patterns=None,
block_cross_cuboid_hw=[(4, 4), (4, 4)],
block_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')],
block_cross_shift_hw=[(0, 0), (0, 0)],
block_cross_n_temporal=[1, 2],
cross_last_n_frames=None,
num_heads=4,
attn_drop=0.0,
proj_drop=0.0,
ffn_drop=0.0,
ffn_activation='leaky',
gated_ffn=False,
norm_layer='layer_norm',
use_inter_ffn=False,
hierarchical_pos_embed=False,
pos_embed_type='t+hw',
max_temporal_relative=50,
padding_type='ignore',
checkpoint_level=True,
use_relative_pos=True,
self_attn_use_final_proj=True,
use_first_self_attn=False,
# global vectors
use_self_global=False,
self_update_global=True,
use_cross_global=False,
use_global_vector_ffn=True,
use_global_self_attn=False,
separate_global_qkv=False,
global_dim_ratio=1,
# initialization
attn_linear_init_mode="0",
ffn_linear_init_mode="0",
conv_init_mode="0",
up_linear_init_mode="0",
norm_init_mode="0",
):
"""
Parameters
----------
target_temporal_length
mem_shapes
cross_start
The block to start cross attention
depth
Depth of each block
upsample_type
The type of the upsampling layers
upsample_kernel_size
block_self_attn_patterns
Pattern of the block self attentions
block_self_cuboid_size
block_self_cuboid_strategy
block_self_shift_size
block_cross_attn_patterns
block_cross_cuboid_hw
block_cross_cuboid_strategy
block_cross_shift_hw
block_cross_n_temporal
num_heads
attn_drop
proj_drop
ffn_drop
ffn_activation
gated_ffn
norm_layer
use_inter_ffn
hierarchical_pos_embed
Whether to add pos embedding for each hierarchy.
max_temporal_relative
padding_type
checkpoint_level
"""
super(CuboidTransformerDecoder, self).__init__()
# initialization mode
self.attn_linear_init_mode = attn_linear_init_mode
self.ffn_linear_init_mode = ffn_linear_init_mode
self.conv_init_mode = conv_init_mode
self.up_linear_init_mode = up_linear_init_mode
self.norm_init_mode = norm_init_mode
assert len(depth) == len(mem_shapes)
self.target_temporal_length = target_temporal_length
self.num_blocks = len(mem_shapes)
self.cross_start = cross_start
self.mem_shapes = mem_shapes
self.depth = depth
self.upsample_type = upsample_type
self.hierarchical_pos_embed = hierarchical_pos_embed
self.checkpoint_level = checkpoint_level
self.use_self_global = use_self_global
self.self_update_global = self_update_global
self.use_cross_global = use_cross_global
self.use_global_vector_ffn = use_global_vector_ffn
self.use_first_self_attn = use_first_self_attn
if block_self_attn_patterns is not None:
if isinstance(block_self_attn_patterns, (tuple, list)):
assert len(block_self_attn_patterns) == self.num_blocks
else:
block_self_attn_patterns = [block_self_attn_patterns for _ in range(self.num_blocks)]
block_self_cuboid_size = []
block_self_cuboid_strategy = []
block_self_shift_size = []
for idx, key in enumerate(block_self_attn_patterns):
func = CuboidSelfAttentionPatterns.get(key)
cuboid_size, strategy, shift_size = func(mem_shapes[idx])
block_self_cuboid_size.append(cuboid_size)
block_self_cuboid_strategy.append(strategy)
block_self_shift_size.append(shift_size)
else:
if not isinstance(block_self_cuboid_size[0][0], (list, tuple)):
block_self_cuboid_size = [block_self_cuboid_size for _ in range(self.num_blocks)]
else:
assert len(block_self_cuboid_size) == self.num_blocks,\
f'Incorrect input format! Received block_self_cuboid_size={block_self_cuboid_size}'
if not isinstance(block_self_cuboid_strategy[0][0], (list, tuple)):
block_self_cuboid_strategy = [block_self_cuboid_strategy for _ in range(self.num_blocks)]
else:
assert len(block_self_cuboid_strategy) == self.num_blocks,\
f'Incorrect input format! Received block_self_cuboid_strategy={block_self_cuboid_strategy}'
if not isinstance(block_self_shift_size[0][0], (list, tuple)):
block_self_shift_size = [block_self_shift_size for _ in range(self.num_blocks)]
else:
assert len(block_self_shift_size) == self.num_blocks,\
f'Incorrect input format! Received block_self_shift_size={block_self_shift_size}'
self_blocks = []
for i in range(self.num_blocks):
if not self.use_first_self_attn and i == self.num_blocks - 1:
# For the top block, we won't use an additional self attention layer.
ele_depth = depth[i] - 1
else:
ele_depth = depth[i]
stack_cuboid_blocks =\
[StackCuboidSelfAttentionBlock(
dim=self.mem_shapes[i][-1],
num_heads=num_heads,
block_cuboid_size=block_self_cuboid_size[i],
block_strategy=block_self_cuboid_strategy[i],
block_shift_size=block_self_shift_size[i],
attn_drop=attn_drop,
proj_drop=proj_drop,
ffn_drop=ffn_drop,
activation=ffn_activation,
gated_ffn=gated_ffn,
norm_layer=norm_layer,
use_inter_ffn=use_inter_ffn,
padding_type=padding_type,
use_global_vector=use_self_global,
use_global_vector_ffn=use_global_vector_ffn,
use_global_self_attn=use_global_self_attn,
separate_global_qkv=separate_global_qkv,
global_dim_ratio=global_dim_ratio,
checkpoint_level=checkpoint_level,
use_relative_pos=use_relative_pos,
use_final_proj=self_attn_use_final_proj,
# initialization
attn_linear_init_mode=attn_linear_init_mode,
ffn_linear_init_mode=ffn_linear_init_mode,
norm_init_mode=norm_init_mode,
) for _ in range(ele_depth)]
self_blocks.append(nn.ModuleList(stack_cuboid_blocks))
self.self_blocks = nn.ModuleList(self_blocks)
if block_cross_attn_patterns is not None:
if isinstance(block_cross_attn_patterns, (tuple, list)):
assert len(block_cross_attn_patterns) == self.num_blocks
else:
block_cross_attn_patterns = [block_cross_attn_patterns for _ in range(self.num_blocks)]
block_cross_cuboid_hw = []
block_cross_cuboid_strategy = []
block_cross_shift_hw = []
block_cross_n_temporal = []
for idx, key in enumerate(block_cross_attn_patterns):
if key == "last_frame_dst":
cuboid_hw = None
shift_hw = None
strategy = None
n_temporal = None
else: | func = CuboidCrossAttentionPatterns.get(key) | 0 | 2023-10-23 11:45:50+00:00 | 8k |
camenduru/MiniGPT-v2-hf | app.py | [
{
"identifier": "Config",
"path": "minigpt4/common/config.py",
"snippet": "class Config:\n def __init__(self, args):\n self.config = {}\n\n self.args = args\n\n # Register the config and configuration for setup\n registry.register(\"configuration\", self)\n\n user_c... | import argparse
import os
import random
import cv2
import re
import numpy as np
import torch
import html
import gradio as gr
import torchvision.transforms as T
import torch.backends.cudnn as cudnn
from collections import defaultdict
from PIL import Image
from minigpt4.common.config import Config
from minigpt4.common.registry import registry
from minigpt4.conversation.conversation import Conversation, SeparatorStyle, Chat
from minigpt4.datasets.builders import *
from minigpt4.models import *
from minigpt4.processors import *
from minigpt4.runners import *
from minigpt4.tasks import * | 3,711 |
# imports modules for registration
def parse_args(argv=None):
    """Parse command-line options for the demo.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse. ``None`` (the default) makes argparse fall
        back to ``sys.argv[1:]``, preserving the original behavior; passing
        an explicit list makes the function testable and reusable.

    Returns
    -------
    argparse.Namespace
        Namespace with ``cfg_path``, ``gpu_id`` and ``options`` attributes.
    """
    parser = argparse.ArgumentParser(description="Demo")
    parser.add_argument("--cfg-path", default='eval_configs/minigptv2_eval.yaml',
                        help="path to configuration file.")
    parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.")
    parser.add_argument(
        "--options",
        nargs="+",
        help="override some settings in the used config, the key-value pair "
             "in xxx=yyy format will be merged into config file (deprecate), "
             "change to --cfg-options instead.",
    )
    # argparse treats argv=None as "use sys.argv", so existing call sites
    # (parse_args() with no arguments) behave exactly as before.
    args = parser.parse_args(argv)
    return args
# Fix all RNG seeds (Python, NumPy, Torch) so demo outputs are reproducible.
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
# Disable cuDNN autotuning and force deterministic kernels for reproducibility.
cudnn.benchmark = False
cudnn.deterministic = True
print('Initializing Chat')
args = parse_args()
cfg = Config(args)
# Place the model on the GPU selected via --gpu-id.
device = 'cuda:{}'.format(args.gpu_id)
model_config = cfg.model_cfg
model_config.device_8bit = args.gpu_id
# Look up the model class registered under the configured architecture name
# and instantiate it from the config on the chosen device.
model_cls = registry.get_model_class(model_config.arch)
model = model_cls.from_config(model_config).to(device)
bounding_box_size = 100  # scale used for bounding-box coordinates — TODO confirm against downstream usage
vis_processor_cfg = cfg.datasets_cfg.cc_sbu_align.vis_processor.train
vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
# Inference only: disable dropout / batch-norm updates.
model = model.eval()
|
# imports modules for registration
def parse_args(argv=None):
    """Parse command-line options for the demo.

    Parameters
    ----------
    argv : list[str] | None
        Argument list to parse. ``None`` (the default) makes argparse fall
        back to ``sys.argv[1:]``, preserving the original behavior; passing
        an explicit list makes the function testable and reusable.

    Returns
    -------
    argparse.Namespace
        Namespace with ``cfg_path``, ``gpu_id`` and ``options`` attributes.
    """
    parser = argparse.ArgumentParser(description="Demo")
    parser.add_argument("--cfg-path", default='eval_configs/minigptv2_eval.yaml',
                        help="path to configuration file.")
    parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.")
    parser.add_argument(
        "--options",
        nargs="+",
        help="override some settings in the used config, the key-value pair "
             "in xxx=yyy format will be merged into config file (deprecate), "
             "change to --cfg-options instead.",
    )
    # argparse treats argv=None as "use sys.argv", so existing call sites
    # (parse_args() with no arguments) behave exactly as before.
    args = parser.parse_args(argv)
    return args
# Fix all RNG seeds (Python, NumPy, Torch) so demo outputs are reproducible.
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
# Disable cuDNN autotuning and force deterministic kernels for reproducibility.
cudnn.benchmark = False
cudnn.deterministic = True
print('Initializing Chat')
args = parse_args()
cfg = Config(args)
# Place the model on the GPU selected via --gpu-id.
device = 'cuda:{}'.format(args.gpu_id)
model_config = cfg.model_cfg
model_config.device_8bit = args.gpu_id
# Look up the model class registered under the configured architecture name
# and instantiate it from the config on the chosen device.
model_cls = registry.get_model_class(model_config.arch)
model = model_cls.from_config(model_config).to(device)
bounding_box_size = 100  # scale used for bounding-box coordinates — TODO confirm against downstream usage
vis_processor_cfg = cfg.datasets_cfg.cc_sbu_align.vis_processor.train
vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg)
# Inference only: disable dropout / batch-norm updates.
model = model.eval()
| CONV_VISION = Conversation( | 2 | 2023-10-15 19:54:22+00:00 | 8k |
nju-websoft/SCR | main.py | [
{
"identifier": "reset_id",
"path": "framework/utils.py",
"snippet": "def reset_id(labels, new_id):\n res = []\n for index in range(len(labels)):\n res.append(new_id[int(labels[index])])\n return torch.tensor(res)"
},
{
"identifier": "get_reset",
"path": "framework/utils.py",... | import torch
import random
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import math
import warnings
from framework.utils import reset_id, get_reset, trigger_combine_event, unpack_batch
from framework.optimization import BertAdam, AdamW
from argparse import ArgumentParser
from model.trigger_encoder import triggerEncoder
from model.argument_detection import argumentDetection
from model.classifier import classifier
from model.entity_detection import entityDetection
from framework.config import Config
from framework.dataloader import *
from transformers import logging
from sklearn.cluster import KMeans | 5,514 | logging.set_verbosity_warning()
logging.set_verbosity_error()
warnings.filterwarnings('ignore')
def eval_trigger(trigger_encoder, trigger_classifier, eval_data, config, new_id, save, ltlabel, id2label):
eval_data_loader = get_ACETriData_loader(eval_data, config, shuffle = True)
trigger_encoder.eval()
trigger_classifier.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader):
| logging.set_verbosity_warning()
logging.set_verbosity_error()
warnings.filterwarnings('ignore')
def eval_trigger(trigger_encoder, trigger_classifier, eval_data, config, new_id, save, ltlabel, id2label):
eval_data_loader = get_ACETriData_loader(eval_data, config, shuffle = True)
trigger_encoder.eval()
trigger_classifier.eval()
pred_num = 0
correct_num = 0
label_num = 0
pred_res = []
for step, (sentence_ids, input_ids, input_masks, in_sent, segment_ids, labels, ners, sentence) in enumerate(eval_data_loader):
| sentence_ids, input_ids, input_masks, segment_ids, labels, ners = unpack_batch(sentence_ids, input_ids, input_masks, segment_ids, labels, ners, new_id, config.device) | 3 | 2023-10-17 02:40:04+00:00 | 8k |
IBM/VillanDiffusion | operate.py | [
{
"identifier": "fid",
"path": "fid_score.py",
"snippet": "def fid(path: List[str], batch_size: int=50, dims: int=2048, device: str=None, num_workers: int=None):\n if device is None:\n device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')\n else:\n device = torch.de... | from functools import partial
from typing import List, Set, Tuple, Union
from diffusers import DiffusionPipeline, StableDiffusionPipeline, AutoencoderKL, UNet2DConditionModel, DPMSolverMultistepScheduler
from torchmetrics import StructuralSimilarityIndexMeasure
from torch import nn
from PIL import Image
from tqdm import tqdm
from accelerate import Accelerator
from fid_score import fid
from dataset import CaptionBackdoor, Backdoor, DatasetLoader, ImagePathDataset, ReplicateDataset
from config import SamplingStatic, MeasuringStatic, PromptDatasetStatic, DEFAULT_PROMPTS_POKEMON, DEFAULT_PROMPTS_CELEBA, ModelSchedStatic
from tools import batchify, batchify_generator, randn_images, encode_latents, save_grid, match_count
from tools import Log
import glob
import json
import os
import random
import pickle
import gc
import torch
import numpy as np | 6,933 | """
Some commly used operations
"""
# import argparse
# from math import ceil, sqrt
# from dataclasses import dataclass, field
# from transformers import AutoTokenizer, PretrainedConfig
class Sampling:
def __init__(self, backdoor_ds_root: str="datasets", num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N):
# self.__image_trigger_type: str = image_trigger
# self.__caption_trigger_type: str = caption_trigger
self.__num_inference_steps: int = num_inference_steps
self.__guidance_scale: float = guidance_scale
self.__max_batch_n: int = max_batch_n
self.__image_backdoor: Backdoor = Backdoor(root=backdoor_ds_root)
# self.__caption_backdoor: CaptionBackdoor = CaptionBackdoor()
@property
def image_backdoor(self):
return self.__image_backdoor
@staticmethod
def get_folder(sched_name: str=None, num_inference_steps: int=None, img_num: int=None, image_trigger: str=None, caption_trigger: str=None):
if caption_trigger is not None:
out_img_dir: str = "caption_backdoor_samples"
elif image_trigger is not None:
out_img_dir: str = "image_backdoor_samples"
else:
out_img_dir: str = "clean_samples"
if sched_name is not None:
out_img_dir += f"_{str(sched_name)}"
if num_inference_steps is not None:
out_img_dir += f"_step{str(num_inference_steps)}"
if img_num is not None:
out_img_dir += f"_n{str(img_num)}"
return out_img_dir
@staticmethod
def _batch_sampling(prompts: List[str], pipeline: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN,
return_imgs: bool=False):
with torch.no_grad():
tensor_dtype: torch.dtype = torch.FloatTensor
for i, param in enumerate(pipeline.unet.parameters()):
tensor_dtype: torch.dtype = param.type()
if i > 0:
break
device: str = pipeline.device
pipeline_call = partial(pipeline, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.manual_seed(seed), output_type=None)
| """
Some commly used operations
"""
# import argparse
# from math import ceil, sqrt
# from dataclasses import dataclass, field
# from transformers import AutoTokenizer, PretrainedConfig
class Sampling:
def __init__(self, backdoor_ds_root: str="datasets", num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS, guidance_scale: float=SamplingStatic.GUIDANCE_SCALE, max_batch_n: int=SamplingStatic.MAX_BATCH_N):
# self.__image_trigger_type: str = image_trigger
# self.__caption_trigger_type: str = caption_trigger
self.__num_inference_steps: int = num_inference_steps
self.__guidance_scale: float = guidance_scale
self.__max_batch_n: int = max_batch_n
self.__image_backdoor: Backdoor = Backdoor(root=backdoor_ds_root)
# self.__caption_backdoor: CaptionBackdoor = CaptionBackdoor()
@property
def image_backdoor(self):
return self.__image_backdoor
@staticmethod
def get_folder(sched_name: str=None, num_inference_steps: int=None, img_num: int=None, image_trigger: str=None, caption_trigger: str=None):
if caption_trigger is not None:
out_img_dir: str = "caption_backdoor_samples"
elif image_trigger is not None:
out_img_dir: str = "image_backdoor_samples"
else:
out_img_dir: str = "clean_samples"
if sched_name is not None:
out_img_dir += f"_{str(sched_name)}"
if num_inference_steps is not None:
out_img_dir += f"_step{str(num_inference_steps)}"
if img_num is not None:
out_img_dir += f"_n{str(img_num)}"
return out_img_dir
@staticmethod
def _batch_sampling(prompts: List[str], pipeline: DiffusionPipeline, inits: torch.Tensor=None,
num_inference_steps: int=SamplingStatic.NUM_INFERENCE_STEPS,
guidance_scale: float=SamplingStatic.GUIDANCE_SCALE,
max_batch_n: int=SamplingStatic.MAX_BATCH_N,
seed: int=SamplingStatic.SEED, handle_batch_fn: callable=SamplingStatic.HANDLE_BATCH_FN,
return_imgs: bool=False):
with torch.no_grad():
tensor_dtype: torch.dtype = torch.FloatTensor
for i, param in enumerate(pipeline.unet.parameters()):
tensor_dtype: torch.dtype = param.type()
if i > 0:
break
device: str = pipeline.device
pipeline_call = partial(pipeline, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, generator=torch.manual_seed(seed), output_type=None)
| prompt_batchs = batchify(xs=prompts, max_batch_n=max_batch_n) | 8 | 2023-10-17 19:57:37+00:00 | 8k |
WHUlwb/Assisted_learning | train_s.py | [
{
"identifier": "Dice_loss",
"path": "loss.py",
"snippet": "def Dice_loss(inputs, target, beta=1, smooth = 1e-5):\r\n # inputs B, C, H, W, and target B, H, W, C. \r\n # There are C dimensions in total, each dimension representing a class.\r\n n, c, h, w = inputs.size()\r\n nt, ht, wt, ct = t... | import torch
import numpy as np
import os
import metric
import time
from torch.utils.data import DataLoader
from loss import Dice_loss,CE_Loss,global_kd_loss,local_kd_loss
from torch.autograd import Variable
from dataset import MyDataset
from config import config
from torch.cuda.amp import GradScaler as Gradscaler
from torch.cuda.amp import autocast
from tqdm import tqdm
from hrnet.hrnet import HRnet
from unet import U_Net | 3,657 |
scaler = Gradscaler()
traindd = MyDataset(config.trainroot,is_training=True)
traindata = DataLoader(traindd,batch_size=config.batch_size, shuffle=True)
valdata = DataLoader(MyDataset(config.valroot,is_training=False), num_workers=0, batch_size=config.batch_size, shuffle=False)
studentnet = HRnet(in_channel = 1,num_classes=config.classnum,backbone='hrnetv2_w32').cuda() #target modality
teachernet = HRnet(in_channel = 3,num_classes=config.classnum,backbone='hrnetv2_w32').cuda() #auxiliary modality
# teachernet = U_Net(4,config.classnum).cuda()
teachernet.load_state_dict(torch.load("..\model.pth")) # load the teacher model
teachernet.eval()
optimizer = torch.optim.SGD(studentnet.parameters(), lr=config.lr, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
iters = len(traindata)
train_size = len(traindata)
val_size = len(valdata)
print('train data size: %04d'%train_size)
print('val data size: %04d'%val_size)
global_Fb = 0
start = time.time()
cls_weights = np.ones([config.classnum], np.float32)
weights = torch.from_numpy(cls_weights)
weights = weights.cuda()
if __name__ == '__main__':
for epoch in range(config.epoch_start,config.n_epochs):
seg_loss_t = 0
l_kd_loss_t = 0
g_kd_loss = 0
conf_mat_tra = 0
conf_mat_val = 0
loop = tqdm(enumerate(traindata), total = len(traindata))
for i,data in loop:
rgbn,sar,m,seg = data
# traindd.updata_size()
rgbn = Variable(rgbn).cuda()
sar = Variable(sar).cuda()
m = Variable(m).cuda()
seg = Variable(seg).cuda()
optimizer.zero_grad()
if config.amp:
with autocast():
with torch.no_grad():
tea_result = teachernet(rgbn)
stu_result = studentnet(sar)
ce = CE_Loss(stu_result,seg)
dice = Dice_loss(stu_result,seg)
lkd = local_kd_loss(tea_result,stu_result,m)
|
scaler = Gradscaler()
traindd = MyDataset(config.trainroot,is_training=True)
traindata = DataLoader(traindd,batch_size=config.batch_size, shuffle=True)
valdata = DataLoader(MyDataset(config.valroot,is_training=False), num_workers=0, batch_size=config.batch_size, shuffle=False)
studentnet = HRnet(in_channel = 1,num_classes=config.classnum,backbone='hrnetv2_w32').cuda() #target modality
teachernet = HRnet(in_channel = 3,num_classes=config.classnum,backbone='hrnetv2_w32').cuda() #auxiliary modality
# teachernet = U_Net(4,config.classnum).cuda()
teachernet.load_state_dict(torch.load("..\model.pth")) # load the teacher model
teachernet.eval()
optimizer = torch.optim.SGD(studentnet.parameters(), lr=config.lr, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
iters = len(traindata)
train_size = len(traindata)
val_size = len(valdata)
print('train data size: %04d'%train_size)
print('val data size: %04d'%val_size)
global_Fb = 0
start = time.time()
cls_weights = np.ones([config.classnum], np.float32)
weights = torch.from_numpy(cls_weights)
weights = weights.cuda()
if __name__ == '__main__':
for epoch in range(config.epoch_start,config.n_epochs):
seg_loss_t = 0
l_kd_loss_t = 0
g_kd_loss = 0
conf_mat_tra = 0
conf_mat_val = 0
loop = tqdm(enumerate(traindata), total = len(traindata))
for i,data in loop:
rgbn,sar,m,seg = data
# traindd.updata_size()
rgbn = Variable(rgbn).cuda()
sar = Variable(sar).cuda()
m = Variable(m).cuda()
seg = Variable(seg).cuda()
optimizer.zero_grad()
if config.amp:
with autocast():
with torch.no_grad():
tea_result = teachernet(rgbn)
stu_result = studentnet(sar)
ce = CE_Loss(stu_result,seg)
dice = Dice_loss(stu_result,seg)
lkd = local_kd_loss(tea_result,stu_result,m) | gkd = global_kd_loss(tea_result,stu_result,m) | 2 | 2023-10-17 06:19:02+00:00 | 8k |
dagedarr/telegram-budget | handlers/main_handler.py | [
{
"identifier": "Config",
"path": "config.py",
"snippet": "class Config:\n API_TOKEN: str = os.getenv('API_TOKEN')\n DB_URL: str = os.getenv('DB_URL', '') # mysql\n ROOT_DIR: str = os.path.dirname(os.path.abspath(__file__))\n\n LATEST_TRANSACTIONS_NUM: int = 5\n PAGINATOR_BUTTONS: int = ... | from aiogram import F, Router
from aiogram.fsm.context import FSMContext
from aiogram.types import CallbackQuery, Message
from sqlalchemy.ext.asyncio import AsyncSession
from config import Config
from core.crud import get_by_attributes, remove
from filters import IsEndOnboardingFilter
from keyboards import (back_to_menu_keyboard, main_keyboard, other_keyboard,
universal_keyboard)
from models import Transaction
from utils.transactions import (amount_validate, create_transaction,
get_category_or_alias_id,
get_transactions_message,
parse_text_for_amount_and_category)
from utils.user_actions import callback_message | 4,135 |
router = Router(name='main_router')
@router.callback_query(F.data == 'main')
async def main(callback: CallbackQuery, state: FSMContext):
"""Обрабатывает основные функции бота."""
await state.clear()
await callback_message(
target=callback,
text='Основной функционал бота',
reply_markup=main_keyboard()
)
@router.callback_query(F.data == 'latest_transactions')
async def latest_transactions(callback: CallbackQuery, session: AsyncSession):
"""Выводит посление N транзакций пользователя."""
transactions = await get_by_attributes(
model=Transaction,
attributes={
'user_id': callback.from_user.id
},
session=session,
get_multi=True,
amount=Config.LATEST_TRANSACTIONS_NUM,
order_by='date'
)
text = await get_transactions_message(transactions=transactions)
await callback_message(
target=callback,
text=text,
reply_markup=back_to_menu_keyboard()
)
@router.callback_query(F.data == 'del_last_transaction')
async def del_last_transaction(callback: CallbackQuery, session: AsyncSession):
"""Удаляет последнюю транзакцию пользователя."""
last_transaction = await get_by_attributes(
model=Transaction,
attributes={
'user_id': callback.from_user.id
},
order_by='date',
session=session,
)
if not last_transaction:
await callback_message(
target=callback,
text='У Вас нет истории Трат!',
reply_markup=back_to_menu_keyboard()
)
return
await remove(
db_obj=last_transaction,
session=session
)
await callback_message(
target=callback,
text=f'Трата "{last_transaction}" успешно удалена!',
reply_markup=back_to_menu_keyboard()
)
@router.callback_query(F.data == 'other')
async def other(callback: CallbackQuery):
"""Выводит Категории и Статистику, и остальной функционал."""
await callback_message(
target=callback,
text='Просмотр Категории и Статистики',
|
router = Router(name='main_router')
@router.callback_query(F.data == 'main')
async def main(callback: CallbackQuery, state: FSMContext):
"""Обрабатывает основные функции бота."""
await state.clear()
await callback_message(
target=callback,
text='Основной функционал бота',
reply_markup=main_keyboard()
)
@router.callback_query(F.data == 'latest_transactions')
async def latest_transactions(callback: CallbackQuery, session: AsyncSession):
"""Выводит посление N транзакций пользователя."""
transactions = await get_by_attributes(
model=Transaction,
attributes={
'user_id': callback.from_user.id
},
session=session,
get_multi=True,
amount=Config.LATEST_TRANSACTIONS_NUM,
order_by='date'
)
text = await get_transactions_message(transactions=transactions)
await callback_message(
target=callback,
text=text,
reply_markup=back_to_menu_keyboard()
)
@router.callback_query(F.data == 'del_last_transaction')
async def del_last_transaction(callback: CallbackQuery, session: AsyncSession):
"""Удаляет последнюю транзакцию пользователя."""
last_transaction = await get_by_attributes(
model=Transaction,
attributes={
'user_id': callback.from_user.id
},
order_by='date',
session=session,
)
if not last_transaction:
await callback_message(
target=callback,
text='У Вас нет истории Трат!',
reply_markup=back_to_menu_keyboard()
)
return
await remove(
db_obj=last_transaction,
session=session
)
await callback_message(
target=callback,
text=f'Трата "{last_transaction}" успешно удалена!',
reply_markup=back_to_menu_keyboard()
)
@router.callback_query(F.data == 'other')
async def other(callback: CallbackQuery):
"""Выводит Категории и Статистику, и остальной функционал."""
await callback_message(
target=callback,
text='Просмотр Категории и Статистики', | reply_markup=other_keyboard(), | 6 | 2023-10-23 17:30:24+00:00 | 8k |
nchen909/Pass-Tuning | models_list/unified/bitfit.py | [
{
"identifier": "PushToHubFriendlyModel",
"path": "models_list/unified/base.py",
"snippet": "class PushToHubFriendlyModel(nn.Module, ModuleUtilsMixin, PushToHubMixin):\n def __init__(self):\n super().__init__()\n\n def save_pretrained(\n self,\n save_directory: Union[s... | import torch
from torch import nn
from transformers import AutoTokenizer
from .base import PushToHubFriendlyModel
from ..bitfit.bitfit import ParameterFreeze
from ..bitfit.modeling_plbart import PLBartForConditionalGeneration
from ..bitfit.modeling_t5 import T5ForConditionalGeneration
from ..bitfit.modeling_t5 import T5ForConditionalGeneration | 4,491 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# from ..bitfit.modeling_auto import AutoModelForSeq2SeqLM
class E2D_Model_Bitfit(PushToHubFriendlyModel):
def __init__(self, args):
super().__init__()
self.args = args
"""The bitfit and adapter and prefix-tuning code"""
self.preseqlen = args.max_source_length
self.mid_dim = args.gat_token_num
print("prefix-tuning sequence length is {}.".format(self.preseqlen))
print("bitfit is used.")
# Load tokenizer and model.
self.tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, use_fast=False)
# self.pretrain_model = AutoModelForSeq2SeqLM.from_pretrained(
# args.bert.location
# )
if "t5" in self.args.pretrained_model_name_or_path:
print(args.pretrained_model_name_or_path)
self.pretrain_model = T5ForConditionalGeneration.from_pretrained(
args.pretrained_model_name_or_path
)
assert isinstance(self.pretrain_model, (T5ForConditionalGeneration))
elif "bart" in self.args.pretrained_model_name_or_path:
self.pretrain_model = PLBartForConditionalGeneration.from_pretrained(
args.pretrained_model_name_or_path
)
assert isinstance(self.pretrain_model, (PLBartForConditionalGeneration))
self.config = self.pretrain_model.config
if args.prefix_tuning:
if isinstance(self.pretrain_model, T5ForConditionalGeneration):
self.match_n_layer = self.config.num_decoder_layers
self.match_n_head = self.config.num_heads
else:
raise ValueError("Other models are not supported yet!")
self.n_embd = self.config.d_model
assert self.n_embd % self.match_n_head == 0
self.match_n_embd = self.n_embd // self.match_n_head
# if args.special_tokens:
# self.tokenizer.add_tokens([v for k, v in args.special_tokens])
# self.pretrain_model.resize_token_embeddings(len(self.tokenizer))
if args.prefix_tuning:
# Prefix related.
self.register_buffer('input_tokens', torch.arange(self.preseqlen).long())
self.wte = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
if self.args.knowledge_usage == 'separate':
self.knowledge_trans = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
self.wte_enc = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans_enc = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
if self.args.knowledge_usage == 'separate':
self.knowledge_trans_enc = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
self.wte_dec = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans_dec = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
# Knowledge prompt.
if self.args.knowledge_usage == 'separate':
self.knowledge_trans_dec = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
else:
if self.args.knowledge_usage == 'separate':
raise NotImplementedError()
if args.prefix_tuning:
self.dropout = nn.Dropout(args.prefix_dropout)
if self.args.fix_model_param and self.args.bitfit:
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# from ..bitfit.modeling_auto import AutoModelForSeq2SeqLM
class E2D_Model_Bitfit(PushToHubFriendlyModel):
def __init__(self, args):
super().__init__()
self.args = args
"""The bitfit and adapter and prefix-tuning code"""
self.preseqlen = args.max_source_length
self.mid_dim = args.gat_token_num
print("prefix-tuning sequence length is {}.".format(self.preseqlen))
print("bitfit is used.")
# Load tokenizer and model.
self.tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name_or_path, use_fast=False)
# self.pretrain_model = AutoModelForSeq2SeqLM.from_pretrained(
# args.bert.location
# )
if "t5" in self.args.pretrained_model_name_or_path:
print(args.pretrained_model_name_or_path)
self.pretrain_model = T5ForConditionalGeneration.from_pretrained(
args.pretrained_model_name_or_path
)
assert isinstance(self.pretrain_model, (T5ForConditionalGeneration))
elif "bart" in self.args.pretrained_model_name_or_path:
self.pretrain_model = PLBartForConditionalGeneration.from_pretrained(
args.pretrained_model_name_or_path
)
assert isinstance(self.pretrain_model, (PLBartForConditionalGeneration))
self.config = self.pretrain_model.config
if args.prefix_tuning:
if isinstance(self.pretrain_model, T5ForConditionalGeneration):
self.match_n_layer = self.config.num_decoder_layers
self.match_n_head = self.config.num_heads
else:
raise ValueError("Other models are not supported yet!")
self.n_embd = self.config.d_model
assert self.n_embd % self.match_n_head == 0
self.match_n_embd = self.n_embd // self.match_n_head
# if args.special_tokens:
# self.tokenizer.add_tokens([v for k, v in args.special_tokens])
# self.pretrain_model.resize_token_embeddings(len(self.tokenizer))
if args.prefix_tuning:
# Prefix related.
self.register_buffer('input_tokens', torch.arange(self.preseqlen).long())
self.wte = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
if self.args.knowledge_usage == 'separate':
self.knowledge_trans = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
self.wte_enc = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans_enc = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
if self.args.knowledge_usage == 'separate':
self.knowledge_trans_enc = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
self.wte_dec = nn.Embedding(self.preseqlen, self.n_embd)
self.control_trans_dec = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
# Knowledge prompt.
if self.args.knowledge_usage == 'separate':
self.knowledge_trans_dec = nn.Sequential(
nn.Linear(self.n_embd, self.mid_dim),
nn.Tanh(),
nn.Linear(self.mid_dim, self.match_n_layer * 2 * self.n_embd),
)
else:
if self.args.knowledge_usage == 'separate':
raise NotImplementedError()
if args.prefix_tuning:
self.dropout = nn.Dropout(args.prefix_dropout)
if self.args.fix_model_param and self.args.bitfit: | pf=ParameterFreeze() | 1 | 2023-10-20 09:24:44+00:00 | 8k |
openfoodfacts/open-prices | app/crud.py | [
{
"identifier": "config",
"path": "app/config.py",
"snippet": "ROOT_DIR = Path(__file__).parent.parent\nSTATIC_DIR = ROOT_DIR / \"static\"\n NOTSET: str = \"NOTSET\"\n DEBUG: str = \"DEBUG\"\n INFO: str = \"INFO\"\n WARNING: str = \"WARNING\"\n ERROR: str = \"ERROR\"\n CRITICAL: str = ... | import random
import string
from mimetypes import guess_extension
from fastapi import UploadFile
from sqlalchemy import select
from sqlalchemy.orm import Session, joinedload
from sqlalchemy.sql import func
from app import config
from app.enums import LocationOSMEnum, ProofTypeEnum
from app.models import Location, Price, Product, Proof, User
from app.schemas import (
LocationCreate,
LocationFilter,
LocationFull,
PriceCreate,
PriceFilter,
PriceFull,
ProductCreate,
ProductFilter,
ProductFull,
UserCreate,
) | 5,221 | def get_product_by_id(db: Session, id: int):
return db.query(Product).filter(Product.id == id).first()
def get_product_by_code(db: Session, code: str) -> Product:
return db.query(Product).filter(Product.code == code).first()
def create_product(
db: Session, product: ProductCreate, price_count: int = 0
) -> Product:
"""Create a product in the database.
:param db: the database session
:param product: the product to create
:param price_count: the number of prices linked to the product, defaults
to 0
:return: the created product
"""
db_product = Product(price_count=price_count, **product.model_dump())
db.add(db_product)
db.commit()
db.refresh(db_product)
return db_product
def get_or_create_product(
db: Session, product: ProductCreate, init_price_count: int = 0
):
"""Get or create a product in the database.
:param db: the database session
:param product: the product to create
:param init_price_count: the initial number of prices linked to the
product if a product is created, defaults to 0
:return: the created product and a boolean indicating whether the product
was created or not
"""
created = False
db_product = get_product_by_code(db, code=product.code)
if not db_product:
db_product = create_product(db, product=product, price_count=init_price_count)
created = True
return db_product, created
def update_product(db: Session, product: ProductFull, update_dict: dict):
for key, value in update_dict.items():
setattr(product, key, value)
db.commit()
db.refresh(product)
return product
def increment_product_price_count(db: Session, product: ProductFull):
"""Increment the price count of a product.
This is used to keep track of the number of prices linked to a product.
"""
product.price_count += 1
db.commit()
db.refresh(product)
return product
# Prices
# ------------------------------------------------------------------------------
def get_prices_query(
with_join_product: bool = True,
with_join_location: bool = True,
with_join_proof: bool = True,
filters: PriceFilter | None = None,
):
"""Useful for pagination."""
query = select(Price)
if with_join_product:
query = query.options(joinedload(Price.product))
if with_join_location:
query = query.options(joinedload(Price.location))
if with_join_proof:
query = query.options(joinedload(Price.proof))
if filters:
query = filters.filter(query)
query = filters.sort(query)
return query
def get_prices(db: Session, filters: PriceFilter | None = None):
return db.execute(get_prices_query(filters=filters)).all()
def create_price(db: Session, price: PriceCreate, user: UserCreate):
db_price = Price(**price.model_dump(), owner=user.user_id)
db.add(db_price)
db.commit()
db.refresh(db_price)
return db_price
def link_price_product(
db: Session, price: PriceFull, product: ProductFull
) -> PriceFull:
"""Link the product DB object to the price DB object and return the updated
price."""
price.product_id = product.id
db.commit()
db.refresh(price)
return price
def set_price_location(db: Session, price: PriceFull, location: LocationFull):
price.location_id = location.id
db.commit()
db.refresh(price)
return price
# Proofs
# ------------------------------------------------------------------------------
def get_proof(db: Session, proof_id: int):
|
# Users
# ------------------------------------------------------------------------------
def get_users_query(filters: ProductFilter | None = None):
"""Useful for pagination."""
query = select(User)
if filters:
query = filters.filter(query)
query = filters.sort(query)
return query
def get_users(db: Session, filters: ProductFilter | None = None):
return db.execute(get_users_query(filters=filters)).all()
def get_user(db: Session, user_id: str):
return db.query(User).filter(User.user_id == user_id).first()
def get_user_by_user_id(db: Session, user_id: str):
return db.query(User).filter(User.user_id == user_id).first()
def get_user_by_token(db: Session, token: str):
return db.query(User).filter(User.token == token).first()
def create_user(db: Session, user: UserCreate) -> User:
"""Create a user in the database.
:param db: the database session
:param product: the user to create
:return: the created user
"""
db_user = User(user_id=user.user_id, token=user.token)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
def get_or_create_user(db: Session, user: UserCreate):
created = False
db_user = get_user_by_user_id(db, user_id=user.user_id)
if not db_user:
db_user = create_user(db, user=user)
created = True
return db_user, created
def update_user(db: Session, user: UserCreate, update_dict: dict):
for key, value in update_dict.items():
setattr(user, key, value)
db.commit()
db.refresh(user)
return user
def update_user_last_used_field(db: Session, user: UserCreate) -> UserCreate | None:
return update_user(db, user, {"last_used": func.now()})
def increment_user_price_count(db: Session, user: UserCreate):
"""Increment the price count of a user.
This is used to keep track of the number of prices linked to a user.
"""
user.price_count += 1
db.commit()
db.refresh(user)
return user
def delete_user(db: Session, user_id: UserCreate):
db_user = get_user_by_user_id(db, user_id=user_id)
if db_user:
db.delete(db_user)
db.commit()
return True
return False
# Products
# ------------------------------------------------------------------------------
def get_products_query(filters: ProductFilter | None = None):
"""Useful for pagination."""
query = select(Product)
if filters:
query = filters.filter(query)
query = filters.sort(query)
return query
def get_products(db: Session, filters: ProductFilter | None = None):
return db.execute(get_products_query(filters=filters)).all()
def get_product_by_id(db: Session, id: int):
return db.query(Product).filter(Product.id == id).first()
def get_product_by_code(db: Session, code: str) -> Product:
return db.query(Product).filter(Product.code == code).first()
def create_product(
db: Session, product: ProductCreate, price_count: int = 0
) -> Product:
"""Create a product in the database.
:param db: the database session
:param product: the product to create
:param price_count: the number of prices linked to the product, defaults
to 0
:return: the created product
"""
db_product = Product(price_count=price_count, **product.model_dump())
db.add(db_product)
db.commit()
db.refresh(db_product)
return db_product
def get_or_create_product(
db: Session, product: ProductCreate, init_price_count: int = 0
):
"""Get or create a product in the database.
:param db: the database session
:param product: the product to create
:param init_price_count: the initial number of prices linked to the
product if a product is created, defaults to 0
:return: the created product and a boolean indicating whether the product
was created or not
"""
created = False
db_product = get_product_by_code(db, code=product.code)
if not db_product:
db_product = create_product(db, product=product, price_count=init_price_count)
created = True
return db_product, created
def update_product(db: Session, product: ProductFull, update_dict: dict):
for key, value in update_dict.items():
setattr(product, key, value)
db.commit()
db.refresh(product)
return product
def increment_product_price_count(db: Session, product: ProductFull):
"""Increment the price count of a product.
This is used to keep track of the number of prices linked to a product.
"""
product.price_count += 1
db.commit()
db.refresh(product)
return product
# Prices
# ------------------------------------------------------------------------------
def get_prices_query(
with_join_product: bool = True,
with_join_location: bool = True,
with_join_proof: bool = True,
filters: PriceFilter | None = None,
):
"""Useful for pagination."""
query = select(Price)
if with_join_product:
query = query.options(joinedload(Price.product))
if with_join_location:
query = query.options(joinedload(Price.location))
if with_join_proof:
query = query.options(joinedload(Price.proof))
if filters:
query = filters.filter(query)
query = filters.sort(query)
return query
def get_prices(db: Session, filters: PriceFilter | None = None):
return db.execute(get_prices_query(filters=filters)).all()
def create_price(db: Session, price: PriceCreate, user: UserCreate):
db_price = Price(**price.model_dump(), owner=user.user_id)
db.add(db_price)
db.commit()
db.refresh(db_price)
return db_price
def link_price_product(
db: Session, price: PriceFull, product: ProductFull
) -> PriceFull:
"""Link the product DB object to the price DB object and return the updated
price."""
price.product_id = product.id
db.commit()
db.refresh(price)
return price
def set_price_location(db: Session, price: PriceFull, location: LocationFull):
price.location_id = location.id
db.commit()
db.refresh(price)
return price
# Proofs
# ------------------------------------------------------------------------------
def get_proof(db: Session, proof_id: int): | return db.query(Proof).filter(Proof.id == proof_id).first() | 6 | 2023-10-21 14:02:15+00:00 | 8k |
JoaoPedro9674/django-ledger | django_ledger/models/journal_entry.py | [
{
"identifier": "ASSET_CA_CASH",
"path": "django_ledger/io/roles.py",
"snippet": "ASSET_CA_CASH = 'asset_ca_cash'"
},
{
"identifier": "GROUP_CFS_FIN_DIVIDENDS",
"path": "django_ledger/io/roles.py",
"snippet": "GROUP_CFS_FIN_DIVIDENDS = [EQUITY_DIVIDENDS]"
},
{
"identifier": "GROU... | from datetime import date, datetime
from decimal import Decimal
from enum import Enum
from itertools import chain
from typing import Set, Union, Optional, Dict, Tuple, List
from uuid import uuid4, UUID
from django.core.exceptions import FieldError, ObjectDoesNotExist, ValidationError
from django.db import models, transaction, IntegrityError
from django.db.models import Q, Sum, QuerySet, F
from django.db.models.functions import Coalesce
from django.db.models.signals import pre_save
from django.urls import reverse
from django.utils.timezone import localtime
from django.utils.translation import gettext_lazy as _
from django_ledger.io.roles import (ASSET_CA_CASH, GROUP_CFS_FIN_DIVIDENDS, GROUP_CFS_FIN_ISSUING_EQUITY,
GROUP_CFS_FIN_LT_DEBT_PAYMENTS, GROUP_CFS_FIN_ST_DEBT_PAYMENTS,
GROUP_CFS_INVESTING_AND_FINANCING, GROUP_CFS_INVESTING_PPE,
GROUP_CFS_INVESTING_SECURITIES, validate_roles)
from django_ledger.models.accounts import CREDIT, DEBIT
from django_ledger.models.entity import EntityStateModel, EntityModel
from django_ledger.models.mixins import CreateUpdateMixIn
from django_ledger.models.transactions import TransactionModelQuerySet, TransactionModel
from django_ledger.models.utils import lazy_loader
from django_ledger.settings import (DJANGO_LEDGER_JE_NUMBER_PREFIX, DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING,
DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX) | 5,269 | if not isinstance(txs_qs, TransactionModelQuerySet):
raise JournalEntryValidationError(
message=f'Must pass a TransactionModelQuerySet. Got {txs_qs.__class__.__name__}'
)
# todo: add maximum transactions per JE model as a setting...
is_valid = self.is_txs_qs_valid(txs_qs)
if not is_valid:
raise JournalEntryValidationError(
message='Invalid Transaction QuerySet used. Must be from same Journal Entry'
)
balances = txs_qs.values('tx_type').annotate(
amount__sum=Coalesce(Sum('amount'),
Decimal('0.00'),
output_field=models.DecimalField()))
if as_dict:
return {
tx['tx_type']: tx['amount__sum'] for tx in balances
}
return balances
def get_txs_roles(self,
txs_qs: Optional[TransactionModelQuerySet] = None,
exclude_cash_role: bool = False) -> Set[str]:
"""
Determines the list of account roles involved in the JournalEntryModel instance.
It reaches into the AccountModel associated with each TransactionModel of the JE to determine a Set of
all roles involved in transactions. This method is important in determining the nature of the
Parameters
----------
txs_qs: TransactionModelQuerySet
Prefetched TransactionModelQuerySet. Will be validated if provided.
Avoids additional DB query if provided.
exclude_cash_role: bool
Removes CASH role from the Set if present.
Useful in some cases where cash role must be excluded for additional validation.
Returns
-------
set
The set of account roles as strings associated with the JournalEntryModel instance.
"""
if not txs_qs:
txs_qs = self.get_transaction_queryset(select_accounts=True)
else:
self.is_txs_qs_valid(txs_qs)
# todo: implement distinct for non SQLite Backends...
if exclude_cash_role:
return set([i.account.role for i in txs_qs if i.account.role != ASSET_CA_CASH])
return set([i.account.role for i in txs_qs])
def has_activity(self) -> bool:
return self.activity is not None
def get_activity_name(self) -> Optional[str]:
"""
Returns a human-readable, GAAP string representing the JournalEntryModel activity.
Returns
-------
str or None
Representing the JournalEntryModel activity in the statement of cash flows.
"""
if self.activity:
if self.is_operating():
return ActivityEnum.OPERATING.value
elif self.is_investing():
return ActivityEnum.INVESTING.value
elif self.is_financing():
return ActivityEnum.FINANCING.value
@classmethod
def get_activity_from_roles(cls,
role_set: Union[List[str], Set[str]],
validate: bool = False,
raise_exception: bool = True) -> Optional[str]:
if validate:
role_set = validate_roles(roles=role_set)
else:
if isinstance(role_set, list):
role_set = set(role_set)
activity = None
# no roles involved
if not len(role_set):
return
# determining if investing....
is_investing_for_ppe = all([
# all roles must be in group
all([r in GROUP_CFS_INVESTING_PPE for r in role_set]),
# at least one role
sum([r in GROUP_CFS_INVESTING_PPE for r in role_set]) > 0,
# at least one role
# sum([r in GROUP_CFS_INV_LTD_OF_PPE for r in role_set]) > 0,
])
is_investing_for_securities = all([
# all roles must be in group
all([r in GROUP_CFS_INVESTING_SECURITIES for r in role_set]),
# at least one role
sum([r in GROUP_CFS_INVESTING_SECURITIES for r in role_set]) > 0,
# at least one role
# sum([r in GROUP_CFS_INV_LTD_OF_SECURITIES for r in role_set]) > 0,
])
# IS INVESTING OTHERS....?
# determining if financing...
is_financing_dividends = all([r in GROUP_CFS_FIN_DIVIDENDS for r in role_set])
is_financing_issuing_equity = all([r in GROUP_CFS_FIN_ISSUING_EQUITY for r in role_set])
is_financing_st_debt = all([r in GROUP_CFS_FIN_ST_DEBT_PAYMENTS for r in role_set])
is_financing_lt_debt = all([r in GROUP_CFS_FIN_LT_DEBT_PAYMENTS for r in role_set])
| """
Django Ledger created by Miguel Sanda <msanda@arrobalytics.com>.
Copyright© EDMA Group Inc licensed under the GPLv3 Agreement.
Contributions to this module:
* Miguel Sanda <msanda@arrobalytics.com>
A Journal Entry (JE) is the foundation of all double entry accounting and financial data of any EntityModel.
A JE encapsulates a collection of TransactionModel, which must contain two transactions at a minimum. Each transaction
must perform a DEBIT or a CREDIT to an AccountModel. The JE Model performs additional validation to make sure that
the sum of all DEBITs and the sum of all CREDITs are equal to keep the books balanced.
A JE by default will be un-posted, which means that simply creating a JE will have no effect on the EntityModel
books. This behavior allows for constant refinement and persistence of JEs in the database without any impact on the
books. Only Journal Entries contained within a *POSTED* LedgerModel (see LedgerModel for documentation) will have an
impact in the EntityModel finances.
The JournalEntryModel also carries an optional EntityUnitModel, which are logical user-defined labels which help
segregate the different financial statements into different business operations (see EntityUnitModel for documentation).
Examples of EntityModelUnits are offices, departments, divisions, etc. *The user may request financial statements by
unit*.
All JEs automatically generate a sequential Journal Entry Number, which takes into consideration the Fiscal Year of the
JournalEntryModel instance. This functionality enables a human-readable tracking mechanism which helps with audits. It
is also searchable and indexed to support quick searches and queries.
The JournalEntryModel is also responsible for validating the Financial Activity involved in the operations of the
business. Whenever an account with ASSET_CA_CASH role is involved in a Journal Entry (see roles for more details), the
JE is responsible for programmatically determine the kind of operation for the JE (Operating, Financing, Investing).
"""
class JournalEntryValidationError(ValidationError):
pass
class JournalEntryModelQuerySet(QuerySet):
"""
Custom defined JournalEntryQuerySet.
"""
def create(self, verify_on_save: bool = False, force_create: bool = False, **kwargs):
"""
Overrides the standard Django QuerySet create() method to avoid the creation of POSTED Journal Entries without
proper business logic validation. New JEs using the create() method don't have any transactions to validate.
therefore, it is not necessary to query DB to balance TXS
Parameters
----------
verify_on_save: bool
Executes a Journal Entry verification hook before saving. Avoids additional queries to
validate the Journal Entry
force_create: bool
If True, will create return a new JournalEntryModel even if Posted at time of creation.
Use only if you know what you are doing.
Returns
-------
JournalEntryModel
The newly created Journal Entry Model.
"""
is_posted = kwargs.get('posted')
if is_posted and not force_create:
raise FieldError('Cannot create Journal Entries as posted')
obj = self.model(**kwargs)
self._for_write = True
# verify_on_save option avoids additional queries to validate the journal entry.
# New JEs using the create() method don't have any transactions to validate.
# therefore, it is not necessary to query DB to balance TXS.
obj.save(force_insert=True, using=self.db, verify=verify_on_save)
return obj
def posted(self):
"""
Filters the QuerySet to only posted Journal Entries.
Returns
-------
JournalEntryModelQuerySet
A QuerySet with applied filters.
"""
return self.filter(posted=True)
def unposted(self):
return self.filter(posted=False)
def locked(self):
"""
Filters the QuerySet to only locked Journal Entries.
Returns
-------
JournalEntryModelQuerySet
A QuerySet with applied filters.
"""
return self.filter(locked=True)
def unlocked(self):
return self.filter(locked=False)
class JournalEntryModelManager(models.Manager):
"""
A custom defined Journal Entry Model Manager that supports additional complex initial Queries based on the
EntityModel and authenticated UserModel.
"""
def for_entity(self, entity_slug, user_model):
"""
Fetches a QuerySet of JournalEntryModels associated with a specific EntityModel & UserModel.
May pass an instance of EntityModel or a String representing the EntityModel slug.
Parameters
__________
entity_slug: str or EntityModel
The entity slug or EntityModel used for filtering the QuerySet.
user_model
Logged in and authenticated django UserModel instance.
Examples
________
>>> request_user = request.user
>>> slug = kwargs['entity_slug'] # may come from request kwargs
>>> journal_entry_qs = JournalEntryModel.objects.for_entity(user_model=request_user, entity_slug=slug)
Returns
_______
JournalEntryModelQuerySet
Returns a JournalEntryModelQuerySet with applied filters.
"""
if isinstance(entity_slug, lazy_loader.get_entity_model()):
return self.get_queryset().filter(
Q(ledger__entity=entity_slug) &
(
Q(ledger__entity__admin=user_model) |
Q(ledger__entity__managers__in=[user_model])
)
)
return self.get_queryset().filter(
Q(ledger__entity__slug__iexact=entity_slug) &
(
Q(ledger__entity__admin=user_model) |
Q(ledger__entity__managers__in=[user_model])
)
)
def for_ledger(self, ledger_pk: Union[str, UUID], entity_slug, user_model):
"""
Fetches a QuerySet of JournalEntryModels associated with a specific EntityModel & UserModel & LedgerModel.
May pass an instance of EntityModel or a String representing the EntityModel slug.
Parameters
__________
entity_slug: str or EntityModel
The entity slug or EntityModel used for filtering the QuerySet.
user_model
Logged in and authenticated django UserModel instance.
ledger_pk: str or UUID
The LedgerModel uuid as a string or UUID.
Examples
________
>>> request_user = request.user
>>> slug = kwargs['entity_slug'] # may come from request kwargs
>>> ledger_pk = kwargs['ledger_pk'] # may come from request kwargs
>>> journal_entry_qs = JournalEntryModel.objects.for_ledger(ledger_pk=ledger_pk, user_model=request_user, entity_slug=slug)
Returns
_______
JournalEntryModelQuerySet
Returns a JournalEntryModelQuerySet with applied filters.
"""
qs = self.for_entity(entity_slug=entity_slug, user_model=user_model)
return qs.filter(ledger__uuid__exact=ledger_pk)
class ActivityEnum(Enum):
"""
The database string representation of each accounting activity prefix in the database.
Attributes
__________
OPERATING: str
The database representation prefix of a Journal Entry that is an Operating Activity.
INVESTING: str
The database representation prefix of a Journal Entry that is an Investing Activity.
FINANCING: str
The database representation prefix of a Journal Entry that is an Financing Activity.
"""
OPERATING = 'op'
INVESTING = 'inv'
FINANCING = 'fin'
class JournalEntryModelAbstract(CreateUpdateMixIn):
"""
The base implementation of the JournalEntryModel.
Attributes
----------
uuid: UUID
This is a unique primary key generated for the table. The default value of this field is uuid4().
je_number: str
A unique, sequential, human-readable alphanumeric Journal Entry Number (a.k.a Voucher or Document Number in
other commercial bookkeeping software). Contains the fiscal year under which the JE takes place within the
EntityModel as a prefix.
timestamp: datetime
The date of the JournalEntryModel. This date is applied to all TransactionModels contained within the JE, and
drives the financial statements of the EntityModel.
description: str
A user defined description for the JournalEntryModel.
entity_unit: EntityUnitModel
A logical, self-contained, user defined class or structure defined withing the EntityModel.
See EntityUnitModel documentation for more details.
activity: str
Programmatically determined based on the JE transactions and must be a value from ACTIVITIES. Gives
additional insight of the nature of the JournalEntryModel in order to produce the Statement of Cash Flows for the
EntityModel.
origin: str
A string giving additional information behind the origin or trigger of the JournalEntryModel.
For example: reconciliations, migrations, auto-generated, etc. Any string value is valid. Max 30 characters.
posted: bool
Determines if the JournalLedgerModel is posted, which means is affecting the books. Defaults to False.
locked: bool
Determines if the JournalEntryModel is locked, which the creation or updates of new transactions are not
allowed.
ledger: LedgerModel
The LedgerModel associated with this JournalEntryModel. Cannot be null.
"""
OPERATING_ACTIVITY = ActivityEnum.OPERATING.value
FINANCING_OTHER = ActivityEnum.FINANCING.value
INVESTING_OTHER = ActivityEnum.INVESTING.value
INVESTING_SECURITIES = f'{ActivityEnum.INVESTING.value}_securities'
INVESTING_PPE = f'{ActivityEnum.INVESTING.value}_ppe'
FINANCING_STD = f'{ActivityEnum.FINANCING.value}_std'
FINANCING_LTD = f'{ActivityEnum.FINANCING.value}_ltd'
FINANCING_EQUITY = f'{ActivityEnum.FINANCING.value}_equity'
FINANCING_DIVIDENDS = f'{ActivityEnum.FINANCING.value}_dividends'
ACTIVITIES = [
(_('Operating'), (
(OPERATING_ACTIVITY, _('Operating')),
)),
(_('Investing'), (
(INVESTING_PPE, _('Purchase/Disposition of PPE')),
(INVESTING_SECURITIES, _('Purchase/Disposition of Securities')),
(INVESTING_OTHER, _('Investing Activity Other')),
)),
(_('Financing'), (
(FINANCING_STD, _('Payoff of Short Term Debt')),
(FINANCING_LTD, _('Payoff of Long Term Debt')),
(FINANCING_EQUITY, _('Issuance of Common Stock, Preferred Stock or Capital Contribution')),
(FINANCING_DIVIDENDS, _('Dividends or Distributions to Shareholders')),
(FINANCING_OTHER, _('Financing Activity Other')),
)),
]
VALID_ACTIVITIES = list(chain.from_iterable([[a[0] for a in cat[1]] for cat in ACTIVITIES]))
MAP_ACTIVITIES = dict(chain.from_iterable([[(a[0], cat[0]) for a in cat[1]] for cat in ACTIVITIES]))
NON_OPERATIONAL_ACTIVITIES = [a for a in VALID_ACTIVITIES if ActivityEnum.OPERATING.value not in a]
uuid = models.UUIDField(default=uuid4, editable=False, primary_key=True)
je_number = models.SlugField(max_length=25, editable=False, verbose_name=_('Journal Entry Number'))
timestamp = models.DateTimeField(verbose_name=_('Timestamp'), default=localtime)
description = models.CharField(max_length=70, blank=True, null=True, verbose_name=_('Description'))
entity_unit = models.ForeignKey('django_ledger.EntityUnitModel',
on_delete=models.RESTRICT,
blank=True,
null=True,
verbose_name=_('Associated Entity Unit'))
activity = models.CharField(choices=ACTIVITIES,
max_length=20,
null=True,
blank=True,
editable=False,
verbose_name=_('Activity'))
origin = models.CharField(max_length=30, blank=True, null=True, verbose_name=_('Origin'))
posted = models.BooleanField(default=False, verbose_name=_('Posted'))
locked = models.BooleanField(default=False, verbose_name=_('Locked'))
is_closing_entry = models.BooleanField(default=False)
# todo: rename to ledger_model?
ledger = models.ForeignKey('django_ledger.LedgerModel',
verbose_name=_('Ledger'),
related_name='journal_entries',
on_delete=models.CASCADE)
objects = JournalEntryModelManager.from_queryset(queryset_class=JournalEntryModelQuerySet)()
class Meta:
abstract = True
ordering = ['-created']
verbose_name = _('Journal Entry')
verbose_name_plural = _('Journal Entries')
indexes = [
models.Index(fields=['ledger']),
models.Index(fields=['timestamp']),
models.Index(fields=['activity']),
models.Index(fields=['entity_unit']),
models.Index(fields=['locked']),
models.Index(fields=['posted']),
models.Index(fields=['je_number']),
models.Index(fields=['is_closing_entry']),
]
def __str__(self):
if self.je_number:
return 'JE: {x1} - Desc: {x2}'.format(x1=self.je_number, x2=self.description)
return 'JE ID: {x1} - Desc: {x2}'.format(x1=self.pk, x2=self.description)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._verified = False
self._last_closing_date: Optional[date] = None
def can_post(self, ignore_verify: bool = True) -> bool:
"""
Determines if a JournalEntryModel can be posted.
Parameters
----------
ignore_verify: bool
Skips JournalEntryModel verification if True. Defaults to False.
Returns
-------
bool
True if JournalEntryModel can be posted, otherwise False.
"""
return all([
self.is_locked(),
not self.is_posted(),
self.is_verified() if not ignore_verify else True,
not self.ledger.is_locked(),
not self.is_in_locked_period()
])
def can_unpost(self) -> bool:
"""
Determines if a JournalEntryModel can be un-posted.
Returns
-------
bool
True if JournalEntryModel can be un-posted, otherwise False.
"""
return all([
self.is_posted(),
not self.ledger.is_locked(),
not self.is_in_locked_period()
])
def can_lock(self) -> bool:
"""
Determines if a JournalEntryModel can be locked.
Locked JournalEntryModels cannot be modified.
Returns
-------
bool
True if JournalEntryModel can be locked, otherwise False.
"""
return all([
not self.is_locked(),
not self.ledger.is_locked()
])
def can_unlock(self) -> bool:
"""
Determines if a JournalEntryModel can be un-locked.
Locked transactions cannot be modified.
Returns
-------
bool
True if JournalEntryModel can be un-locked, otherwise False.
"""
return all([
self.is_locked(),
not self.is_posted(),
not self.is_in_locked_period(),
not self.ledger.is_locked()
])
def can_delete(self) -> bool:
return all([
not self.is_locked(),
not self.is_posted(),
])
def can_edit_timestamp(self) -> bool:
return not self.is_locked()
def is_posted(self):
return self.posted is True
def is_in_locked_period(self, new_timestamp: Optional[Union[date, datetime]] = None) -> bool:
last_closing_date = self.get_entity_last_closing_date()
if last_closing_date is not None:
if not new_timestamp:
return last_closing_date >= self.timestamp.date()
elif isinstance(new_timestamp, datetime):
return last_closing_date >= new_timestamp.date()
else:
return last_closing_date >= new_timestamp
return False
def is_locked(self):
if self.is_posted():
return True
return any([
self.locked is True,
any([
self.is_in_locked_period(),
self.ledger.is_locked()
])
])
def is_verified(self) -> bool:
"""
Determines if the JournalEntryModel is verified.
Returns
-------
bool
True if is verified, otherwise False.
"""
return self._verified
def is_balance_valid(self, txs_qs: Optional[TransactionModelQuerySet] = None) -> bool:
"""
Checks if CREDITs and DEBITs are equal.
Parameters
----------
txs_qs: TransactionModelQuerySet
Optional pre-fetched JE instance TransactionModelQuerySet. Will be validated if provided.
Returns
-------
bool
True if JE balances are valid (i.e. are equal).
"""
if len(txs_qs) > 0:
balances = self.get_txs_balances(txs_qs=txs_qs, as_dict=True)
return balances[CREDIT] == balances[DEBIT]
return True
def is_cash_involved(self, txs_qs=None):
return ASSET_CA_CASH in self.get_txs_roles(txs_qs=None)
def is_operating(self):
return self.activity in [
self.OPERATING_ACTIVITY
]
def is_financing(self):
return self.activity in [
self.FINANCING_EQUITY,
self.FINANCING_LTD,
self.FINANCING_DIVIDENDS,
self.FINANCING_STD,
self.FINANCING_OTHER
]
def is_investing(self):
return self.activity in [
self.INVESTING_SECURITIES,
self.INVESTING_PPE,
self.INVESTING_OTHER
]
def is_txs_qs_valid(self, txs_qs: TransactionModelQuerySet, raise_exception: bool = True) -> bool:
"""
Validates a given TransactionModelQuerySet against the JournalEntryModel instance.
Parameters
----------
txs_qs: TransactionModelQuerySet
The queryset to validate.
raise_exception: bool
Raises JournalEntryValidationError if TransactionModelQuerySet is not valid.
Raises
------
JournalEntryValidationError if JE model is invalid and raise_exception is True.
Returns
-------
bool
True if valid, otherwise False.
"""
if not isinstance(txs_qs, TransactionModelQuerySet):
raise JournalEntryValidationError('Must pass an instance of TransactionModelQuerySet')
is_valid = all(tx.journal_entry_id == self.uuid for tx in txs_qs)
if not is_valid and raise_exception:
raise JournalEntryValidationError('Invalid TransactionModelQuerySet provided. All Transactions must be ',
f'associated with LedgerModel {self.uuid}')
return is_valid
def get_absolute_url(self) -> str:
return reverse('django_ledger:je-detail',
kwargs={
'je_pk': self.id,
'ledger_pk': self.ledger_id,
# pylint: disable=no-member
'entity_slug': self.ledger.entity.slug
})
def get_entity_unit_name(self, no_unit_name: str = ''):
if self.entity_unit_id:
return self.entity_unit.name
return no_unit_name
def get_entity_last_closing_date(self) -> Optional[date]:
return self.ledger.entity.last_closing_date
def mark_as_posted(self,
commit: bool = False,
verify: bool = True,
force_lock: bool = False,
raise_exception: bool = False,
**kwargs):
"""
Posted transactions show on the EntityModel ledger and financial statements.
Parameters
----------
commit: bool
Commits changes into the Database, Defaults to False.
verify: bool
Verifies JournalEntryModel before marking as posted. Defaults to False.
force_lock: bool
Forces to lock the JournalEntry before is posted.
raise_exception: bool
Raises JournalEntryValidationError if cannot post. Defaults to False.
kwargs: dict
Additional keyword arguments.
"""
if verify and not self.is_verified():
txs_qs, verified = self.verify()
if not len(txs_qs):
raise JournalEntryValidationError(
message=_('Cannot post an empty Journal Entry.')
)
if force_lock and not self.is_locked():
self.mark_as_locked(commit=False, raise_exception=True)
if not self.can_post(ignore_verify=False):
if raise_exception:
raise JournalEntryValidationError(f'Journal Entry {self.uuid} cannot post.'
f' Is verified: {self.is_verified()}')
else:
if not self.is_posted():
self.posted = True
if self.is_posted():
if commit:
self.save(verify=False,
update_fields=[
'posted',
'locked',
'activity',
'updated'
])
def mark_as_unposted(self, commit: bool = False, raise_exception: bool = False, **kwargs):
"""
Un-posted JournalEntryModels do not show on the EntityModel ledger and financial statements.
Parameters
----------
commit: bool
Commits changes into the Database, Defaults to False.
raise_exception: bool
Raises JournalEntryValidationError if cannot post. Defaults to False.
kwargs: dict
Additional keyword arguments.
"""
if not self.can_unpost():
if raise_exception:
raise JournalEntryValidationError(f'Journal Entry {self.uuid} cannot unpost.')
else:
if self.is_posted():
self.posted = False
self.activity = None
if not self.is_posted():
if commit:
self.save(verify=False,
update_fields=[
'posted',
'activity',
'updated'
])
def mark_as_locked(self, commit: bool = False, raise_exception: bool = False, **kwargs):
"""
Locked JournalEntryModels do not allow transactions to be edited.
Parameters
----------
commit: bool
Commits changes into the Database, Defaults to False.
raise_exception: bool
Raises JournalEntryValidationError if cannot lock. Defaults to False.
kwargs: dict
Additional keyword arguments.
"""
if not self.can_lock():
if raise_exception:
raise JournalEntryValidationError(f'Journal Entry {self.uuid} is already locked.')
else:
if not self.is_locked():
self.generate_activity(force_update=True)
self.locked = True
if self.is_locked():
if commit:
self.save(verify=False)
def mark_as_unlocked(self, commit: bool = False, raise_exception: bool = False, **kwargs):
"""
Unlocked JournalEntryModels allow transactions to be edited.
Parameters
----------
commit: bool
Commits changes into the Database, Defaults to False.
raise_exception: bool
Raises JournalEntryValidationError if cannot lock. Defaults to False.
kwargs: dict
Additional keyword arguments.
"""
if not self.can_unlock():
if raise_exception:
raise JournalEntryValidationError(f'Journal Entry {self.uuid} is already unlocked.')
else:
if self.is_locked():
self.locked = False
if not self.is_locked():
if commit:
self.save(verify=False)
def get_transaction_queryset(self, select_accounts: bool = True) -> TransactionModelQuerySet:
"""
Fetches the TransactionModelQuerySet associated with the JournalEntryModel instance.
Parameters
----------
select_accounts: bool
Fetches the associated AccountModel of each transaction. Defaults to True.
Returns
-------
TransactionModelQuerySet
The TransactionModelQuerySet associated with the current JournalEntryModel instance.
"""
if select_accounts:
return self.transactionmodel_set.all().select_related('account')
return self.transactionmodel_set.all()
def get_txs_balances(self,
txs_qs: Optional[TransactionModelQuerySet] = None,
as_dict: bool = False) -> Union[TransactionModelQuerySet, Dict]:
"""
Fetches the sum total of CREDITs and DEBITs associated with the JournalEntryModel instance. This method
performs a reduction/aggregation at the database level and fetches exactly two records. Optionally,
may pass an existing TransactionModelQuerySet if previously fetched. Additional validation occurs to ensure
that all TransactionModels in QuerySet are of the JE instance. Due to JournalEntryModel pre-save validation
and basic rules of accounting, CREDITs and DEBITS will always match.
Parameters
----------
txs_qs: TransactionModelQuerySet
The JE TransactionModelQuerySet to use if previously fetched. Will be validated to make sure all
TransactionModel in QuerySet belong to the JournalEntryModel instance.
as_dict: bool
If True, returns the result as a dictionary, with exactly two keys: 'credit' and 'debit'.
The values will be the total CREDIT or DEBIT amount as Decimal.
Examples
--------
>>> je_model: JournalEntryModel = je_qs.first() # any existing JournalEntryModel QuerySet...
>>> balances = je_model.get_txs_balances()
>>> balances
Returns exactly two records:
<TransactionModelQuerySet [{'tx_type': 'credit', 'amount__sum': Decimal('2301.5')},
{'tx_type': 'debit', 'amount__sum': Decimal('2301.5')}]>
Examples
--------
>>> balances = je_model.get_txs_balances(as_dict=True)
>>> balances
Returns a dictionary:
{'credit': Decimal('2301.5'), 'debit': Decimal('2301.5')}
Raises
------
JournalEntryValidationError
If JE is not valid or TransactionModelQuerySet provided does not belong to JE instance.
Returns
-------
TransactionModelQuerySet or dict
An aggregated queryset containing exactly two records. The total CREDIT or DEBIT amount as Decimal.
"""
if not txs_qs:
txs_qs = self.get_transaction_queryset(select_accounts=False)
else:
if not isinstance(txs_qs, TransactionModelQuerySet):
raise JournalEntryValidationError(
message=f'Must pass a TransactionModelQuerySet. Got {txs_qs.__class__.__name__}'
)
# todo: add maximum transactions per JE model as a setting...
is_valid = self.is_txs_qs_valid(txs_qs)
if not is_valid:
raise JournalEntryValidationError(
message='Invalid Transaction QuerySet used. Must be from same Journal Entry'
)
balances = txs_qs.values('tx_type').annotate(
amount__sum=Coalesce(Sum('amount'),
Decimal('0.00'),
output_field=models.DecimalField()))
if as_dict:
return {
tx['tx_type']: tx['amount__sum'] for tx in balances
}
return balances
def get_txs_roles(self,
txs_qs: Optional[TransactionModelQuerySet] = None,
exclude_cash_role: bool = False) -> Set[str]:
"""
Determines the list of account roles involved in the JournalEntryModel instance.
It reaches into the AccountModel associated with each TransactionModel of the JE to determine a Set of
all roles involved in transactions. This method is important in determining the nature of the
Parameters
----------
txs_qs: TransactionModelQuerySet
Prefetched TransactionModelQuerySet. Will be validated if provided.
Avoids additional DB query if provided.
exclude_cash_role: bool
Removes CASH role from the Set if present.
Useful in some cases where cash role must be excluded for additional validation.
Returns
-------
set
The set of account roles as strings associated with the JournalEntryModel instance.
"""
if not txs_qs:
txs_qs = self.get_transaction_queryset(select_accounts=True)
else:
self.is_txs_qs_valid(txs_qs)
# todo: implement distinct for non SQLite Backends...
if exclude_cash_role:
return set([i.account.role for i in txs_qs if i.account.role != ASSET_CA_CASH])
return set([i.account.role for i in txs_qs])
def has_activity(self) -> bool:
return self.activity is not None
def get_activity_name(self) -> Optional[str]:
"""
Returns a human-readable, GAAP string representing the JournalEntryModel activity.
Returns
-------
str or None
Representing the JournalEntryModel activity in the statement of cash flows.
"""
if self.activity:
if self.is_operating():
return ActivityEnum.OPERATING.value
elif self.is_investing():
return ActivityEnum.INVESTING.value
elif self.is_financing():
return ActivityEnum.FINANCING.value
@classmethod
def get_activity_from_roles(cls,
role_set: Union[List[str], Set[str]],
validate: bool = False,
raise_exception: bool = True) -> Optional[str]:
if validate:
role_set = validate_roles(roles=role_set)
else:
if isinstance(role_set, list):
role_set = set(role_set)
activity = None
# no roles involved
if not len(role_set):
return
# determining if investing....
is_investing_for_ppe = all([
# all roles must be in group
all([r in GROUP_CFS_INVESTING_PPE for r in role_set]),
# at least one role
sum([r in GROUP_CFS_INVESTING_PPE for r in role_set]) > 0,
# at least one role
# sum([r in GROUP_CFS_INV_LTD_OF_PPE for r in role_set]) > 0,
])
is_investing_for_securities = all([
# all roles must be in group
all([r in GROUP_CFS_INVESTING_SECURITIES for r in role_set]),
# at least one role
sum([r in GROUP_CFS_INVESTING_SECURITIES for r in role_set]) > 0,
# at least one role
# sum([r in GROUP_CFS_INV_LTD_OF_SECURITIES for r in role_set]) > 0,
])
# IS INVESTING OTHERS....?
# determining if financing...
is_financing_dividends = all([r in GROUP_CFS_FIN_DIVIDENDS for r in role_set])
is_financing_issuing_equity = all([r in GROUP_CFS_FIN_ISSUING_EQUITY for r in role_set])
is_financing_st_debt = all([r in GROUP_CFS_FIN_ST_DEBT_PAYMENTS for r in role_set])
is_financing_lt_debt = all([r in GROUP_CFS_FIN_LT_DEBT_PAYMENTS for r in role_set])
| is_operating = all([r not in GROUP_CFS_INVESTING_AND_FINANCING for r in role_set]) | 5 | 2023-10-20 01:07:20+00:00 | 8k |
facebookresearch/HighResCanopyHeight | inference.py | [
{
"identifier": "SSLVisionTransformer",
"path": "models/backbone.py",
"snippet": "class SSLVisionTransformer(DinoVisionTransformer):\n \"\"\"Vision Transformer.\n \"\"\"\n\n def __init__(self,\n interpolate_mode='bicubic',\n init_cfg=None,\n pretrain... | import argparse
import os
import torch
import pandas as pd
import numpy as np
import torchvision.transforms as T
import matplotlib.pyplot as plt
import torchmetrics
import torch.nn as nn
import math
import torchvision.transforms.functional as TF
import torchvision
import pytorch_lightning as pl
from pathlib import Path
from tqdm import tqdm
from PIL import Image
from torchvision.utils import save_image
from models.backbone import SSLVisionTransformer
from models.dpt_head import DPTHead
from models.regressor import RNet | 3,937 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the Apache License, Version 2.0
# found in the LICENSE file in the root directory of this source tree.
class SSLAE(nn.Module):
def __init__(self, pretrained=None, classify=True, n_bins=256, huge=False):
super().__init__()
if huge == True:
| # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the Apache License, Version 2.0
# found in the LICENSE file in the root directory of this source tree.
class SSLAE(nn.Module):
def __init__(self, pretrained=None, classify=True, n_bins=256, huge=False):
super().__init__()
if huge == True: | self.backbone = SSLVisionTransformer( | 0 | 2023-10-17 15:31:34+00:00 | 8k |
HLTCHKUST/InstructAlign | run_t2t_finetuning.py | [
{
"identifier": "load_flores_datasets",
"path": "data_utils.py",
"snippet": "def load_flores_datasets(pivot_langs=['eng_Latn'], augmentation='multilingual', num_train_ratio=1.0):\n def inject_lang(row, lang1, lang2):\n row['lang1'] = lang_map[lang1]\n row['lang2'] = lang_map[lang2]\n ... | import logging
import os
import sys
import random
import numpy as np
import pandas as pd
import torch
import transformers
import datasets
from dataclasses import dataclass, field
from typing import Optional
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorWithPadding,
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from peft import prepare_model_for_int8_training
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
from data_utils import load_flores_datasets, load_rehearsal_dataset
from augmentation_utils import do_augment
from prompt_utils import prompt_monolingual, prompt_translation, prompt_xss, prompt_bilingual | 4,389 | if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load the datasets
raw_datasets = load_flores_datasets(pivot_langs=['eng_Latn'], augmentation=data_args.augmentation_type, num_train_ratio=data_args.num_train_ratio)
# raw_datasets = load_flores_datasets(pivot_langs=['eng_Latn', 'ind_Latn'], augmentation=data_args.augmentation_type)
print('=============')
print('raw_datasets')
print(raw_datasets)
print('=============')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if config.is_encoder_decoder:
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
# device_map='auto',
# load_in_8bit=True
)
else:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
# device_map='auto',
# load_in_8bit=True
)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Model size: ', count_parameters(model))
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
column_names = raw_datasets["train"].column_names
# Handle Continual Flag
if data_args.continual_type is not None:
# Append training data with rehearsal
# (sample_en_dset, sample_id_dset) = load_rehearsal_dataset(n_samples=data_args.continual_size, random_seed=training_args.seed)
# raw_datasets["train"] = datasets.interleave_datasets([
# datasets.Dataset.from_list(list(sample_en_dset)), datasets.Dataset.from_list(list(sample_id_dset)), raw_datasets["train"]
# ], stopping_strategy='all_exhausted')
sample_dset = load_rehearsal_dataset(n_samples=data_args.continual_size, random_seed=training_args.seed)
sample_dset = datasets.Dataset.from_list(list(sample_dset))
raw_datasets["train"] = datasets.interleave_datasets([sample_dset, raw_datasets["train"]], stopping_strategy='all_exhausted')
def self_prompt(sent1, sent2, lang1, lang2, augmentation_type, is_encoder_decoder):
# Random Choice
if augmentation_type == 'random':
augmentation_type = random.choice(['monolingual', 'translation', 'bilingual'])
elif augmentation_type == 'random-xss':
augmentation_type = random.choice(['monolingual', 'translation', 'bilingual', 'xss'])
elif augmentation_type == 'pair':
augmentation_type = random.choice(['translation', 'bilingual'])
elif augmentation_type == 'pair-xss':
augmentation_type = random.choice(['translation', 'bilingual', 'xss'])
elif augmentation_type == 'bilingual-xss':
augmentation_type = random.choice(['bilingual', 'xss'])
else:
augmentation_types = augmentation_type.split(',')
augmentation_type = random.choice(augmentation_types)
if augmentation_type == 'monolingual':
rand_proba = random.random()
aug_list = None
if rand_proba < 0.24:
aug_list = ['infilling']
elif rand_proba < 0.48:
aug_list = ['deletion']
elif rand_proba < 0.72:
aug_list = ['permutation']
elif rand_proba < 0.8:
aug_list = ['infilling', 'deletion']
elif rand_proba < 0.88:
aug_list = ['infilling', 'permutation']
elif rand_proba < 0.96:
aug_list = ['deletion', 'permutation']
else: # elif rand_proba < 1.0:
aug_list = ['infilling', 'deletion', 'permutation']
# Apply monolingual perturbation
src_text = sent1
tgt_text = sent1
for aug in aug_list:
| #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Parsed from the command line (or a JSON file) by ``HfArgumentParser``;
    each field's ``metadata["help"]`` doubles as its CLI help text.
    """
    # Required: the only field without a default value.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Parsed from the command line (or a JSON file) by ``HfArgumentParser``;
    each field's ``metadata["help"]`` doubles as its CLI help text.
    """
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_source_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to model maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    num_beams: Optional[int] = field(
        default=1,
        metadata={
            "help": (
                "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
                "which is used during ``evaluate`` and ``predict``."
            )
        },
    )
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={
            "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
        },
    )
    augmentation_type: str = field(
        default='monolingual',
        metadata={
            "help": "Mode for data augmentation (monolingual / translation / bilingual / random)."
        },
    )
    continual_type: str = field(
        default=None,
        metadata={
            "help": "Mode for continual learning method (rehearsal / None)."
        },
    )
    continual_size: int = field(
        default=100,
        metadata={
            # Fixed: help text was a copy-paste of augmentation_type's; this value
            # is passed as n_samples to load_rehearsal_dataset().
            "help": "Number of rehearsal samples to use for continual learning."
        },
    )
    num_train_ratio: float = field(
        default=1.0,
        metadata={
            # Fixed: this is a float ratio (default 1.0), not a sample count.
            "help": "Ratio of the FLORES training samples to use."
        },
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load the datasets
raw_datasets = load_flores_datasets(pivot_langs=['eng_Latn'], augmentation=data_args.augmentation_type, num_train_ratio=data_args.num_train_ratio)
# raw_datasets = load_flores_datasets(pivot_langs=['eng_Latn', 'ind_Latn'], augmentation=data_args.augmentation_type)
print('=============')
print('raw_datasets')
print(raw_datasets)
print('=============')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if config.is_encoder_decoder:
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
# device_map='auto',
# load_in_8bit=True
)
else:
model = AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
# device_map='auto',
# load_in_8bit=True
)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Model size: ', count_parameters(model))
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
column_names = raw_datasets["train"].column_names
# Handle Continual Flag
if data_args.continual_type is not None:
# Append training data with rehearsal
# (sample_en_dset, sample_id_dset) = load_rehearsal_dataset(n_samples=data_args.continual_size, random_seed=training_args.seed)
# raw_datasets["train"] = datasets.interleave_datasets([
# datasets.Dataset.from_list(list(sample_en_dset)), datasets.Dataset.from_list(list(sample_id_dset)), raw_datasets["train"]
# ], stopping_strategy='all_exhausted')
sample_dset = load_rehearsal_dataset(n_samples=data_args.continual_size, random_seed=training_args.seed)
sample_dset = datasets.Dataset.from_list(list(sample_dset))
raw_datasets["train"] = datasets.interleave_datasets([sample_dset, raw_datasets["train"]], stopping_strategy='all_exhausted')
def self_prompt(sent1, sent2, lang1, lang2, augmentation_type, is_encoder_decoder):
# Random Choice
if augmentation_type == 'random':
augmentation_type = random.choice(['monolingual', 'translation', 'bilingual'])
elif augmentation_type == 'random-xss':
augmentation_type = random.choice(['monolingual', 'translation', 'bilingual', 'xss'])
elif augmentation_type == 'pair':
augmentation_type = random.choice(['translation', 'bilingual'])
elif augmentation_type == 'pair-xss':
augmentation_type = random.choice(['translation', 'bilingual', 'xss'])
elif augmentation_type == 'bilingual-xss':
augmentation_type = random.choice(['bilingual', 'xss'])
else:
augmentation_types = augmentation_type.split(',')
augmentation_type = random.choice(augmentation_types)
if augmentation_type == 'monolingual':
rand_proba = random.random()
aug_list = None
if rand_proba < 0.24:
aug_list = ['infilling']
elif rand_proba < 0.48:
aug_list = ['deletion']
elif rand_proba < 0.72:
aug_list = ['permutation']
elif rand_proba < 0.8:
aug_list = ['infilling', 'deletion']
elif rand_proba < 0.88:
aug_list = ['infilling', 'permutation']
elif rand_proba < 0.96:
aug_list = ['deletion', 'permutation']
else: # elif rand_proba < 1.0:
aug_list = ['infilling', 'deletion', 'permutation']
# Apply monolingual perturbation
src_text = sent1
tgt_text = sent1
for aug in aug_list: | src_text = do_augment(src_text, aug) | 2 | 2023-10-24 07:46:05+00:00 | 8k |
acolas1/KGSimple | T5/data.py | [
{
"identifier": "COCO",
"path": "eval_webnlg/pycocotools/coco.py",
"snippet": "class COCO(object):\n def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str): location o... | import os
import json
import re
import string
import numpy as np
import sys
import copy
import random
import time
import torch
from tqdm import tqdm
from torch.utils.data import Dataset, TensorDataset, DataLoader, RandomSampler, SequentialSampler
from eval_webnlg.pycocotools.coco import COCO
from eval_webnlg.pycocoevalcap.eval import COCOEvalCap | 4,561 |
def run_coco_eval(data_ref, data_sys):
"""Run the COCO evaluator, return the resulting evaluation object (contains both
system- and segment-level scores."""
# convert references and system outputs to MS-COCO format in-memory
coco_ref = create_coco_refs(data_ref)
coco_sys = create_coco_sys(data_sys)
print('Running MS-COCO evaluator...', file=sys.stderr)
|
def run_coco_eval(data_ref, data_sys):
"""Run the COCO evaluator, return the resulting evaluation object (contains both
system- and segment-level scores."""
# convert references and system outputs to MS-COCO format in-memory
coco_ref = create_coco_refs(data_ref)
coco_sys = create_coco_sys(data_sys)
print('Running MS-COCO evaluator...', file=sys.stderr) | coco = COCO() | 0 | 2023-10-24 13:24:23+00:00 | 8k |
SKYeve/Transcript-Combiner | pull_images.py | [
{
"identifier": "YoudaoNoteApi",
"path": "youDaoNoteApi.py",
"snippet": "class YoudaoNoteApi(object):\r\n \"\"\"\r\n 有道云笔记 API 封装\r\n 原理:https://depp.wang/2020/06/11/how-to-find-the-api-of-a-website-eg-note-youdao-com/\r\n \"\"\"\r\n\r\n ROOT_ID_URL = 'https://note.youdao.com/yws/api/pers... | import re
import os
import glob
import requests
from typing import Tuple
from urllib import parse
from urllib.parse import urlparse
from youDaoNoteApi import YoudaoNoteApi
from public import covert_config
| 4,218 | try:
response = self.youdaonote_api.http_get(url)
except requests.exceptions.ProxyError as err:
error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))
print(error_msg)
return ''
content_type = response.headers.get('Content-Type')
file_type = '图片'
if response.status_code != 200 or not content_type:
error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,
file_type)
print(error_msg)
return ''
# 默认下载图片到 images 文件夹
file_dirname = IMAGES
# 后缀 png 和 jpeg 后可能出现 ; `**.png;`, 原因未知
content_type_arr = content_type.split('/')
file_suffix = '.' + content_type_arr[1].replace(';', '') if len(content_type_arr) == 2 else "jpg"
local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)
if not os.path.exists(local_file_dir):
os.mkdir(local_file_dir)
file_name = os.path.basename(os.path.splitext(file_path)[0])
file_name = self._optimize_file_name(file_name)
#请求后的真实的URL中才有东西
realUrl = parse.parse_qs(urlparse(response.url).query)
real_filename = realUrl.get('filename')
if real_filename:
# dict 不为空时,去获取真实文件名称
read_file_name = real_filename[0]
file_suffix = '.' + read_file_name.split('.')[-1]
file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix
else:
file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix
local_file_path = os.path.join(local_file_dir, file_name)
# 使md附件或者图片的路径分隔符为"/"
local_file_path = local_file_path.replace('\\', '/')
try:
with open(local_file_path, 'wb') as f:
f.write(response.content) # response.content 本身就为字节类型
print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))
except:
error_msg = '{} {}有误!'.format(url, file_type)
print(error_msg)
return ''
return local_file_path
def _download_attach_url(self, file_path, url,attach_name=None) -> str:
"""
下载文件到本地,返回本地路径
:param file_path:
:param url:
:param attach_name:
:return: path
"""
try:
response = self.youdaonote_api.http_get(url)
except requests.exceptions.ProxyError as err:
error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))
print(error_msg)
return ''
content_type = response.headers.get('Content-Type')
file_type = '附件'
if response.status_code != 200 or not content_type:
error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,file_type)
print(error_msg)
return ''
file_dirname = ATTACH
attach_name = self._optimize_file_name(attach_name)
file_suffix = attach_name
local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)
if not os.path.exists(local_file_dir):
os.mkdir(local_file_dir)
local_file_path: str = os.path.join(local_file_dir,file_suffix)
# 使md附件或者图片的路径分隔符为"/"
local_file_path = local_file_path.replace('\\', '/')
try:
with open(local_file_path, 'wb') as f:
f.write(response.content) # response.content 本身就为字节类型
print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))
except:
error_msg = '{} {}有误!'.format(url, file_type)
print(error_msg)
return ''
return local_file_path
def _optimize_file_name(self, name) -> str:
"""
优化文件名,替换下划线
:param name:
:return:
"""
# 去除换行符,首尾的空格,文件名有空格识别不出图片
name = name.strip()
regex_symbol = re.compile(r'[\\/:\*\?"<>\|、]') # 符号:\ / : * ? " < > | ( )
name = regex_symbol.sub('_', name)
return name
def login(self):
self.youdaonote_api = YoudaoNoteApi()
error_msg = self.youdaonote_api.login_by_cookies()
if error_msg:
return '', error_msg
def load_config(self):
|
# Markdown image links pointing at note.youdao.com; group 1 captures the URL.
REGEX_IMAGE_URL = re.compile(r'!\[.*?\]\((.*?note\.youdao\.com.*?)\)')
# Markdown attachment links on note.youdao.com; captures (name, url, scheme).
REGEX_ATTACH = re.compile(r'\[(.*?)\]\(((http|https)://note\.youdao\.com.*?)\)')
MARKDOWN_SUFFIX = '.md'
NOTE_SUFFIX = '.note'
# Local directory (next to each note) for images downloaded from YoudaoNote.
# IMAGES = 'images'
IMAGES = 'attachments'
# Local directory (next to each note) for attachments downloaded from YoudaoNote.
ATTACH = 'attachments'
CONFIG_PATH = 'config.json'
class PullImages():
def __init__(self, youdaonote_api=None, smms_secret_token: str=None, is_relative_path: bool=None):
self.youdaonote_api = youdaonote_api
self.smms_secret_token = smms_secret_token
self.is_relative_path = is_relative_path # 是否使用相对路径
if not self.smms_secret_token and not self.is_relative_path:
self.load_config()
if not self.youdaonote_api:
self.login()
    def migration_ydnote_url(self, file_path):
        """
        Rewrite YoudaoNote URLs inside a Markdown file, in place.

        Downloads every note.youdao.com image/attachment referenced by the
        file and replaces the remote URLs with local paths (relative paths
        when ``is_relative_path`` is set, to satisfy the Obsidian format).

        :param file_path: path of the Markdown file to rewrite
        :return: None
        """
        with open(file_path, 'rb') as f:
            content = f.read().decode('utf-8')
        # Images
        image_urls = REGEX_IMAGE_URL.findall(content)
        if len(image_urls) > 0:
            print('正在转换有道云笔记「{}」中的有道云图片链接...'.format(file_path))
            for index,image_url in enumerate(image_urls):
                image_path = self._get_new_image_path(file_path, image_url,index)
                if image_url == image_path:
                    continue
                # Replace the absolute path with a relative one (Obsidian
                # format): keep only the part of image_path starting at the
                # images folder.
                if self.is_relative_path:
                    image_path = image_path[image_path.find(IMAGES):]
                    # NOTE(review): self.url_encode is not defined in the
                    # visible code — confirm it exists on this class.
                    image_path = self.url_encode(image_path)
                content = content.replace(image_url, image_path)
        # Attachments
        attach_name_and_url_list = REGEX_ATTACH.findall(content)
        if len(attach_name_and_url_list) > 0:
            print('正在转换有道云笔记「{}」中的有道云附件链接...'.format(file_path))
            for attach_name_and_url in attach_name_and_url_list:
                attach_url = attach_name_and_url[1]
                attach_path = self._download_attach_url(file_path, attach_url, attach_name_and_url[0])
                if not attach_path:
                    continue
                # Keep only the part of attach_path starting at the
                # attachments folder when relative paths are requested.
                if self.is_relative_path:
                    attach_path = attach_path[attach_path.find(ATTACH):]
                content = content.replace(attach_url, attach_path)
        with open(file_path, 'wb') as f:
            f.write(content.encode())
        return
    def _get_new_image_path(self, file_path, image_url,index) -> str:
        """
        Convert a remote image URL into its replacement link.

        :param file_path: path of the Markdown file that references the image
        :param image_url: the note.youdao.com image URL
        :param index: running number used to build a unique local file name
        :return: the new link (SM.MS URL or local path); falls back to the
            original URL when downloading fails
        """
        # smms_secret_token empty (no upload to SM.MS): download the image locally.
        if not self.smms_secret_token:
            image_path = self._download_image_url(file_path, image_url,index)
            return image_path or image_url
        # smms_secret_token set: upload the image to SM.MS.
        # NOTE(review): ImageUpload is not imported in the visible code —
        # confirm it is available in this module before relying on this branch.
        new_file_url, error_msg = ImageUpload.upload_to_smms(youdaonote_api=self.youdaonote_api, image_url=image_url,
                                                             smms_secret_token=self.smms_secret_token)
        # If the upload failed, still download the image locally.
        if not error_msg:
            return new_file_url
        print(error_msg)
        image_path = self._download_image_url(file_path, image_url,index)
        return image_path or image_url
def _download_image_url(self, file_path, url,index) -> str:
"""
下载文件到本地,返回本地路径
:param file_path:
:param url:
:param attach_name:
:return: path
"""
try:
response = self.youdaonote_api.http_get(url)
except requests.exceptions.ProxyError as err:
error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))
print(error_msg)
return ''
content_type = response.headers.get('Content-Type')
file_type = '图片'
if response.status_code != 200 or not content_type:
error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,
file_type)
print(error_msg)
return ''
# 默认下载图片到 images 文件夹
file_dirname = IMAGES
# 后缀 png 和 jpeg 后可能出现 ; `**.png;`, 原因未知
content_type_arr = content_type.split('/')
file_suffix = '.' + content_type_arr[1].replace(';', '') if len(content_type_arr) == 2 else "jpg"
local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)
if not os.path.exists(local_file_dir):
os.mkdir(local_file_dir)
file_name = os.path.basename(os.path.splitext(file_path)[0])
file_name = self._optimize_file_name(file_name)
#请求后的真实的URL中才有东西
realUrl = parse.parse_qs(urlparse(response.url).query)
real_filename = realUrl.get('filename')
if real_filename:
# dict 不为空时,去获取真实文件名称
read_file_name = real_filename[0]
file_suffix = '.' + read_file_name.split('.')[-1]
file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix
else:
file_name = os.path.basename(os.path.splitext(file_path)[0]) + '_image_' + str(index) + file_suffix
local_file_path = os.path.join(local_file_dir, file_name)
# 使md附件或者图片的路径分隔符为"/"
local_file_path = local_file_path.replace('\\', '/')
try:
with open(local_file_path, 'wb') as f:
f.write(response.content) # response.content 本身就为字节类型
print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))
except:
error_msg = '{} {}有误!'.format(url, file_type)
print(error_msg)
return ''
return local_file_path
def _download_attach_url(self, file_path, url,attach_name=None) -> str:
"""
下载文件到本地,返回本地路径
:param file_path:
:param url:
:param attach_name:
:return: path
"""
try:
response = self.youdaonote_api.http_get(url)
except requests.exceptions.ProxyError as err:
error_msg = '网络错误,「{}」下载失败。错误提示:{}'.format(url, format(err))
print(error_msg)
return ''
content_type = response.headers.get('Content-Type')
file_type = '附件'
if response.status_code != 200 or not content_type:
error_msg = '下载「{}」失败!{}可能已失效,可浏览器登录有道云笔记后,查看{}是否能正常加载'.format(url, file_type,file_type)
print(error_msg)
return ''
file_dirname = ATTACH
attach_name = self._optimize_file_name(attach_name)
file_suffix = attach_name
local_file_dir = os.path.join(os.path.dirname(file_path),file_dirname)
if not os.path.exists(local_file_dir):
os.mkdir(local_file_dir)
local_file_path: str = os.path.join(local_file_dir,file_suffix)
# 使md附件或者图片的路径分隔符为"/"
local_file_path = local_file_path.replace('\\', '/')
try:
with open(local_file_path, 'wb') as f:
f.write(response.content) # response.content 本身就为字节类型
print('已将{}「{}」转换为「{}」'.format(file_type, url, local_file_path))
except:
error_msg = '{} {}有误!'.format(url, file_type)
print(error_msg)
return ''
return local_file_path
def _optimize_file_name(self, name) -> str:
"""
优化文件名,替换下划线
:param name:
:return:
"""
# 去除换行符,首尾的空格,文件名有空格识别不出图片
name = name.strip()
regex_symbol = re.compile(r'[\\/:\*\?"<>\|、]') # 符号:\ / : * ? " < > | ( )
name = regex_symbol.sub('_', name)
return name
def login(self):
self.youdaonote_api = YoudaoNoteApi()
error_msg = self.youdaonote_api.login_by_cookies()
if error_msg:
return '', error_msg
def load_config(self):
| config_dict, error_msg = covert_config(CONFIG_PATH)
| 1 | 2023-10-17 11:21:50+00:00 | 8k |
JerBouma/FinancePortfolio | financeportfolio/portfolio_controller.py | [
{
"identifier": "excel_model",
"path": "financeportfolio/excel_model.py",
"snippet": "def create_portfolio_performance_excel_report(\n writer: pd.ExcelWriter, dataset: pd.DataFrame, sheet_name: str, currency: str = \"$\"\n):\ndef create_transactions_performance_excel_report(\n writer: pd.ExcelWrit... | import pandas as pd
from financetoolkit import Toolkit
from financeportfolio import excel_model, helpers, portfolio_model
| 3,743 | """
Read and consolidate cash flow data from Excel or CSV files into a single DataFrame.
This function reads cash flow data from one or more Excel or CSV files specified by the
'excel_location' parameter. It can accept a single file path as a string or a list of file
paths. If 'excel_location' is not provided, it will use the default file location from the
configuration ('self._cfg["general"]["file_location"]').
The function identifies additional files within directories specified in 'excel_location'
and includes them in the data consolidation. It supports Excel (.xlsx) and CSV (.csv) file
formats.
If the cash flow dataset is initially empty, it reads and consolidates the data, performs
optional adjustments for duplicated rows, and sets column names to lowercase. The resulting
dataset is sorted by index in descending order and has its index converted to daily frequency
('D').
Next to that, this function performs various formatting and preprocessing steps to ensure
data consistency and facilitate analysis. It includes options to customize column names
for dates, descriptions, amounts, and cost/income categories.
Parameters:
excel_location (str | list | None): A file path or a list of file paths to Excel or CSV
files containing cash flow data. If None, the default file location from the
configuration is used.
adjust_duplicates (bool | None): A boolean value indicating whether to adjust duplicated
rows in the dataset. If None, it defaults to the value specified in the configuration
('self._cfg["general"]["adjust_duplicates"]').
date_column (list[str] | None): A list of column names representing date information
in the dataset. If None, it defaults to the date columns specified in the
configuration ('self._cfg["general"]["date_columns"]').
date_format (str | None): A string representing the date format in the dataset. If None,
it defaults to the date format specified in the configuration ('self._cfg["general"]["date_format"]').
description_columns (list[str] | None): A list of column names representing
transaction descriptions in the dataset. If None, it defaults to the description
columns specified in the configuration ('self._cfg["general"]["description_columns"]').
amount_column (list[str] | None): A list of column names representing transaction
amounts in the dataset. If None, it defaults to the amount columns specified in
the configuration ('self._cfg["general"]["amount_columns"]').
cost_or_income_column (list[str] | None): A list of column names representing
cost or income categories in the dataset. If None, it defaults to the cost/income
columns specified in the configuration ('self._cfg["general"]["cost_or_income_columns"]').
decimal_seperator (str | None): A string representing the decimal separator used in
the dataset. If None, it defaults to the decimal separator specified in the
configuration ('self._cfg["general"]["decimal_seperator"]').
Returns:
pd.DataFrame: A DataFrame containing the consolidated cash flow data.
Raises:
FileNotFoundError: If any of the specified files or directories in 'excel_location'
cannot be found.
ValueError: If essential columns (date, description, amount) are not found in the dataset.
- For missing columns, specify them in the configuration or provide them explicitly.
- For cost or income columns, raise an exception if not found and configuration is empty.
Note:
- Duplicates in individual datasets are adjusted based on configuration settings
('self._cfg["general"]["adjust_duplicates"]').
- If duplicates are found in the combination of datasets, they are removed to prevent
double-counting.
- The function handles formatting of date columns, converting them to datetime objects.
- Transaction description columns are converted to categorical data.
- Transaction amount columns are converted to float, with support for different decimal separators.
- Cost or income columns are converted to categorical data, with optional customization.
"""
date_column = (
date_column if date_column else self._cfg["general"]["date_columns"]
)
date_format = (
date_format if date_format else self._cfg["general"]["date_format"]
)
name_columns = (
name_columns if name_columns else self._cfg["general"]["name_columns"]
)
ticker_columns = (
ticker_columns if ticker_columns else self._cfg["general"]["ticker_columns"]
)
price_columns = (
price_columns if price_columns else self._cfg["general"]["price_columns"]
)
volume_columns = (
volume_columns if volume_columns else self._cfg["general"]["volume_columns"]
)
currency_columns = (
currency_columns
if currency_columns
else self._cfg["adjustments"]["currency_columns"]
)
costs_columns = (
costs_columns if costs_columns else self._cfg["general"]["costs_columns"]
)
column_mapping = (
column_mapping if column_mapping else self._cfg["general"]["column_mapping"]
)
if self._portfolio_dataset.empty:
if not self._custom_dataset.empty:
(
self._portfolio_dataset,
self._date_column,
self._name_column,
self._ticker_column,
self._price_column,
self._volume_column,
self._currency_column,
self._costs_column,
| """Portfolio Module"""
# pylint: disable=too-many-instance-attributes,abstract-class-instantiated,
# pylint: disable=too-few-public-methods,protected-access,too-many-lines
class Portfolio:
"""
A class for managing and analyzing your portfolio.
This class provides functionality for loading, preprocessing, categorizing, and analyzing
cash flow data based on a specified configuration file. It offers methods to read and format
the dataset, apply cost or income indicators, categorize transactions, and create periodical
cash flow overviews.
Parameters:
configuration_file (str): The file path to the configuration file in YAML format. The
configuration file should define various settings and columns used in cash flow
analysis.
Attributes:
_configuration_file (str): The file path to the configuration file.
_cash_flow_dataset (pd.DataFrame): The cash flow dataset as a pandas DataFrame.
Note:
- The configuration file should be in YAML format and contain settings for date columns,
description columns, amount columns, and optionally cost/income columns.
- Initialize an instance of this class to begin cash flow analysis.
"""
    def __init__(
        self,
        configuration_file: str | None = None,
        portfolio_dataset: pd.DataFrame = pd.DataFrame(),
        example: bool = False,
    ):
        """
        Initialize a Portfolio instance with the provided configuration file.

        Loads (or downloads) the YAML configuration, defines default attributes
        and, unless the configuration still contains the 'REPLACE_ME'
        placeholder and no custom dataset was given, reads the portfolio dataset.

        Parameters:
            configuration_file (str | None): The file path to the configuration file
                in YAML format. When None, a new configuration file is downloaded.
            portfolio_dataset (pd.DataFrame): Optional custom portfolio dataset used
                instead of the file location defined in the configuration.
            example (bool): When True, download an example configuration and example
                datasets that illustrate the functionality of the class.

        Raises:
            ValueError: If the provided configuration file does not have a '.yaml'
                extension. Only '.yaml' configuration files are supported.
        """
        if example:
            configuration_file = helpers.download_yaml_configuration(example=True)
            helpers.download_example_datasets()
            print(
                f"Creating new Portfolio Configuration file at {configuration_file} and "
                "downloading example datasets.\nRunning the Portfolio class with this example "
                "dataset which illustrates the functionality of the Portfolio class."
            )
        elif configuration_file is None:
            configuration_file = helpers.download_yaml_configuration(example=False)
            print(
                f"Creating new Portfolio file at {configuration_file}. Please provide this file "
                "path to the Portfolio class to prevent overwriting the existing file."
            )
        self._configuration_file = str(configuration_file)
        self._custom_dataset = portfolio_dataset
        # Periodical overviews and cash flow datasets (filled lazily).
        self._yearly_overview: pd.DataFrame = pd.DataFrame()
        self._quarterly_overview: pd.DataFrame = pd.DataFrame()
        self._monthly_overview: pd.DataFrame = pd.DataFrame()
        self._yearly_cash_flow_dataset: pd.DataFrame = pd.DataFrame()
        self._quarterly_cash_flow_dataset: pd.DataFrame = pd.DataFrame()
        self._monthly_cash_flow_dataset: pd.DataFrame = pd.DataFrame()
        # Tickers
        self._ticker_combinations: dict[str, str] = {}
        self._original_ticker_combinations: dict[str, str] = {}
        # Historical Data
        self._daily_historical_data: pd.DataFrame = pd.DataFrame()
        self._weekly_historical_data: pd.DataFrame = pd.DataFrame()
        self._monthly_historical_data: pd.DataFrame = pd.DataFrame()
        self._quarterly_historical_data: pd.DataFrame = pd.DataFrame()
        self._yearly_historical_data: pd.DataFrame = pd.DataFrame()
        self._historical_statistics: pd.DataFrame = pd.DataFrame()
        # Benchmark Historical Data
        self._benchmark_tickers: dict[str, str] = {}
        self._daily_benchmark_data: pd.DataFrame = pd.DataFrame()
        self._weekly_benchmark_data: pd.DataFrame = pd.DataFrame()
        self._monthly_benchmark_data: pd.DataFrame = pd.DataFrame()
        self._quarterly_benchmark_data: pd.DataFrame = pd.DataFrame()
        self._yearly_benchmark_data: pd.DataFrame = pd.DataFrame()
        self._benchmark_prices: pd.DataFrame = pd.DataFrame()
        self._benchmark_specific_prices: pd.Series = pd.Series()
        self._benchmark_prices_per_ticker: pd.DataFrame = pd.DataFrame()
        self._latest_benchmark_price: pd.Series = pd.Series()
        # Portfolio Overview
        self._portfolio_overview: pd.DataFrame = pd.DataFrame()
        self._portfolio_performance: pd.DataFrame = pd.DataFrame()
        self._transactions_performance: pd.DataFrame = pd.DataFrame()
        self._portfolio_dataset: pd.DataFrame = pd.DataFrame()
        self._positions_overview: pd.DataFrame = pd.DataFrame()
        self._transactions_overview: pd.DataFrame = pd.DataFrame()
        # Finance Toolkit Initialization (created lazily by to_toolkit and friends).
        self._tickers: list | None = None
        self._toolkit: Toolkit | None = None
        self._benchmark_toolkit: Toolkit | None = None
        self._currency_toolkit: Toolkit | None = None
        self._latest_price: pd.Series = pd.Series()
        self._daily_currency_data: pd.DataFrame = pd.DataFrame()
        if self._configuration_file.endswith(".yaml"):
            self._cfg: dict[str, dict] = helpers.read_yaml_file(
                location=self._configuration_file
            )
        else:
            raise ValueError("File type not supported. Please use .yaml")
        if (
            self._cfg["general"]["file_location"] == "REPLACE_ME"
            and self._custom_dataset.empty
        ):
            print(
                f"{helpers.Style.BOLD}Please provide a file location in the configuration file (change "
                f"'REPLACE_ME' within the general section) or provide a custom dataset.{helpers.Style.RESET}"
                "\nSee https://github.com/JerBouma/FinancePortfolio for instructions"
            )
        else:
            # Column Names
            self._date_column: str = self._cfg["general"]["date_columns"]
            self._name_column: str = self._cfg["general"]["name_columns"]
            self._ticker_column: str = self._cfg["general"]["ticker_columns"]
            self._price_column: str = self._cfg["general"]["price_columns"]
            self._volume_column: str = self._cfg["general"]["volume_columns"]
            self._costs_column: str = self._cfg["general"]["costs_columns"]
            self.read_portfolio_dataset()
def to_toolkit(
self,
api_key: str | None = None,
quarterly: bool = False,
custom_ratios: dict | None = None,
rounding: int = 4,
remove_invalid_tickers: bool = False,
sleep_timer: bool = False,
progress_bar: bool = True,
) -> Toolkit:
"""
Converts the Portfolio to a Finance Toolkit object.
This method allows you to convert your Portfolio to a Finance Toolkit object,
giving access to 30+ years of fundamental and historical data, 130+ financial
metrics and much more. It intentilligently understands the assets you have
purchased and generated a "Portfolio" column automatically which is based off
your portfolio weights and the assets you have purchased. This allows you to
easily calculate portfolio metrics such as the Sharpe Ratio, Sortino Ratio,
Treynor Ratio, Value at Risk and many more that would fit precisely to your
portfolio.
Args:
api_key (str, optional):
Your API key for access to additional data. If not provided, only historical
data and indicators are available.
start_date (str, optional):
The start date for historical data retrieval. If not provided, it defaults
to the earliest available date.
end_date (str, optional):
The end date for historical data retrieval. If not provided, it defaults to
the current date.
quarterly (bool, optional):
Set to True to retrieve quarterly data. Defaults to False.
risk_free_rate (str, optional):
The risk-free rate used for calculations. Defaults to "10y".
benchmark_ticker (str, optional):
The benchmark ticker symbol. Defaults to "^GSPC".
custom_ratios (dict, optional):
Custom ratios to calculate. Should be a dictionary of ratio names and formulas.
rounding (int, optional):
The number of decimal places to round data. Defaults to 4.
remove_invalid_tickers (bool, optional):
Remove invalid tickers from the toolkit. Defaults to True.
sleep_timer (bool, optional):
Enable a sleep timer to avoid rate limiting. Defaults to False.
progress_bar (bool, optional):
Show a progress bar during data retrieval. Defaults to True.
Returns:
Toolkit:
A Finance Toolkit object.
"""
if api_key is None:
print(
"The parameter api_key is not set. Therefore, only historical data and "
"indicators are available. Consider obtaining a key with the following link: "
"https://intelligence.financialmodelingprep.com/pricing-plans?couponCode=jeroen"
"\nThe free plan has a limit of 5 years fundamental data and has no quarterly data. "
"You can get 15% off by using the above affiliate link to get access to 30+ years "
"of (quarterly) data which also supports the project."
)
if self._daily_historical_data.empty:
self.collect_historical_data()
if self._daily_benchmark_data.empty:
self.collect_benchmark_historical_data()
if self._positions_overview.empty:
self.get_positions_overview()
symbols = list(self._tickers) + ["Portfolio"] # type: ignore
historical_columns = self._daily_historical_data.columns.get_level_values(
0
).unique()
benchmark_ticker = self._cfg["general"]["benchmark_ticker"]
benchmark_data = self._daily_benchmark_data.xs(
benchmark_ticker, axis=1, level=1
)
for column in historical_columns:
self._daily_historical_data[column, "Benchmark"] = benchmark_data[column]
self._daily_historical_data[column, "Portfolio"] = (
self._positions_overview["Current Weight"]
.mul(self._daily_historical_data[column], axis=1)
.sum(axis=1)
)
historical = (
self._daily_historical_data.sort_index(axis=1)
.reindex(historical_columns, axis=1, level=0)
.reindex(list(self._tickers) + ["Portfolio", "Benchmark"], axis=1, level=1) # type: ignore
)
historical = historical.round(rounding)
toolkit = Toolkit(
tickers=symbols,
api_key=api_key,
historical=historical,
start_date=self._start_date,
quarterly=quarterly,
benchmark_ticker=benchmark_ticker,
custom_ratios=custom_ratios,
rounding=rounding,
remove_invalid_tickers=remove_invalid_tickers,
sleep_timer=sleep_timer,
progress_bar=progress_bar,
)
return toolkit
def read_portfolio_dataset(
self,
excel_location: str | list | None = None,
adjust_duplicates: bool | None = None,
date_column: list[str] | None = None,
date_format: str | None = None,
name_columns: list[str] | None = None,
ticker_columns: list[str] | None = None,
price_columns: list[str] | None = None,
volume_columns: list[str] | None = None,
currency_columns: list[str] | None = None,
costs_columns: list[str] | None = None,
column_mapping: dict[str, str] | None = None,
):
"""
Read and consolidate cash flow data from Excel or CSV files into a single DataFrame.
This function reads cash flow data from one or more Excel or CSV files specified by the
'excel_location' parameter. It can accept a single file path as a string or a list of file
paths. If 'excel_location' is not provided, it will use the default file location from the
configuration ('self._cfg["general"]["file_location"]').
The function identifies additional files within directories specified in 'excel_location'
and includes them in the data consolidation. It supports Excel (.xlsx) and CSV (.csv) file
formats.
If the cash flow dataset is initially empty, it reads and consolidates the data, performs
optional adjustments for duplicated rows, and sets column names to lowercase. The resulting
dataset is sorted by index in descending order and has its index converted to daily frequency
('D').
Next to that, this function performs various formatting and preprocessing steps to ensure
data consistency and facilitate analysis. It includes options to customize column names
for dates, descriptions, amounts, and cost/income categories.
Parameters:
excel_location (str | list | None): A file path or a list of file paths to Excel or CSV
files containing cash flow data. If None, the default file location from the
configuration is used.
adjust_duplicates (bool | None): A boolean value indicating whether to adjust duplicated
rows in the dataset. If None, it defaults to the value specified in the configuration
('self._cfg["general"]["adjust_duplicates"]').
date_column (list[str] | None): A list of column names representing date information
in the dataset. If None, it defaults to the date columns specified in the
configuration ('self._cfg["general"]["date_columns"]').
date_format (str | None): A string representing the date format in the dataset. If None,
it defaults to the date format specified in the configuration ('self._cfg["general"]["date_format"]').
description_columns (list[str] | None): A list of column names representing
transaction descriptions in the dataset. If None, it defaults to the description
columns specified in the configuration ('self._cfg["general"]["description_columns"]').
amount_column (list[str] | None): A list of column names representing transaction
amounts in the dataset. If None, it defaults to the amount columns specified in
the configuration ('self._cfg["general"]["amount_columns"]').
cost_or_income_column (list[str] | None): A list of column names representing
cost or income categories in the dataset. If None, it defaults to the cost/income
columns specified in the configuration ('self._cfg["general"]["cost_or_income_columns"]').
decimal_seperator (str | None): A string representing the decimal separator used in
the dataset. If None, it defaults to the decimal separator specified in the
configuration ('self._cfg["general"]["decimal_seperator"]').
Returns:
pd.DataFrame: A DataFrame containing the consolidated cash flow data.
Raises:
FileNotFoundError: If any of the specified files or directories in 'excel_location'
cannot be found.
ValueError: If essential columns (date, description, amount) are not found in the dataset.
- For missing columns, specify them in the configuration or provide them explicitly.
- For cost or income columns, raise an exception if not found and configuration is empty.
Note:
- Duplicates in individual datasets are adjusted based on configuration settings
('self._cfg["general"]["adjust_duplicates"]').
- If duplicates are found in the combination of datasets, they are removed to prevent
double-counting.
- The function handles formatting of date columns, converting them to datetime objects.
- Transaction description columns are converted to categorical data.
- Transaction amount columns are converted to float, with support for different decimal separators.
- Cost or income columns are converted to categorical data, with optional customization.
"""
date_column = (
date_column if date_column else self._cfg["general"]["date_columns"]
)
date_format = (
date_format if date_format else self._cfg["general"]["date_format"]
)
name_columns = (
name_columns if name_columns else self._cfg["general"]["name_columns"]
)
ticker_columns = (
ticker_columns if ticker_columns else self._cfg["general"]["ticker_columns"]
)
price_columns = (
price_columns if price_columns else self._cfg["general"]["price_columns"]
)
volume_columns = (
volume_columns if volume_columns else self._cfg["general"]["volume_columns"]
)
currency_columns = (
currency_columns
if currency_columns
else self._cfg["adjustments"]["currency_columns"]
)
costs_columns = (
costs_columns if costs_columns else self._cfg["general"]["costs_columns"]
)
column_mapping = (
column_mapping if column_mapping else self._cfg["general"]["column_mapping"]
)
if self._portfolio_dataset.empty:
if not self._custom_dataset.empty:
(
self._portfolio_dataset,
self._date_column,
self._name_column,
self._ticker_column,
self._price_column,
self._volume_column,
self._currency_column,
self._costs_column,
| ) = portfolio_model.format_portfolio_dataset(
| 2 | 2023-10-15 09:16:04+00:00 | 8k |
gschramm/2023-MIC-ImageRecon-Shortcourse | 06_osem_varnet.py | [
{
"identifier": "EMUpdateModule",
"path": "layers.py",
"snippet": "class EMUpdateModule(torch.nn.Module):\n\n def __init__(\n self,\n projector: parallelproj.LinearOperator,\n ) -> None:\n\n super().__init__()\n self._projector = projector\n\n self._fwd_op_layer ... | import argparse
import json
import utils
import parallelproj
import array_api_compat.torch as torch
from datetime import datetime
from layers import EMUpdateModule
from models import Unet3D, SimpleOSEMVarNet, PostReconNet
from data import load_brain_image_batch, simulate_data_batch, download_brainweb_data
from pathlib import Path | 4,107 | parser.add_argument('--num_epochs_post', type=int, default=500)
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--num_features', type=int, default=32)
parser.add_argument('--num_rings', type=int, default=4)
parser.add_argument('--radial_trim', type=int, default=181)
parser.add_argument('--random_seed', type=int, default=1)
parser.add_argument('--sens', type=float, default=1)
parser.add_argument('--voxel_size',
nargs='+',
type=float,
default=[2.5, 2.5, 2.66])
parser.add_argument('--fusion_mode', type=str, default = 'simple', choices=['simple', 'de_pierro'])
args = parser.parse_args()
num_datasets = args.num_datasets
num_training = args.num_training
num_validation = args.num_validation
num_subsets = args.num_subsets
depth = args.depth
num_epochs = args.num_epochs
num_epochs_post = args.num_epochs_post
batch_size = args.batch_size
num_features = args.num_features
num_rings = args.num_rings
radial_trim = args.radial_trim
random_seed = args.random_seed
sens = args.sens
voxel_size = tuple(args.voxel_size)
fusion_mode = args.fusion_mode
# device variable (cpu or cuda) that determines whether calculations
# are performed on the cpu or cuda gpu
if parallelproj.cuda_present:
dev = 'cuda'
else:
dev = 'cpu'
output_dir = Path(
'run_osem_varnet') / f'{datetime.now().strftime("%Y%m%d_%H%M%S")}'
output_dir.mkdir(exist_ok=True, parents=True)
with open(output_dir / 'input_cfg.json', 'w') as f:
json.dump(vars(args), f)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- setup the scanner / LOR geometry ---------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# setup a line of response descriptor that describes the LOR start / endpoints of
# a "narrow" clinical PET scanner with 9 rings
lor_descriptor = utils.DemoPETScannerLORDescriptor(torch,
dev,
num_rings=num_rings,
radial_trim=radial_trim)
axial_fov_mm = float(lor_descriptor.scanner.num_rings *
(lor_descriptor.scanner.ring_positions[1] -
lor_descriptor.scanner.ring_positions[0]))
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- load the brainweb images -----------------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# download and extract the brainweb PET/MR images into ./data if not present
download_brainweb_data()
# image properties
ids = tuple([i for i in range(num_datasets)])
emission_image_database, attenuation_image_database = load_brain_image_batch(
ids,
torch,
dev,
voxel_size=voxel_size,
axial_fov_mm=0.95 * axial_fov_mm,
verbose=False)
img_shape = tuple(emission_image_database.shape[2:])
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
subset_projectors = parallelproj.SubsetOperator([
utils.RegularPolygonPETProjector(
lor_descriptor,
img_shape,
voxel_size,
views=torch.arange(i,
lor_descriptor.num_views,
num_subsets,
device=dev)) for i in range(num_subsets)
])
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
print(f'simulating emission and correction data')
# simulate all emission and correction sinograms we need
emission_data_database, correction_database, contamination_database, adjoint_ones_database = simulate_data_batch(
emission_image_database,
attenuation_image_database,
subset_projectors,
sens=sens,
random_seed=random_seed)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# run OSEM reconstructions of the simulated data
osem_update_modules = [
| """miminal script that trains an OSEM varnet on simulated brainweb data
"""
from __future__ import annotations
parser = argparse.ArgumentParser(description='OSEM-VARNet reconstruction')
parser.add_argument('--num_datasets', type=int, default=60)
parser.add_argument('--num_training', type=int, default=40)
parser.add_argument('--num_validation', type=int, default=20)
parser.add_argument('--num_subsets', type=int, default=4)
parser.add_argument('--depth', type=int, default=8)
parser.add_argument('--num_epochs', type=int, default=500)
parser.add_argument('--num_epochs_post', type=int, default=500)
parser.add_argument('--batch_size', type=int, default=10)
parser.add_argument('--num_features', type=int, default=32)
parser.add_argument('--num_rings', type=int, default=4)
parser.add_argument('--radial_trim', type=int, default=181)
parser.add_argument('--random_seed', type=int, default=1)
parser.add_argument('--sens', type=float, default=1)
parser.add_argument('--voxel_size',
nargs='+',
type=float,
default=[2.5, 2.5, 2.66])
parser.add_argument('--fusion_mode', type=str, default = 'simple', choices=['simple', 'de_pierro'])
args = parser.parse_args()
num_datasets = args.num_datasets
num_training = args.num_training
num_validation = args.num_validation
num_subsets = args.num_subsets
depth = args.depth
num_epochs = args.num_epochs
num_epochs_post = args.num_epochs_post
batch_size = args.batch_size
num_features = args.num_features
num_rings = args.num_rings
radial_trim = args.radial_trim
random_seed = args.random_seed
sens = args.sens
voxel_size = tuple(args.voxel_size)
fusion_mode = args.fusion_mode
# device variable (cpu or cuda) that determines whether calculations
# are performed on the cpu or cuda gpu
if parallelproj.cuda_present:
dev = 'cuda'
else:
dev = 'cpu'
output_dir = Path(
'run_osem_varnet') / f'{datetime.now().strftime("%Y%m%d_%H%M%S")}'
output_dir.mkdir(exist_ok=True, parents=True)
with open(output_dir / 'input_cfg.json', 'w') as f:
json.dump(vars(args), f)
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- setup the scanner / LOR geometry ---------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# setup a line of response descriptor that describes the LOR start / endpoints of
# a "narrow" clinical PET scanner with 9 rings
lor_descriptor = utils.DemoPETScannerLORDescriptor(torch,
dev,
num_rings=num_rings,
radial_trim=radial_trim)
axial_fov_mm = float(lor_descriptor.scanner.num_rings *
(lor_descriptor.scanner.ring_positions[1] -
lor_descriptor.scanner.ring_positions[0]))
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
#--- load the brainweb images -----------------------------------------------
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# download and extract the brainweb PET/MR images into ./data if not present
download_brainweb_data()
# image properties
ids = tuple([i for i in range(num_datasets)])
emission_image_database, attenuation_image_database = load_brain_image_batch(
ids,
torch,
dev,
voxel_size=voxel_size,
axial_fov_mm=0.95 * axial_fov_mm,
verbose=False)
img_shape = tuple(emission_image_database.shape[2:])
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
subset_projectors = parallelproj.SubsetOperator([
utils.RegularPolygonPETProjector(
lor_descriptor,
img_shape,
voxel_size,
views=torch.arange(i,
lor_descriptor.num_views,
num_subsets,
device=dev)) for i in range(num_subsets)
])
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
print(f'simulating emission and correction data')
# simulate all emission and correction sinograms we need
emission_data_database, correction_database, contamination_database, adjoint_ones_database = simulate_data_batch(
emission_image_database,
attenuation_image_database,
subset_projectors,
sens=sens,
random_seed=random_seed)
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# run OSEM reconstructions of the simulated data
osem_update_modules = [ | EMUpdateModule(projector) for projector in subset_projectors.operators | 0 | 2023-10-16 07:18:26+00:00 | 8k |
TUM-ITR/koopcore | koopcore/model/kkr_estimator.py | [
{
"identifier": "make_koopman_kernel",
"path": "koopcore/jax/invariant_kernels.py",
"snippet": "def make_koopman_kernel(base_kernel, eigenvalues_dt, H, einsum_kwargs={\"optimize\": True}, normalize=_normalize_by_h):\n\n D = eigenvalues_dt.shape[0]\n\n # backwards in time **(-h)\n pullback_mu_DH... | import jax
import jax.numpy as jnp
import koopcore
import numpy as np
import joblib, os
from sklearn.base import MultiOutputMixin, RegressorMixin, BaseEstimator
from sklearn.preprocessing import MinMaxScaler
from functools import partial
from typing import Tuple, Any, Callable, Union
from jaxtyping import Num, Array
from copy import deepcopy
from koopcore.jax.invariant_kernels import make_koopman_kernel
from koopcore.jax.explicit_invariant_kernels import koopman_kernel as koopman_kernel_expl
from koopcore.jax.explicit_invariant_kernels import (
make_linear_trajectory_kernel as make_linear_trajectory_kernel_expl,
)
from koopcore.auxilliary.data_classes import trajectory
from joblib import dump | 3,886 |
class KoopmanKernelDTRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
def __init__(
self,
kernel_name="square-exponential",
kernel_params={"scale": 0.01},
eigenvalues=jnp.array([1.0, 1.0j, -1.0j]),
regularizer_invariant=1e-8,
preprocessor=None,
normalize_eigenfunctions="norm",
einsum_kwargs={"optimize": True},
timestep=1.0,
regularizer_isometric=1e-8,
device=jax.devices("cpu")[0],
predictor_timestamps=None,
inducing_points=None,
invariant_weights=None,
isometric_weights=None,
):
self.kernel_name = kernel_name
self.kernel_params = kernel_params
self.regularizer_invariant = regularizer_invariant
self.regularizer_isometric = regularizer_isometric
self.preprocessor = preprocessor
self.normalize_eigenfunctions = normalize_eigenfunctions
self.device = jax.device_get(device)
self.timestep = timestep
self.einsum_kwargs = einsum_kwargs
self.eigenvalues = eigenvalues
self.predictor_timestamps = predictor_timestamps
self.inducing_points = inducing_points
self.invariant_weights = invariant_weights
self.isometric_weights = isometric_weights
|
class KoopmanKernelDTRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
def __init__(
self,
kernel_name="square-exponential",
kernel_params={"scale": 0.01},
eigenvalues=jnp.array([1.0, 1.0j, -1.0j]),
regularizer_invariant=1e-8,
preprocessor=None,
normalize_eigenfunctions="norm",
einsum_kwargs={"optimize": True},
timestep=1.0,
regularizer_isometric=1e-8,
device=jax.devices("cpu")[0],
predictor_timestamps=None,
inducing_points=None,
invariant_weights=None,
isometric_weights=None,
):
self.kernel_name = kernel_name
self.kernel_params = kernel_params
self.regularizer_invariant = regularizer_invariant
self.regularizer_isometric = regularizer_isometric
self.preprocessor = preprocessor
self.normalize_eigenfunctions = normalize_eigenfunctions
self.device = jax.device_get(device)
self.timestep = timestep
self.einsum_kwargs = einsum_kwargs
self.eigenvalues = eigenvalues
self.predictor_timestamps = predictor_timestamps
self.inducing_points = inducing_points
self.invariant_weights = invariant_weights
self.isometric_weights = isometric_weights
| def fit(self, X: trajectory, y: trajectory): | 3 | 2023-10-24 09:18:39+00:00 | 8k |
ZiaWang/jqtrade | jqtrade/scheduler/runner.py | [
{
"identifier": "TaskError",
"path": "jqtrade/common/exceptions.py",
"snippet": "class TaskError(UserError):\n \"\"\" 用户任务错误 \"\"\"\n pass"
},
{
"identifier": "sys_logger",
"path": "jqtrade/common/log.py",
"snippet": "class SystemLogFormatter(logging.Formatter):\n class ContextF... | import os
import sys
from ..common.exceptions import TaskError
from ..common.log import sys_logger, setup_file_logger, setup_logger
from .loader import Loader
from .strategy import Strategy
from .event_source import EventSourceScheduler
from .loop import EventLoop
from .bus import EventBus
from .context import Context
from .utils import get_activate_task_process, parse_task_info, parse_env
from .config import setup_scheduler_config, get_config as get_scheduler_config | 6,465 | # -*- coding: utf-8 -*-
logger = sys_logger.getChild("runner")
def _exist_repeated_task(task_name):
current_pid = os.getpid()
parent_pid = os.getppid()
active_tasks = get_activate_task_process()
for _task in active_tasks:
if _task.pid in (current_pid, parent_pid):
continue
| # -*- coding: utf-8 -*-
logger = sys_logger.getChild("runner")
def _exist_repeated_task(task_name):
current_pid = os.getpid()
parent_pid = os.getppid()
active_tasks = get_activate_task_process()
for _task in active_tasks:
if _task.pid in (current_pid, parent_pid):
continue | _task_info = parse_task_info(_task.cmdline()) | 9 | 2023-10-24 01:34:27+00:00 | 8k |
Glasgow-AI4BioMed/GenKIE | models/taming/models/vqgan.py | [
{
"identifier": "instantiate_from_config",
"path": "models/taming/util.py",
"snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict... | import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from models.taming.util import instantiate_from_config
from models.taming.modules.diffusionmodules.model import Encoder, Decoder
from models.taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
from models.taming.modules.vqvae.quantize import GumbelQuantize | 4,779 |
class VQModel(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
n_embed,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
remap=None,
sane_index_shape=False, # tell vector quantizer to return indices as bhw
):
super().__init__()
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
|
class VQModel(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
n_embed,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
remap=None,
sane_index_shape=False, # tell vector quantizer to return indices as bhw
):
super().__init__()
self.image_key = image_key
self.encoder = Encoder(**ddconfig) | self.decoder = Decoder(**ddconfig) | 2 | 2023-10-20 20:01:42+00:00 | 8k |
ArnaudParant/sel | tests/test_parser_n_formator.py | [
{
"identifier": "query_string_parser",
"path": "sel/query_string_parser.py",
"snippet": "AGGREG_TYPES = [\"aggreg\", \"histogram\", \"count\", \"distinct\", \"min\", \"max\", \"sum\", \"average\", \"stats\"]\nAGGREG_PARAMETER_MAPPING = {\n \"subaggreg\": None,\n \"interval\": None,\n \"size\": ... | import json
import pytest
import traceback
from sel import query_string_parser
from sel.query_string_parser import (
Value, QueryString, Comparator, Not, RangeFilter, Filter, Context,
Aggreg, Sort, Group, NoBracketGroup, Query
)
from sel import query_object_formator | 4,618 |
["aggreg toto: color",
{"type": "aggreg", "field": "color", "name": "toto"}],
["aggreg: tag subaggreg by (distinct: .author.id)",
{"type": "aggreg", "field": "tag",
"subaggreg": {"by": {"type": "distinct", "field": ".author.id"}}}
],
["aggreg: date subaggreg by (sum: like)",
{"type": "aggreg", "field": "date",
"subaggreg": { "by": {"type": "sum", "field": "like"}}}
],
["aggreg: date subaggrego by (sum: like)", None],
["aggreg: date subaggreg by (sum: like) subaggreg by (distinct: author.id)", None],
["aggreg: tag size 5",
{"type": "aggreg", "field": "tag", "size": 5}],
["aggreg: tag sizeo 5", None],
["aggreg: tag size cinq", None],
["aggreg: date interval month",
{"type": "aggreg", "field": "date", "interval": "month"}],
["aggreg: date intervalo month", None],
["histogram: date",
{"type": "histogram", "field": "date"}],
["aggreg: image.color",
{"type": "aggreg", "field": "image.color"}],
["aggreg: image.tag.color",
{"type": "aggreg", "field": "image.tag.color"}],
["average: tag.score",
{"type": "average", "field": "tag.score"}],
["stats: tag.score",
{"type": "stats", "field": "tag.score"}],
["min: tag.score",
{"type": "min", "field": "tag.score"}],
["max: tag.score",
{"type": "max", "field": "tag.score"}],
["aggreg: color where label = bag",
{"type": "aggreg", "field": "color", "where":
{"field": "label", "comparator": "=", "value": "bag"}}],
["aggreg: color where (label = bag and model = foo)",
{"type": "aggreg", "field": "color", "where":
{"operator": "and", "items": [
{"field": "label", "comparator": "=", "value": "bag"},
{"field": "model", "comparator": "=", "value": "foo"},
]}}
],
["aggreg: color where (label = bag and model = foo", None],
["aggreg: label subaggreg texture (aggreg: texture) subaggreg color (aggreg: color)",
{"type": "aggreg", "field": "label", "subaggreg": {
"texture": {"type": "aggreg", "field": "texture"},
"color": {"type": "aggreg", "field": "color"},
}}],
["aggreg: label subaggreg color (aggreg: texture) subaggreg color (aggreg: color)", None]
])
def test_aggreg(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Aggreg)
res = query_object_formator.formator(res)
assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {red}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["sort: image.color",
{"field": "image.color"}],
["sort: color asc",
{"field": "color", "order": "asc"}],
["sort: color asco", None],
["sort: color mode min",
{"field": "color", "mode": "min"}],
["sort: color modez min", None],
["sort: color asc where color = red",
{"field": "color", "order": "asc", "where":
{"field": "color", "comparator": "=", "value": "red"}}],
["sort: color under label where label = bag",
{"field": "color", "under": "label", "where":
{"field": "label", "comparator": "=", "value": "bag"}}],
["sort: color undero label where label = bag", None],
["sort: color asc where (color = red and model = foo)",
{"field": "color", "order": "asc", "where":
{"operator": "and", "items": [
{"field": "color", "comparator": "=", "value": "red"},
{"field": "model", "comparator": "=", "value": "foo"},
]}}
],
["sort: color asc where (color = red and model = foo)", None]
])
def test_sort(self, query, expected):
try:
|
class TestParserNFormator:
@pytest.mark.parametrize(["query", "expected"], [
["toto", "toto"],
['"toto tata titi"', "toto tata titi"],
["toto tata titi", None], # Exception, does not match type Value
])
def test_value(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Value)
res = query_object_formator.formator(res)
assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {red}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["foo", None], # Exception, must be delimited by quotes
["'\"foo bar\"'", {"query_string": '"foo bar"'}],
])
def test_query_string(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=QueryString)
res = query_object_formator.formator(res)
assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {red}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["'toto'", "toto"],
["'to\"to'", 'to"to'],
["'to'to'", None], # Exception, quoting error
["''toto''", "toto"],
["''to'to''", "to'to"],
["'''to\"\"to'''", 'to""to'],
["''to''to''", None], # Exception, quoting error
["'''toto'''", "toto"],
["'''to'to'''", "to'to"],
["'''to''to'''", "to''to"],
["'''to\"\"\"to'''", 'to"""to'],
["'''to'''to'''", None], # Exception, quoting error
['"toto"', "toto"],
['"to\'to"', "to'to"],
['"to"to"', None], # Exception, quoting error
['""toto""', "toto"],
['""to"to""', 'to"to'],
['""to\'\'to""', "to''to"],
['""to""to""', None], # Exception, quoting error
['"""toto"""', "toto"],
['"""to"to"""', 'to"to'],
['"""to""to"""', 'to""to'],
['"""to\'\'\'to"""', "to'''to"],
['"""to"""to"""', None], # Exception quoting error
])
def test_quoting(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Value)
res = query_object_formator.formator(res)
assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {red}\n"
res = query_string_parser.parse(query, grammar=QueryString)
res = query_object_formator.formator(res)
assert res["query_string"] == expected, ("Query: '%s'\nExpected: %s\nGot: %s\n" % (query, json.dumps(expected), json.dumps(res["query_string"])))
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["=", "="],
["!=", "!="],
["<=", "<="],
["<", "<"],
[">=", ">="],
[">", ">"],
["~", "~"],
["!~", "!~"],
["==", None], # Exception does not match type Comparator
])
def test_comparator(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Comparator)
res = query_object_formator.formator(res)
assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {red}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["color = blue",
{"field": "color", "comparator": "=", "value": "blue"}],
["content ~ #moet",
{"field": "content", "comparator": "~", "value": "#moet"}],
["label.color = blue",
{"field": "label.color", "comparator": "=", "value": "blue"}],
[".media.label.color = blue",
{"field": ".media.label.color", "comparator": "=", "value": "blue"}],
[".media.label.color == toto", None],
[".media.label.color in toto", None],
[".media.label.color in toto, tata", None],
[".media.label.color in [toto, ]", None],
[".media.label.color ino ['toto 1', 'tata']", None],
[".media.label.color in ['toto 1', 'tata']",
{"field": ".media.label.color", "comparator": "in", "value": ["toto 1", "tata"]}
],
[".media.label.color in ['toto 1']",
{"field": ".media.label.color", "comparator": "in", "value": ["toto 1"]}
],
[".media.label.color nin [toto, tata]",
{"field": ".media.label.color", "comparator": "nin", "value": ["toto", "tata"]}
],
[".media.label.color nin [toto]",
{"field": ".media.label.color", "comparator": "nin", "value": ["toto"]}
],
[".media.label.color not in [toto]",
{"field": ".media.label.color", "comparator": "nin", "value": ["toto"]}
],
["date range (> 2018)", None],
["date range (> 2018, > 2019)", None],
["date range (> 2018, = 2019)", None],
["date range (> 2018, <= 2019)",
{"field": "date", "comparator": "range", "value": {">": "2018", "<=": "2019"}}
],
["date nrange (> 2018, <= 2019)",
{"field": "date", "comparator": "nrange", "value": {">": "2018", "<=": "2019"}}
],
["date not range (> 2018, <= 2019)",
{"field": "date", "comparator": "nrange", "value": {">": "2018", "<=": "2019"}}
],
["date not rangeo (> 2018, <= 2019)", None],
["label prefix h",
{"field": "label", "comparator": "prefix", "value": "h"}
],
["label nprefix h",
{"field": "label", "comparator": "nprefix", "value": "h"}
],
["label not prefix h",
{"field": "label", "comparator": "nprefix", "value": "h"}
],
["label not prefixo h", None],
["label in person, human", None],
["label in (person, human)", None],
["label in [person human]", None],
["label in [person, human]",
{"field": "label", "comparator": "in", "value": ["person", "human"]}
],
["label nin [person, human]",
{"field": "label", "comparator": "nin", "value": ["person", "human"]}
],
["color = blue where label = bag",
{"field": "color", "comparator": "=", "value": "blue",
"where": {"field": "label", "comparator": "=", "value": "bag"}}],
["color = blue whereo label = bag", None],
["image.tag.color = blue where image.tag = bag",
{"field": "image.tag.color", "comparator": "=", "value": "blue",
"where": {"field": "image.tag", "comparator": "=", "value": "bag"}}],
['color = blue where (label = "bag it" and label = foo)',
{"field": "color", "comparator": "=", "value": "blue",
"where": {"operator": "and", "items": [
{"field": "label", "comparator": "=", "value": "bag it"},
{"field": "label", "comparator": "=", "value": "foo"}
]}}],
["foo = something",
{"field": "foo", "comparator": "=", "value": "something"}],
["color = blue where (label = bag it)", None],
])
def test_filter(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Filter)
res = query_object_formator.formator(res)
assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {red}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["2018 < date <= 2019",
{"field": "date", "comparator": "range", "value": {">": "2018", "<=": "2019"}}
],
["2018 >= date <= 2019", None],
["2018 < date <= ", None],
["2018 < date <= 2019 where label = bag",
{
"field": "date", "comparator": "range", "value": {">": "2018", "<=": "2019"},
"where": {"field": "label", "comparator": "=", "value": "bag"}
}
],
])
def test_range_filter(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=RangeFilter)
res = query_object_formator.formator(res)
assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {red}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["label where (label = bag)",
{"field": "label", "where": {"field": "label", "comparator": "=", "value": "bag"}}],
["label where (label = bag or label.color = red)",
{"field": "label", "where": {"operator": "or", "items": [
{"field": "label", "comparator": "=", "value": "bag"},
{"field": "label.color", "comparator": "=", "value": "red"}
]}}],
])
def test_context(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Context)
res = query_object_formator.formator(res)
assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {red}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["aggreg: color",
{"type": "aggreg", "field": "color"}],
["aggreg: color label", None],
["aggrego: color", None],
["aggreg toto: color",
{"type": "aggreg", "field": "color", "name": "toto"}],
["aggreg: tag subaggreg by (distinct: .author.id)",
{"type": "aggreg", "field": "tag",
"subaggreg": {"by": {"type": "distinct", "field": ".author.id"}}}
],
["aggreg: date subaggreg by (sum: like)",
{"type": "aggreg", "field": "date",
"subaggreg": { "by": {"type": "sum", "field": "like"}}}
],
["aggreg: date subaggrego by (sum: like)", None],
["aggreg: date subaggreg by (sum: like) subaggreg by (distinct: author.id)", None],
["aggreg: tag size 5",
{"type": "aggreg", "field": "tag", "size": 5}],
["aggreg: tag sizeo 5", None],
["aggreg: tag size cinq", None],
["aggreg: date interval month",
{"type": "aggreg", "field": "date", "interval": "month"}],
["aggreg: date intervalo month", None],
["histogram: date",
{"type": "histogram", "field": "date"}],
["aggreg: image.color",
{"type": "aggreg", "field": "image.color"}],
["aggreg: image.tag.color",
{"type": "aggreg", "field": "image.tag.color"}],
["average: tag.score",
{"type": "average", "field": "tag.score"}],
["stats: tag.score",
{"type": "stats", "field": "tag.score"}],
["min: tag.score",
{"type": "min", "field": "tag.score"}],
["max: tag.score",
{"type": "max", "field": "tag.score"}],
["aggreg: color where label = bag",
{"type": "aggreg", "field": "color", "where":
{"field": "label", "comparator": "=", "value": "bag"}}],
["aggreg: color where (label = bag and model = foo)",
{"type": "aggreg", "field": "color", "where":
{"operator": "and", "items": [
{"field": "label", "comparator": "=", "value": "bag"},
{"field": "model", "comparator": "=", "value": "foo"},
]}}
],
["aggreg: color where (label = bag and model = foo", None],
["aggreg: label subaggreg texture (aggreg: texture) subaggreg color (aggreg: color)",
{"type": "aggreg", "field": "label", "subaggreg": {
"texture": {"type": "aggreg", "field": "texture"},
"color": {"type": "aggreg", "field": "color"},
}}],
["aggreg: label subaggreg color (aggreg: texture) subaggreg color (aggreg: color)", None]
])
def test_aggreg(self, query, expected):
try:
res = query_string_parser.parse(query, grammar=Aggreg)
res = query_object_formator.formator(res)
assert res == expected, f"Query: '{query}'\nExpected: {expected}\nGot: {red}\n"
except Exception as exc:
print(traceback.format_exc())
assert expected is None, str(exc)
@pytest.mark.parametrize(["query", "expected"], [
["sort: image.color",
{"field": "image.color"}],
["sort: color asc",
{"field": "color", "order": "asc"}],
["sort: color asco", None],
["sort: color mode min",
{"field": "color", "mode": "min"}],
["sort: color modez min", None],
["sort: color asc where color = red",
{"field": "color", "order": "asc", "where":
{"field": "color", "comparator": "=", "value": "red"}}],
["sort: color under label where label = bag",
{"field": "color", "under": "label", "where":
{"field": "label", "comparator": "=", "value": "bag"}}],
["sort: color undero label where label = bag", None],
["sort: color asc where (color = red and model = foo)",
{"field": "color", "order": "asc", "where":
{"operator": "and", "items": [
{"field": "color", "comparator": "=", "value": "red"},
{"field": "model", "comparator": "=", "value": "foo"},
]}}
],
["sort: color asc where (color = red and model = foo)", None]
])
def test_sort(self, query, expected):
try: | res = query_string_parser.parse(query, grammar=Sort) | 9 | 2023-10-16 09:03:13+00:00 | 8k |
Qualcomm-AI-research/outlier-free-transformers | transformers_language/models/quantized_opt.py | [
{
"identifier": "QuantEmbedding",
"path": "quantization/autoquant_utils.py",
"snippet": "class QuantEmbedding(QuantizationHijacker, nn.Embedding):\n def __init__(self, *args, activation=None, **kwargs):\n super().__init__(*args, activation=activation, **kwargs)\n # NB: We should not (re... | import random
import torch
import torch.nn as nn
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers.models.opt.modeling_opt import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
OPTDecoderLayer,
_expand_mask,
_make_causal_mask,
)
from quantization.autoquant_utils import (
QuantEmbedding,
QuantLayerNorm,
get_embedding_args,
quantize_model,
)
from quantization.base_quantized_classes import QuantizedActivation
from quantization.base_quantized_model import QuantizedModel
from transformers_language.models.bert_attention import AttentionGateType | 4,352 |
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# NOTE: scaling factor d**-0.5 can be absorbed into the query quantizer scale
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
# >> re-quantize QK^T
attn_weights = self.attn_scores_act_quantizer(attn_weights)
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = torch.max(
attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
if attn_weights.dtype == torch.float16:
attn_weights = self.softmax_fn(attn_weights, dim=-1, dtype=torch.float32).to(
torch.float16
)
else:
attn_weights = self.softmax_fn(attn_weights, dim=-1)
# >> quantize output of the softmax
attn_weights = self.attn_probs_act_quantizer(attn_weights)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
# >> re-quantize P@V
attn_output = self.context_act_quantizer(attn_output)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
# attn_output - (B,H,T,d_head)
#
# *** Gating ***
| # Copyright (c) 2023 Qualcomm Technologies, Inc.
# All Rights Reserved.
class QuantizedOPTLearnedPositionalEmbedding(QuantizedModel):
    """Quantized counterpart of OPT's learned positional embedding.

    Re-creates the original embedding table as a ``QuantEmbedding`` (copying
    its weights) and keeps OPT's convention of shifting position ids by a
    fixed offset.
    """

    def __init__(self, org_model, **quant_params):
        super().__init__()
        # OPT shifts every position id by this fixed offset.
        self.offset = org_model.offset
        # Rebuild the embedding as a quantized module with the same config,
        # then copy the pretrained weights over.
        embedding_kwargs = get_embedding_args(org_model)
        self.quant_embedding = QuantEmbedding(**embedding_kwargs, **quant_params)
        self.quant_embedding.weight.data = org_model.weight.data.clone()

    def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0):
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        mask = attention_mask.long()
        # A token's position is the number of attended tokens preceding it
        # (0-based); masked slots come out as -1.
        positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() - 1
        # Drop positions already covered by cached key/value states.
        positions = positions[:, past_key_values_length:]
        return self.quant_embedding(positions + self.offset)
class QuantizedOPTAttentionWithExtras(QuantizedModel):
    def __init__(self, org_model, **quant_params):
        """Build a quantized copy of an OPT attention module.

        Scalar hyper-parameters are copied from ``org_model``; the four linear
        projections are wrapped by ``quantize_model``; activation quantizers
        are inserted after QK^T, after the softmax, and after P@V.

        Args:
            org_model: floating-point attention module to mirror (must expose
                the softmax/gating attributes copied below).
            **quant_params: quantizer configuration forwarded to
                ``quantize_model`` and ``QuantizedActivation``.
        """
        super().__init__()
        # copy attributes (plain scalars / flags, no parameters involved)
        self.embed_dim = org_model.embed_dim
        self.num_heads = org_model.num_heads
        self.dropout = org_model.dropout
        self.head_dim = org_model.head_dim
        self.scaling = org_model.scaling  # d_head ** -0.5
        self.is_decoder = org_model.is_decoder
        # quantized modules: replace the q/k/v/out projections with quantized versions
        self.k_proj = quantize_model(org_model.k_proj, **quant_params)
        self.v_proj = quantize_model(org_model.v_proj, **quant_params)
        self.q_proj = quantize_model(org_model.q_proj, **quant_params)
        self.out_proj = quantize_model(org_model.out_proj, **quant_params)
        # activation quantizers for the three intermediate tensors of attention
        self.attn_scores_act_quantizer = QuantizedActivation(**quant_params)
        self.attn_probs_act_quantizer = QuantizedActivation(**quant_params)
        self.context_act_quantizer = QuantizedActivation(**quant_params)
        # softmax fn (taken as-is from the source model; may be a custom softmax)
        self.softmax_fn = org_model.softmax_fn
        # attention gating configuration, copied verbatim from the source model
        self.attn_gate_type = org_model.attn_gate_type
        self.attn_gate_init = org_model.attn_gate_init
        self.attn_gate_mlp = org_model.attn_gate_mlp
        self.attn_gate_mlp2 = org_model.attn_gate_mlp2
        self.attn_gate_linear_all_features = org_model.attn_gate_linear_all_features
        self.alpha = org_model.alpha  # do not quantize for now
        self.gate_fn = org_model.gate_fn
        self.pooling_fn = org_model.pooling_fn
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# NOTE: scaling factor d**-0.5 can be absorbed into the query quantizer scale
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
# >> re-quantize QK^T
attn_weights = self.attn_scores_act_quantizer(attn_weights)
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = torch.max(
attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
# upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437
if attn_weights.dtype == torch.float16:
attn_weights = self.softmax_fn(attn_weights, dim=-1, dtype=torch.float32).to(
torch.float16
)
else:
attn_weights = self.softmax_fn(attn_weights, dim=-1)
# >> quantize output of the softmax
attn_weights = self.attn_probs_act_quantizer(attn_weights)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
# >> re-quantize P@V
attn_output = self.context_act_quantizer(attn_output)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
# attn_output - (B,H,T,d_head)
#
# *** Gating *** | if self.attn_gate_type == AttentionGateType.unconditional_per_head: | 6 | 2023-10-23 15:59:50+00:00 | 8k |
QgZhan/ESVAE | main_fsvae.py | [
{
"identifier": "parse",
"path": "network_parser.py",
"snippet": "class parse(object):\r\n \"\"\"\r\n This class reads yaml parameter file and allows dictionary like access to the members.\r\n \"\"\"\r\n def __init__(self, path):\r\n with open(path, 'r') as file:\r\n self.p... | import os
import os.path
import random
import numpy as np
import logging
import argparse
import pycuda.driver as cuda
import torch
import torchvision
import global_v as glv
import svae_models.fsvae as fsvae
import metrics.inception_score as inception_score
import metrics.clean_fid as clean_fid
import metrics.autoencoder_fid as autoencoder_fid
from torch.utils.tensorboard import SummaryWriter
from network_parser import parse
from datasets import load_dataset_snn
from utils import aboutCudaDevices
from utils import AverageMeter
from utils import CountMulAddSNN
from svae_models.snn_layers import LIFSpike
| 4,873 | f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/test/epoch{epoch}_recons.png')
writer.add_images('Test/input_img', (real_img + 1) / 2, epoch)
writer.add_images('Test/recons_img', (x_recon + 1) / 2, epoch)
# break
logging.info(f"Test [{epoch}] Loss: {loss_meter.avg} ReconsLoss: {recons_meter.avg} DISTANCE: {dist_meter.avg}")
writer.add_scalar('Test/loss', loss_meter.avg, epoch)
writer.add_scalar('Test/recons_loss', recons_meter.avg, epoch)
writer.add_scalar('Test/distance', dist_meter.avg, epoch)
writer.add_scalar('Test/mean_q', mean_q_z.mean().item(), epoch)
writer.add_scalar('Test/mean_p', mean_p_z.mean().item(), epoch)
writer.add_scalar('Test/mul', count_mul_add.mul_sum.item() / len(testloader), epoch)
writer.add_scalar('Test/add', count_mul_add.add_sum.item() / len(testloader), epoch)
for handle in hook_handles:
handle.remove()
writer.add_image('Test/mean_sampled_z', mean_sampled_z.unsqueeze(0), epoch)
writer.add_histogram('Test/mean_sampled_z_distribution', mean_sampled_z.sum(-1), epoch)
mean_q_z = mean_q_z.permute(1, 0, 2) # # (k,C,T)
mean_p_z = mean_p_z.permute(1, 0, 2) # # (k,C,T)
writer.add_image(f'Test/mean_q_z', mean_q_z.mean(0).unsqueeze(0))
writer.add_image(f'Test/mean_p_z', mean_p_z.mean(0).unsqueeze(0))
return loss_meter.avg
def sample(network, epoch, batch_size=128):
    """Draw ``batch_size`` images from the model prior, log them, and save a grid to disk."""
    network = network.eval()
    with torch.no_grad():
        gen_imgs, latent_z = network.sample(batch_size)
        # Generated images are in [-1, 1]; rescale to [0, 1] for logging/saving.
        grid = (gen_imgs + 1) / 2
        mean_z = latent_z.mean(0)
        writer.add_images('Sample/sample_img', grid, epoch)
        writer.add_image('Sample/mean_sampled_z', mean_z.unsqueeze(0), epoch)
        writer.add_histogram('Sample/mean_sampled_z_distribution', mean_z.sum(-1), epoch)
        save_dir = f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/sample/'
        os.makedirs(save_dir, exist_ok=True)
        torchvision.utils.save_image(grid, save_dir + f'epoch{epoch}_sample.png')
def calc_inception_score(network, epoch, batch_size=256):
    """Estimate the Inception Score of generated samples and log mean/std to TensorBoard."""
    network = network.eval()
    with torch.no_grad():
        # Use more batches (a tighter estimate) every 5th epoch and on the final one.
        last_epoch = glv.network_config['epochs'] - 1
        batch_times = 10 if epoch % 5 == 0 or epoch == last_epoch else 4
        is_mean, is_std = inception_score.get_inception_score(
            network, device=init_device, batch_size=batch_size, batch_times=batch_times)
        writer.add_scalar('Sample/inception_score_mean', is_mean, epoch)
        writer.add_scalar('Sample/inception_score_std', is_std, epoch)
def calc_clean_fid(network, epoch):
    """Compute the clean-FID over 5000 generated images and log it to TensorBoard."""
    n_generated = 5000
    network = network.eval()
    with torch.no_grad():
        score = clean_fid.get_clean_fid_score(
            network, glv.network_config['dataset'], init_device, n_generated)
        writer.add_scalar('Sample/FID', score, epoch)
def calc_autoencoder_frechet_distance(network, epoch):
    """Compute the autoencoder-based Frechet distance for the configured dataset and log it."""
    network = network.eval()
    # Map the config's dataset identifier onto the metric module's naming scheme.
    alias = {
        "MNIST": "mnist",
        "FashionMNIST": "fashion",
        "CelebA": "celeba",
        "CIFAR10": "cifar10",
    }
    cfg_name = glv.network_config['dataset']
    if cfg_name not in alias:
        raise ValueError()
    dataset = alias[cfg_name]
    with torch.no_grad():
        fid_score = autoencoder_fid.get_autoencoder_frechet_distance(network, dataset, init_device, 5000)
        writer.add_scalar('Sample/AutoencoderDist', fid_score, epoch)
def seed_all(seed=42):
    """Seed every RNG in use (hash, `random`, NumPy, torch CPU + all CUDA devices)
    and force deterministic cuDNN kernels for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN: disable autotuning, use reproducible algorithms.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
seed_all()
parser = argparse.ArgumentParser()
parser.add_argument('-name', default='tmp', type=str)
parser.add_argument('-config', action='store', dest='config', help='The path of config file')
parser.add_argument('-checkpoint', action='store', dest='checkpoint',
help='The path of checkpoint, if use checkpoint')
parser.add_argument('-device', type=int)
parser.add_argument('-project_save_path', default='/data/zhan/FullySpikingVAE-master/', type=str)
try:
args = parser.parse_args()
except:
parser.print_help()
exit(0)
if args.config is None:
raise Exception('Unrecognized config file.')
if args.device is None:
init_device = torch.device("cuda:0")
else:
init_device = torch.device(f"cuda:{args.device}")
logging.info("start parsing settings")
|
max_accuracy = 0
min_loss = 1000
def add_hook(net):
    """Register a shared CountMulAddSNN forward hook on every counted layer.

    Returns the counter instance and the list of hook handles (so callers can
    remove them afterwards).
    """
    counter = CountMulAddSNN()
    handles = []
    # Layers whose multiply/add work we account for, plus the spiking neurons.
    tracked = (torch.nn.Conv3d, torch.nn.Linear, torch.nn.ConvTranspose3d, LIFSpike)
    for module in net.modules():
        if isinstance(module, tracked):
            handles.append(module.register_forward_hook(counter))
    return counter, handles
def write_weight_hist(net, index):
    """Log a TensorBoard histogram for every parameter tensor of `net` at step `index`."""
    for param_name, param in net.named_parameters():
        # splitext splits off the last dotted component (e.g. ".weight"/".bias"),
        # so the tag becomes "<module path>/<.leaf>".
        group, leaf = os.path.splitext(param_name)
        writer.add_histogram(group + '/' + leaf, param, index)
def train(network, trainloader, opti, epoch):
    """Run one FSVAE training epoch and log losses/images to TensorBoard.

    Args:
        network: FSVAE model exposing ``loss_function_mmd`` / ``loss_function_kld``.
        trainloader: DataLoader yielding ``(image, label)`` batches; labels are
            moved to the device but not used by the loss.
        opti: optimizer stepping the network parameters.
        epoch: current epoch index (for logging and output file names).

    Returns:
        Average total loss over the epoch.

    NOTE(review): relies on module-level globals (`glv`, `network_config`,
    `init_device`, `writer`, `args`, `dataset_name`, `AverageMeter`).
    """
    n_steps = glv.network_config['n_steps']
    max_epoch = glv.network_config['epochs']
    # Running averages over the epoch.
    loss_meter = AverageMeter()
    recons_meter = AverageMeter()
    dist_meter = AverageMeter()
    # Running means (over batches) of posterior/prior codes and sampled latents.
    mean_q_z = 0
    mean_p_z = 0
    mean_sampled_z = 0
    network = network.train()
    for batch_idx, (real_img, labels) in enumerate(trainloader):
        opti.zero_grad()
        real_img = real_img.to(init_device, non_blocking=True)
        labels = labels.to(init_device, non_blocking=True)  # not used below
        # direct spike input: repeat the image along a new time axis
        spike_input = real_img.unsqueeze(-1).repeat(1, 1, 1, 1, n_steps)  # (N, C, H, W, T)
        x_recon, q_z, p_z, sampled_z = network(spike_input,
                                               scheduled=network_config['scheduled'])  # sampled_z(B, C, 1, 1, T)
        # Choose divergence term by config: MMD or KL.
        if network_config['loss_func'] == 'mmd':
            losses = network.loss_function_mmd(real_img, x_recon, q_z, p_z)
        elif network_config['loss_func'] == 'kld':
            losses = network.loss_function_kld(real_img, x_recon, q_z, p_z)
        else:
            raise ValueError('unrecognized loss function')
        losses['loss'].backward()
        opti.step()
        loss_meter.update(losses['loss'].detach().cpu().item())
        recons_meter.update(losses['Reconstruction_Loss'].detach().cpu().item())
        dist_meter.update(losses['Distance_Loss'].detach().cpu().item())
        # Incremental mean over batches: new = (x + i * old) / (i + 1).
        mean_q_z = (q_z.mean(0).detach().cpu() + batch_idx * mean_q_z) / (batch_idx + 1)  # (C,k,T)
        mean_p_z = (p_z.mean(0).detach().cpu() + batch_idx * mean_p_z) / (batch_idx + 1)  # (C,k,T)
        mean_sampled_z = (sampled_z.mean(0).detach().cpu() + batch_idx * mean_sampled_z) / (batch_idx + 1)  # (C,T)
        print(
            f'Train[{epoch}/{max_epoch}] [{batch_idx}/{len(trainloader)}] Loss: {loss_meter.avg}, RECONS: {recons_meter.avg}, DISTANCE: {dist_meter.avg}')
        # On the last batch, dump input/reconstruction image grids to disk and TensorBoard.
        if batch_idx == len(trainloader) - 1:
            os.makedirs(f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/train/', exist_ok=True)
            # Images are in [-1, 1]; rescale to [0, 1] for saving/logging.
            torchvision.utils.save_image((real_img + 1) / 2,
                                         f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/train/epoch{epoch}_input.png')
            torchvision.utils.save_image((x_recon + 1) / 2,
                                         f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/train/epoch{epoch}_recons.png')
            writer.add_images('Train/input_img', (real_img + 1) / 2, epoch)
            writer.add_images('Train/recons_img', (x_recon + 1) / 2, epoch)
    logging.info(f"Train [{epoch}] Loss: {loss_meter.avg} ReconsLoss: {recons_meter.avg} DISTANCE: {dist_meter.avg}")
    writer.add_scalar('Train/loss', loss_meter.avg, epoch)
    writer.add_scalar('Train/recons_loss', recons_meter.avg, epoch)
    writer.add_scalar('Train/distance', dist_meter.avg, epoch)
    writer.add_scalar('Train/mean_q', mean_q_z.mean().item(), epoch)
    writer.add_scalar('Train/mean_p', mean_p_z.mean().item(), epoch)
    writer.add_image('Train/mean_sampled_z', mean_sampled_z.unsqueeze(0), epoch)
    writer.add_histogram(f'Train/mean_sampled_z_distribution', mean_sampled_z.sum(-1), epoch)
    # Reorder to (k,C,T) before averaging over the first axis for display.
    mean_q_z = mean_q_z.permute(1, 0, 2)  # (k,C,T)
    mean_p_z = mean_p_z.permute(1, 0, 2)  # (k,C,T)
    writer.add_image(f'Train/mean_q_z', mean_q_z.mean(0).unsqueeze(0))
    writer.add_image(f'Train/mean_p_z', mean_p_z.mean(0).unsqueeze(0))
    return loss_meter.avg
def test(network, testloader, epoch):
    """Run one evaluation epoch of the spiking VAE.

    Accumulates reconstruction/distance losses over ``testloader``, logs scalar
    metrics, sample input/reconstruction images, running latent statistics, and
    the multiply/add operation counts collected by ``add_hook`` to the global
    TensorBoard ``writer``.

    Args:
        network: model to evaluate; switched to eval mode here.
        testloader: DataLoader yielding ``(image, label)`` batches.
        epoch: current epoch index, used for logging tags and output filenames.

    Returns:
        Average total loss over the test set.

    NOTE(review): relies on module globals (``glv``, ``writer``, ``args``,
    ``dataset_name``, ``network_config``, ``init_device``, ``add_hook``).
    """
    n_steps = glv.network_config['n_steps']
    max_epoch = glv.network_config['epochs']
    loss_meter = AverageMeter()
    recons_meter = AverageMeter()
    dist_meter = AverageMeter()
    # Running means of posterior/prior/sampled latents, updated batch-by-batch
    # as mean_k = (x_k + (k-1) * mean_{k-1}) / k.
    mean_q_z = 0
    mean_p_z = 0
    mean_sampled_z = 0
    # Fix: attach the op-counting hooks to the model that was passed in,
    # not the module-level global `net` (same object at the call site, but
    # the global made this function silently depend on __main__ state).
    count_mul_add, hook_handles = add_hook(network)
    network = network.eval()
    with torch.no_grad():
        for batch_idx, (real_img, labels) in enumerate(testloader):
            real_img = real_img.to(init_device, non_blocking=True)
            labels = labels.to(init_device, non_blocking=True)
            # Direct spike input: repeat the image along a new time axis.
            spike_input = real_img.unsqueeze(-1).repeat(1, 1, 1, 1, n_steps)  # (N,C,H,W,T)
            x_recon, q_z, p_z, sampled_z = network(spike_input, scheduled=network_config['scheduled'])
            if network_config['loss_func'] == 'mmd':
                losses = network.loss_function_mmd(real_img, x_recon, q_z, p_z)
            elif network_config['loss_func'] == 'kld':
                losses = network.loss_function_kld(real_img, x_recon, q_z, p_z)
            else:
                raise ValueError('unrecognized loss function')
            mean_q_z = (q_z.mean(0).detach().cpu() + batch_idx * mean_q_z) / (batch_idx + 1)  # (C,k,T)
            mean_p_z = (p_z.mean(0).detach().cpu() + batch_idx * mean_p_z) / (batch_idx + 1)  # (C,k,T)
            mean_sampled_z = (sampled_z.mean(0).detach().cpu() + batch_idx * mean_sampled_z) / (batch_idx + 1)  # (C,T)
            loss_meter.update(losses['loss'].detach().cpu().item())
            recons_meter.update(losses['Reconstruction_Loss'].detach().cpu().item())
            dist_meter.update(losses['Distance_Loss'].detach().cpu().item())
            print(
                f'Test[{epoch}/{max_epoch}] [{batch_idx}/{len(testloader)}] Loss: {loss_meter.avg}, RECONS: {recons_meter.avg}, DISTANCE: {dist_meter.avg}')
            # On the last batch, save and log example inputs/reconstructions
            # (images are in [-1, 1]; rescale to [0, 1] for saving).
            if batch_idx == len(testloader) - 1:
                os.makedirs(f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/test/', exist_ok=True)
                torchvision.utils.save_image((real_img + 1) / 2,
                                             f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/test/epoch{epoch}_input.png')
                torchvision.utils.save_image((x_recon + 1) / 2,
                                             f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/test/epoch{epoch}_recons.png')
                writer.add_images('Test/input_img', (real_img + 1) / 2, epoch)
                writer.add_images('Test/recons_img', (x_recon + 1) / 2, epoch)
    logging.info(f"Test [{epoch}] Loss: {loss_meter.avg} ReconsLoss: {recons_meter.avg} DISTANCE: {dist_meter.avg}")
    writer.add_scalar('Test/loss', loss_meter.avg, epoch)
    writer.add_scalar('Test/recons_loss', recons_meter.avg, epoch)
    writer.add_scalar('Test/distance', dist_meter.avg, epoch)
    writer.add_scalar('Test/mean_q', mean_q_z.mean().item(), epoch)
    writer.add_scalar('Test/mean_p', mean_p_z.mean().item(), epoch)
    # Average op counts per batch, as gathered by the hooks during the loop.
    writer.add_scalar('Test/mul', count_mul_add.mul_sum.item() / len(testloader), epoch)
    writer.add_scalar('Test/add', count_mul_add.add_sum.item() / len(testloader), epoch)
    for handle in hook_handles:
        handle.remove()
    writer.add_image('Test/mean_sampled_z', mean_sampled_z.unsqueeze(0), epoch)
    writer.add_histogram('Test/mean_sampled_z_distribution', mean_sampled_z.sum(-1), epoch)
    mean_q_z = mean_q_z.permute(1, 0, 2)  # (k,C,T)
    mean_p_z = mean_p_z.permute(1, 0, 2)  # (k,C,T)
    # NOTE(review): no epoch/global_step is passed here (unlike the calls
    # above) — each epoch overwrites the previous image; confirm intended.
    writer.add_image('Test/mean_q_z', mean_q_z.mean(0).unsqueeze(0))
    writer.add_image('Test/mean_p_z', mean_p_z.mean(0).unsqueeze(0))
    return loss_meter.avg
def sample(network, epoch, batch_size=128):
    """Draw a batch of images from the generative model, log them to
    TensorBoard, and save a grid to the run's ``imgs/sample/`` directory.

    Images come out in [-1, 1] and are rescaled to [0, 1] for display/saving.
    """
    network = network.eval()
    with torch.no_grad():
        generated, latent = network.sample(batch_size)
        mean_latent = latent.mean(0)
        writer.add_images('Sample/sample_img', (generated + 1) / 2, epoch)
        writer.add_image('Sample/mean_sampled_z', mean_latent.unsqueeze(0), epoch)
        writer.add_histogram('Sample/mean_sampled_z_distribution', mean_latent.sum(-1), epoch)
        out_dir = f'{args.project_save_path}/checkpoint/{dataset_name}/{args.name}/imgs/sample/'
        os.makedirs(out_dir, exist_ok=True)
        torchvision.utils.save_image((generated + 1) / 2, f'{out_dir}epoch{epoch}_sample.png')
def calc_inception_score(network, epoch, batch_size=256):
    """Compute the Inception Score of generated samples and log mean/std.

    Uses more sampling rounds (10) every fifth epoch and on the final epoch;
    otherwise a cheaper 4 rounds.
    """
    network = network.eval()
    with torch.no_grad():
        is_final_epoch = epoch == glv.network_config['epochs'] - 1
        batch_times = 10 if (epoch % 5 == 0) or is_final_epoch else 4
        mean_is, std_is = inception_score.get_inception_score(network, device=init_device,
                                                              batch_size=batch_size,
                                                              batch_times=batch_times)
        writer.add_scalar('Sample/inception_score_mean', mean_is, epoch)
        writer.add_scalar('Sample/inception_score_std', std_is, epoch)
def calc_clean_fid(network, epoch):
    """Compute the clean-FID score over 5000 generated samples and log it."""
    network = network.eval()
    with torch.no_grad():
        score = clean_fid.get_clean_fid_score(
            network, glv.network_config['dataset'], init_device, 5000)
        writer.add_scalar('Sample/FID', score, epoch)
def calc_autoencoder_frechet_distance(network, epoch):
    """Compute the autoencoder Fréchet distance over 5000 samples and log it.

    The configured dataset name is mapped to the identifier expected by
    ``autoencoder_fid``; an unsupported dataset raises ``ValueError`` with an
    informative message (the original raised a bare ``ValueError()``).
    """
    # Dict lookup replaces the if/elif ladder; same four supported datasets.
    dataset_map = {
        "MNIST": 'mnist',
        "FashionMNIST": 'fashion',
        "CelebA": 'celeba',
        "CIFAR10": 'cifar10',
    }
    network = network.eval()
    dataset_key = glv.network_config['dataset']
    if dataset_key not in dataset_map:
        raise ValueError(f'unsupported dataset for autoencoder FID: {dataset_key}')
    dataset = dataset_map[dataset_key]
    with torch.no_grad():
        fid_score = autoencoder_fid.get_autoencoder_frechet_distance(network, dataset, init_device, 5000)
        writer.add_scalar('Sample/AutoencoderDist', fid_score, epoch)
def seed_all(seed=42):
    """Seed every relevant RNG (Python, NumPy, PyTorch CPU/CUDA) and force
    deterministic cuDNN behavior for reproducible runs.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
    # Fix all RNG seeds first so the whole run is reproducible.
    seed_all()
    parser = argparse.ArgumentParser()
    parser.add_argument('-name', default='tmp', type=str)
    parser.add_argument('-config', action='store', dest='config', help='The path of config file')
    parser.add_argument('-checkpoint', action='store', dest='checkpoint',
                        help='The path of checkpoint, if use checkpoint')
    parser.add_argument('-device', type=int)
    parser.add_argument('-project_save_path', default='/data/zhan/FullySpikingVAE-master/', type=str)
    try:
        args = parser.parse_args()
    # NOTE(review): bare except also swallows the SystemExit argparse raises
    # on bad arguments (then prints help and exits 0) — confirm intended.
    except:
        parser.print_help()
        exit(0)
    if args.config is None:
        raise Exception('Unrecognized config file.')
    # Choose the CUDA device: default to cuda:0 unless -device was given.
    if args.device is None:
        init_device = torch.device("cuda:0")
    else:
        init_device = torch.device(f"cuda:{args.device}")
    logging.info("start parsing settings")
| params = parse(args.config)
| 0 | 2023-10-23 07:33:27+00:00 | 8k |
iesl/softmax_CPR_recommend | recbole/data/dataloader/user_dataloader.py | [
{
"identifier": "AbstractDataLoader",
"path": "recbole/data/dataloader/abstract_dataloader.py",
"snippet": "class AbstractDataLoader:\n \"\"\":class:`AbstractDataLoader` is an abstract object which would return a batch of data which is loaded by\n :class:`~recbole.data.interaction.Interaction` whe... | import torch
from recbole.data.dataloader.abstract_dataloader import AbstractDataLoader
from recbole.data.interaction import Interaction | 3,602 | # @Time : 2020/9/23
# @Author : Yushuo Chen
# @Email : chenyushuo@ruc.edu.cn
# UPDATE
# @Time : 2020/9/23, 2020/12/28
# @Author : Yushuo Chen, Xingyu Pan
# @email : chenyushuo@ruc.edu.cn, panxy@ruc.edu.cn
"""
recbole.data.dataloader.user_dataloader
################################################
"""
class UserDataLoader(AbstractDataLoader):
""":class:`UserDataLoader` will return a batch of data which only contains user-id when it is iterated.
Args:
config (Config): The config of dataloader.
dataset (Dataset): The dataset of dataloader.
sampler (Sampler): The sampler of dataloader.
shuffle (bool, optional): Whether the dataloader will be shuffle after a round. Defaults to ``False``.
Attributes:
shuffle (bool): Whether the dataloader will be shuffle after a round.
However, in :class:`UserDataLoader`, it's guaranteed to be ``True``.
"""
def __init__(self, config, dataset, sampler, shuffle=False):
if shuffle is False:
shuffle = True
self.logger.warning('UserDataLoader must shuffle the data.')
self.uid_field = dataset.uid_field
| # @Time : 2020/9/23
# @Author : Yushuo Chen
# @Email : chenyushuo@ruc.edu.cn
# UPDATE
# @Time : 2020/9/23, 2020/12/28
# @Author : Yushuo Chen, Xingyu Pan
# @email : chenyushuo@ruc.edu.cn, panxy@ruc.edu.cn
"""
recbole.data.dataloader.user_dataloader
################################################
"""
class UserDataLoader(AbstractDataLoader):
""":class:`UserDataLoader` will return a batch of data which only contains user-id when it is iterated.
Args:
config (Config): The config of dataloader.
dataset (Dataset): The dataset of dataloader.
sampler (Sampler): The sampler of dataloader.
shuffle (bool, optional): Whether the dataloader will be shuffle after a round. Defaults to ``False``.
Attributes:
shuffle (bool): Whether the dataloader will be shuffle after a round.
However, in :class:`UserDataLoader`, it's guaranteed to be ``True``.
"""
def __init__(self, config, dataset, sampler, shuffle=False):
if shuffle is False:
shuffle = True
self.logger.warning('UserDataLoader must shuffle the data.')
self.uid_field = dataset.uid_field | self.user_list = Interaction({self.uid_field: torch.arange(dataset.user_num)}) | 1 | 2023-10-21 16:31:44+00:00 | 8k |
timapage/pyqt6-yolov8 | src/qt/stream/ai_worker.py | [
{
"identifier": "YoloDetector",
"path": "src/models/detection/yolov8_detector_onnx.py",
"snippet": "class YoloDetector(DetectorBase):\n def __init__(self):\n self._model = None\n \n def init(self, model_path, class_txt_path, confidence_threshold=0.3, iou_threshold=0.45):\n _class_... | from PyQt6.QtCore import QThread, pyqtSignal
from src.models.detection.yolov8_detector_onnx import YoloDetector
from src.models.pose.yolov8_pose_onnx import PoseDetector
from src.models.segmentation.yolov8_seg_onnx import YOLOSeg
from src.models.tracking.deep_sort.deep_sort import DeepSort
from src.data_type.video_buffer import LatestFrame
from src.utils.general import ROOT, add_image_id
import os | 4,306 |
class AiWorkerThread(QThread):
send_ai_output = pyqtSignal(list)
def __init__(self):
super(AiWorkerThread, self).__init__()
self.thread_name = "AiWorkerThread"
self.threadFlag = False
def set_start_config(self, ai_task, model_name="yolov8n", confidence_threshold=0.35, iou_threshold=0.45):
self.threadFlag = True
self.ai_task = ai_task
self.latest_frame = LatestFrame()
self.confi_thr = confidence_threshold
self.iou_thr = iou_threshold
self.model_name = model_name
self._init_yolo()
self._init_tracker()
def set_iou_threshold(self, iou_threshold):
self.iou_thr = iou_threshold
def set_confidence_threshold(self, confidence_threshold):
self.confi_thr = confidence_threshold
def set_model_name(self, model_name):
self.model_name = model_name
def _init_yolo(self):
if self.ai_task == "object_detection":
|
class AiWorkerThread(QThread):
send_ai_output = pyqtSignal(list)
def __init__(self):
super(AiWorkerThread, self).__init__()
self.thread_name = "AiWorkerThread"
self.threadFlag = False
def set_start_config(self, ai_task, model_name="yolov8n", confidence_threshold=0.35, iou_threshold=0.45):
self.threadFlag = True
self.ai_task = ai_task
self.latest_frame = LatestFrame()
self.confi_thr = confidence_threshold
self.iou_thr = iou_threshold
self.model_name = model_name
self._init_yolo()
self._init_tracker()
def set_iou_threshold(self, iou_threshold):
self.iou_thr = iou_threshold
def set_confidence_threshold(self, confidence_threshold):
self.confi_thr = confidence_threshold
def set_model_name(self, model_name):
self.model_name = model_name
def _init_yolo(self):
if self.ai_task == "object_detection": | self.detector = YoloDetector() | 0 | 2023-10-18 09:21:01+00:00 | 8k |
OthersideAI/self-operating-computer | operate/actions.py | [
{
"identifier": "Config",
"path": "operate/settings.py",
"snippet": "class Config:\n \"\"\"\n Configuration class for managing settings.\n\n Attributes:\n debug (bool): Flag indicating whether debug mode is enabled.\n openai_api_key (str): API key for OpenAI.\n google_api_k... | import os
import time
import json
import base64
import re
import io
import asyncio
import aiohttp
import google.generativeai as genai
from PIL import Image
from ultralytics import YOLO
from operate.settings import Config
from operate.exceptions import ModelNotRecognizedException
from operate.utils.screenshot import (
capture_screen_with_cursor,
add_grid_to_image,
capture_mini_screenshot_with_cursor,
)
from operate.utils.os import get_last_assistant_message
from operate.prompts import (
format_vision_prompt,
format_accurate_mode_vision_prompt,
format_summary_prompt,
format_decision_prompt,
format_label_prompt,
)
from operate.utils.label import (
add_labels,
parse_click_content,
get_click_position_in_percent,
get_label_coordinates,
)
from operate.utils.style import (
ANSI_GREEN,
ANSI_RED,
ANSI_RESET,
) | 6,399 | if model == "gpt-4-vision-preview":
with open(screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
summary_message = {
"role": "user",
"content": [
{"type": "text", "text": summary_prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
],
}
messages.append(summary_message)
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=messages,
max_tokens=500,
)
content = response.choices[0].message.content
elif model == "gemini-pro-vision":
model = genai.GenerativeModel("gemini-pro-vision")
summary_message = model.generate_content(
[summary_prompt, Image.open(screenshot_filename)]
)
content = summary_message.text
return content
except Exception as e:
print(f"Error in summarize: {e}")
return "Failed to summarize the workflow"
async def call_gpt_4_v_labeled(messages, objective):
time.sleep(1)
try:
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
screenshot_filename = os.path.join(screenshots_dir, "screenshot.png")
# Call the function to capture the screen with the cursor
capture_screen_with_cursor(screenshot_filename)
with open(screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
previous_action = get_last_assistant_message(messages)
img_base64_labeled, img_base64_original, label_coordinates = add_labels(
img_base64, yolo_model
)
decision_prompt = format_decision_prompt(objective, previous_action)
labeled_click_prompt = format_label_prompt(objective)
click_message = {
"role": "user",
"content": [
{"type": "text", "text": labeled_click_prompt},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{img_base64_labeled}"
},
},
],
}
decision_message = {
"role": "user",
"content": [
{"type": "text", "text": decision_prompt},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{img_base64_original}"
},
},
],
}
click_messages = messages.copy()
click_messages.append(click_message)
decision_messages = messages.copy()
decision_messages.append(decision_message)
click_future = fetch_openai_response_async(click_messages)
decision_future = fetch_openai_response_async(decision_messages)
click_response, decision_response = await asyncio.gather(
click_future, decision_future
)
# Extracting the message content from the ChatCompletionMessage object
click_content = click_response.get("choices")[0].get("message").get("content")
decision_content = (
decision_response.get("choices")[0].get("message").get("content")
)
if not decision_content.startswith("CLICK"):
return decision_content
label_data = parse_click_content(click_content)
if label_data and "label" in label_data:
coordinates = get_label_coordinates(label_data["label"], label_coordinates)
image = Image.open(
io.BytesIO(base64.b64decode(img_base64))
) # Load the image to get its size
image_size = image.size # Get the size of the image (width, height)
click_position_percent = get_click_position_in_percent(
coordinates, image_size
)
if not click_position_percent:
print(
|
# Load configuration
config = Config()
client = config.initialize_openai_client()
yolo_model = YOLO("./operate/model/weights/best.pt") # Load your trained model
async def get_next_action(model, messages, objective):
if model == "gpt-4":
return call_gpt_4_v(messages, objective)
if model == "gpt-4-with-som":
return await call_gpt_4_v_labeled(messages, objective)
elif model == "agent-1":
return "coming soon"
elif model == "gemini-pro-vision":
return call_gemini_pro_vision(messages, objective)
raise ModelNotRecognizedException(model)
def call_gpt_4_v(messages, objective):
"""
Get the next action for Self-Operating Computer
"""
# sleep for a second
time.sleep(1)
try:
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
screenshot_filename = os.path.join(screenshots_dir, "screenshot.png")
# Call the function to capture the screen with the cursor
capture_screen_with_cursor(screenshot_filename)
new_screenshot_filename = os.path.join(
"screenshots", "screenshot_with_grid.png"
)
add_grid_to_image(screenshot_filename, new_screenshot_filename, 500)
# sleep for a second
time.sleep(1)
with open(new_screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
previous_action = get_last_assistant_message(messages)
vision_prompt = format_vision_prompt(objective, previous_action)
vision_message = {
"role": "user",
"content": [
{"type": "text", "text": vision_prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
],
}
# create a copy of messages and save to pseudo_messages
pseudo_messages = messages.copy()
pseudo_messages.append(vision_message)
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=pseudo_messages,
presence_penalty=1,
frequency_penalty=1,
temperature=0.7,
max_tokens=300,
)
messages.append(
{
"role": "user",
"content": "`screenshot.png`",
}
)
content = response.choices[0].message.content
return content
except Exception as e:
print(f"Error parsing JSON: {e}")
return "Failed take action after looking at the screenshot"
def call_gemini_pro_vision(messages, objective):
"""
Get the next action for Self-Operating Computer using Gemini Pro Vision
"""
# sleep for a second
time.sleep(1)
try:
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
screenshot_filename = os.path.join(screenshots_dir, "screenshot.png")
# Call the function to capture the screen with the cursor
capture_screen_with_cursor(screenshot_filename)
new_screenshot_filename = os.path.join(
"screenshots", "screenshot_with_grid.png"
)
add_grid_to_image(screenshot_filename, new_screenshot_filename, 500)
# sleep for a second
time.sleep(1)
previous_action = get_last_assistant_message(messages)
vision_prompt = format_vision_prompt(objective, previous_action)
model = genai.GenerativeModel("gemini-pro-vision")
response = model.generate_content(
[vision_prompt, Image.open(new_screenshot_filename)]
)
# create a copy of messages and save to pseudo_messages
pseudo_messages = messages.copy()
pseudo_messages.append(response.text)
messages.append(
{
"role": "user",
"content": "`screenshot.png`",
}
)
content = response.text[1:]
return content
except Exception as e:
print(f"Error parsing JSON: {e}")
return "Failed take action after looking at the screenshot"
# This function is not used. `-accurate` mode was removed for now until a new PR fixes it.
def accurate_mode_double_check(model, pseudo_messages, prev_x, prev_y):
"""
Reprompt OAI with additional screenshot of a mini screenshot centered around the cursor for further finetuning of clicked location
"""
try:
screenshot_filename = os.path.join("screenshots", "screenshot_mini.png")
capture_mini_screenshot_with_cursor(
file_path=screenshot_filename, x=prev_x, y=prev_y
)
new_screenshot_filename = os.path.join(
"screenshots", "screenshot_mini_with_grid.png"
)
with open(new_screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
accurate_vision_prompt = format_accurate_mode_vision_prompt(prev_x, prev_y)
accurate_mode_message = {
"role": "user",
"content": [
{"type": "text", "text": accurate_vision_prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
],
}
pseudo_messages.append(accurate_mode_message)
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=pseudo_messages,
presence_penalty=1,
frequency_penalty=1,
temperature=0.7,
max_tokens=300,
)
content = response.choices[0].message.content
except Exception as e:
print(f"Error reprompting model for accurate_mode: {e}")
return "ERROR"
def summarize(model, messages, objective):
try:
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
screenshot_filename = os.path.join(screenshots_dir, "summary_screenshot.png")
# Call the function to capture the screen with the cursor
capture_screen_with_cursor(screenshot_filename)
summary_prompt = format_summary_prompt(objective)
if model == "gpt-4-vision-preview":
with open(screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
summary_message = {
"role": "user",
"content": [
{"type": "text", "text": summary_prompt},
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_base64}"},
},
],
}
messages.append(summary_message)
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=messages,
max_tokens=500,
)
content = response.choices[0].message.content
elif model == "gemini-pro-vision":
model = genai.GenerativeModel("gemini-pro-vision")
summary_message = model.generate_content(
[summary_prompt, Image.open(screenshot_filename)]
)
content = summary_message.text
return content
except Exception as e:
print(f"Error in summarize: {e}")
return "Failed to summarize the workflow"
async def call_gpt_4_v_labeled(messages, objective):
time.sleep(1)
try:
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
screenshot_filename = os.path.join(screenshots_dir, "screenshot.png")
# Call the function to capture the screen with the cursor
capture_screen_with_cursor(screenshot_filename)
with open(screenshot_filename, "rb") as img_file:
img_base64 = base64.b64encode(img_file.read()).decode("utf-8")
previous_action = get_last_assistant_message(messages)
img_base64_labeled, img_base64_original, label_coordinates = add_labels(
img_base64, yolo_model
)
decision_prompt = format_decision_prompt(objective, previous_action)
labeled_click_prompt = format_label_prompt(objective)
click_message = {
"role": "user",
"content": [
{"type": "text", "text": labeled_click_prompt},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{img_base64_labeled}"
},
},
],
}
decision_message = {
"role": "user",
"content": [
{"type": "text", "text": decision_prompt},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{img_base64_original}"
},
},
],
}
click_messages = messages.copy()
click_messages.append(click_message)
decision_messages = messages.copy()
decision_messages.append(decision_message)
click_future = fetch_openai_response_async(click_messages)
decision_future = fetch_openai_response_async(decision_messages)
click_response, decision_response = await asyncio.gather(
click_future, decision_future
)
# Extracting the message content from the ChatCompletionMessage object
click_content = click_response.get("choices")[0].get("message").get("content")
decision_content = (
decision_response.get("choices")[0].get("message").get("content")
)
if not decision_content.startswith("CLICK"):
return decision_content
label_data = parse_click_content(click_content)
if label_data and "label" in label_data:
coordinates = get_label_coordinates(label_data["label"], label_coordinates)
image = Image.open(
io.BytesIO(base64.b64decode(img_base64))
) # Load the image to get its size
image_size = image.size # Get the size of the image (width, height)
click_position_percent = get_click_position_in_percent(
coordinates, image_size
)
if not click_position_percent:
print( | f"{ANSI_GREEN}[Self-Operating Computer]{ANSI_RED}[Error] Failed to get click position in percent. Trying another method {ANSI_RESET}" | 15 | 2023-11-04 03:13:45+00:00 | 8k |
netease-youdao/EmotiVoice | models/prompt_tts_modified/model_open_source.py | [
{
"identifier": "Encoder",
"path": "models/prompt_tts_modified/modules/encoder.py",
"snippet": "class Encoder(torch.nn.Module):\n def __init__(\n self,\n attention_dim=256,\n attention_heads=4,\n linear_units=2048,\n num_blocks=6,\n dropout_rate=0.1,\n ... | import torch
import torch.nn as nn
from models.prompt_tts_modified.modules.encoder import Encoder
from models.prompt_tts_modified.modules.variance import DurationPredictor, VariancePredictor
from models.prompt_tts_modified.modules.alignment import AlignmentModule, GaussianUpsampling, viterbi_decode, average_by_duration
from models.prompt_tts_modified.modules.initialize import initialize | 3,874 | """
This code is modified from https://github.com/espnet/espnet.
"""
class PromptTTS(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.encoder = Encoder(
attention_dim=config.model.encoder_n_hidden,
attention_heads=config.model.encoder_n_heads,
linear_units=config.model.encoder_n_hidden * 4,
num_blocks=config.model.encoder_n_layers,
dropout_rate=config.model.encoder_p_dropout,
positional_dropout_rate=config.model.encoder_p_dropout,
attention_dropout_rate=config.model.encoder_p_dropout,
normalize_before=True,
concat_after=False,
positionwise_conv_kernel_size=config.model.encoder_kernel_size_conv_mod,
stochastic_depth_rate=0.0,
)
self.decoder = Encoder(
attention_dim=config.model.decoder_n_hidden,
attention_heads=config.model.decoder_n_heads,
linear_units=config.model.decoder_n_hidden * 4,
num_blocks=config.model.decoder_n_layers,
dropout_rate=config.model.decoder_p_dropout,
positional_dropout_rate=config.model.decoder_p_dropout,
attention_dropout_rate=config.model.decoder_p_dropout,
normalize_before=True,
concat_after=False,
positionwise_conv_kernel_size=config.model.decoder_kernel_size_conv_mod,
stochastic_depth_rate=0.0,
)
self.duration_predictor = DurationPredictor(
idim=config.model.encoder_n_hidden,
n_layers=config.model.duration_n_layers,
n_chans=config.model.variance_n_hidden,
kernel_size=config.model.duration_kernel_size,
dropout_rate=config.model.duration_p_dropout,
)
| """
This code is modified from https://github.com/espnet/espnet.
"""
class PromptTTS(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.encoder = Encoder(
attention_dim=config.model.encoder_n_hidden,
attention_heads=config.model.encoder_n_heads,
linear_units=config.model.encoder_n_hidden * 4,
num_blocks=config.model.encoder_n_layers,
dropout_rate=config.model.encoder_p_dropout,
positional_dropout_rate=config.model.encoder_p_dropout,
attention_dropout_rate=config.model.encoder_p_dropout,
normalize_before=True,
concat_after=False,
positionwise_conv_kernel_size=config.model.encoder_kernel_size_conv_mod,
stochastic_depth_rate=0.0,
)
self.decoder = Encoder(
attention_dim=config.model.decoder_n_hidden,
attention_heads=config.model.decoder_n_heads,
linear_units=config.model.decoder_n_hidden * 4,
num_blocks=config.model.decoder_n_layers,
dropout_rate=config.model.decoder_p_dropout,
positional_dropout_rate=config.model.decoder_p_dropout,
attention_dropout_rate=config.model.decoder_p_dropout,
normalize_before=True,
concat_after=False,
positionwise_conv_kernel_size=config.model.decoder_kernel_size_conv_mod,
stochastic_depth_rate=0.0,
)
self.duration_predictor = DurationPredictor(
idim=config.model.encoder_n_hidden,
n_layers=config.model.duration_n_layers,
n_chans=config.model.variance_n_hidden,
kernel_size=config.model.duration_kernel_size,
dropout_rate=config.model.duration_p_dropout,
)
| self.pitch_predictor = VariancePredictor( | 2 | 2023-11-08 10:15:27+00:00 | 8k |
S-LoRA/S-LoRA | slora/models/llama2/layer_infer/transformer_layer_infer.py | [
{
"identifier": "Llama2TransformerLayerWeight",
"path": "slora/models/llama2/layer_weights/transformer_layer_weight.py",
"snippet": "class Llama2TransformerLayerWeight(LlamaTransformerLayerWeight):\n def __init__(self, layer_num, tp_rank, world_size, data_type, network_config, mode=[]):\n supe... | import torch
import torch.functional as F
import torch.distributed as dist
import numpy as np
import triton
from slora.models.llama2.layer_weights.transformer_layer_weight import Llama2TransformerLayerWeight
from slora.models.llama2.triton_kernel.context_flashattention_nopad import context_attention_fwd
from slora.models.llama2.triton_kernel.token_attention_nopad_att1 import token_att_fwd
from slora.models.llama2.triton_kernel.token_attention_nopad_softmax import token_softmax_fwd
from slora.models.llama2.triton_kernel.token_attention_nopad_reduceV import token_att_fwd2
from slora.models.llama.infer_struct import LlamaInferStateInfo
from slora.models.llama.layer_infer.transformer_layer_infer import LlamaTransformerLayerInfer
from slora.models.llama2.triton_kernel.token_attention_softmax_and_reducev import token_softmax_reducev_fwd | 4,763 |
class Llama2TransformerLayerInfer(LlamaTransformerLayerInfer):
def __init__(self, layer_num, tp_rank, world_size, network_config, mode=[]):
super().__init__(layer_num, tp_rank, world_size, network_config, mode)
key_value_head_num_ = network_config["num_key_value_heads"]
assert key_value_head_num_ % self.world_size_ == 0
self.tp_k_head_num_ = key_value_head_num_ // self.world_size_
self.tp_v_head_num_ = key_value_head_num_ // self.world_size_
return
# gqa attention
def _context_attention_kernel(self, q, k, v, infer_state: LlamaInferStateInfo, layer_weight:Llama2TransformerLayerWeight) -> torch.Tensor:
o_tensor = torch.empty_like(q)
|
class Llama2TransformerLayerInfer(LlamaTransformerLayerInfer):
def __init__(self, layer_num, tp_rank, world_size, network_config, mode=[]):
super().__init__(layer_num, tp_rank, world_size, network_config, mode)
key_value_head_num_ = network_config["num_key_value_heads"]
assert key_value_head_num_ % self.world_size_ == 0
self.tp_k_head_num_ = key_value_head_num_ // self.world_size_
self.tp_v_head_num_ = key_value_head_num_ // self.world_size_
return
# gqa attention
def _context_attention_kernel(self, q, k, v, infer_state: LlamaInferStateInfo, layer_weight:Llama2TransformerLayerWeight) -> torch.Tensor:
o_tensor = torch.empty_like(q) | context_attention_fwd(q.view(-1, self.tp_q_head_num_, self.head_dim_), | 1 | 2023-11-05 04:08:36+00:00 | 8k |
Yuliang-Liu/Monkey | data_generation/grit/grit/modeling/roi_heads/grit_roi_heads.py | [
{
"identifier": "GRiTFastRCNNOutputLayers",
"path": "data_generation/grit/grit/modeling/roi_heads/grit_fast_rcnn.py",
"snippet": "class GRiTFastRCNNOutputLayers(FastRCNNOutputLayers):\n @configurable\n def __init__(\n self, \n input_shape: ShapeSpec,\n **kwargs,\n ):\n ... | import math
import torch
import logging
from typing import Dict, List, Optional, Tuple, Union
from detectron2.config import configurable
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads, _ScaleGradient
from detectron2.modeling.poolers import ROIPooler
from detectron2.layers import batched_nms
from .grit_fast_rcnn import GRiTFastRCNNOutputLayers
from ..text.text_decoder import TransformerDecoderTextualHead, GRiTTextDecoder, AutoRegressiveBeamSearch
from ..text.load_text_token import LoadTextTokens
from transformers import BertTokenizer
from grit.data.custom_dataset_mapper import ObjDescription
from ..soft_nms import batched_soft_nms | 7,024 |
logger = logging.getLogger(__name__)
@ROI_HEADS_REGISTRY.register()
class GRiTROIHeadsAndTextDecoder(CascadeROIHeads):
@configurable
def __init__(
self,
*,
text_decoder_transformer,
train_task: list,
test_task: str,
mult_proposal_score: bool = False,
mask_weight: float = 1.0,
object_feat_pooler=None,
soft_nms_enabled=False,
beam_size=1,
**kwargs,
):
super().__init__(**kwargs)
self.mult_proposal_score = mult_proposal_score
self.mask_weight = mask_weight
self.object_feat_pooler = object_feat_pooler
self.soft_nms_enabled = soft_nms_enabled
self.test_task = test_task
self.beam_size = beam_size
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
self.tokenizer = tokenizer
assert test_task in train_task, 'GRiT has not been trained on {} task, ' \
'please verify the task name or train a new ' \
'GRiT on {} task'.format(test_task, test_task)
task_begin_tokens = {}
for i, task in enumerate(train_task):
if i == 0:
task_begin_tokens[task] = tokenizer.cls_token_id
else:
task_begin_tokens[task] = 103 + i
self.task_begin_tokens = task_begin_tokens
|
logger = logging.getLogger(__name__)
@ROI_HEADS_REGISTRY.register()
class GRiTROIHeadsAndTextDecoder(CascadeROIHeads):
@configurable
def __init__(
self,
*,
text_decoder_transformer,
train_task: list,
test_task: str,
mult_proposal_score: bool = False,
mask_weight: float = 1.0,
object_feat_pooler=None,
soft_nms_enabled=False,
beam_size=1,
**kwargs,
):
super().__init__(**kwargs)
self.mult_proposal_score = mult_proposal_score
self.mask_weight = mask_weight
self.object_feat_pooler = object_feat_pooler
self.soft_nms_enabled = soft_nms_enabled
self.test_task = test_task
self.beam_size = beam_size
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
self.tokenizer = tokenizer
assert test_task in train_task, 'GRiT has not been trained on {} task, ' \
'please verify the task name or train a new ' \
'GRiT on {} task'.format(test_task, test_task)
task_begin_tokens = {}
for i, task in enumerate(train_task):
if i == 0:
task_begin_tokens[task] = tokenizer.cls_token_id
else:
task_begin_tokens[task] = 103 + i
self.task_begin_tokens = task_begin_tokens
| beamsearch_decode = AutoRegressiveBeamSearch( | 3 | 2023-11-09 14:31:48+00:00 | 8k |
disler/multi-agent-postgres-data-analytics | postgres_da_ai_agent/main.py | [
{
"identifier": "PostgresAgentInstruments",
"path": "postgres_da_ai_agent/agents/instruments.py",
"snippet": "class PostgresAgentInstruments(AgentInstruments):\n \"\"\"\n Unified Toolset for the Postgres Data Analytics Multi-Agent System\n\n Advantages:\n - All agents have access to the ... | import os
import dotenv
import argparse
import autogen
from postgres_da_ai_agent.agents.instruments import PostgresAgentInstruments
from postgres_da_ai_agent.modules.db import PostgresManager
from postgres_da_ai_agent.modules import llm
from postgres_da_ai_agent.modules import orchestrator
from postgres_da_ai_agent.modules import rand
from postgres_da_ai_agent.modules import file
from postgres_da_ai_agent.modules import embeddings
from postgres_da_ai_agent.agents import agents
from postgres_da_ai_agent.types import ConversationResult | 4,763 | """
Heads up: in v7 pyautogen doesn't work with the latest openai version so this file has been commented out via pyproject.toml
"""
# ---------------- Your Environment Variables ----------------
dotenv.load_dotenv()
assert os.environ.get("DATABASE_URL"), "POSTGRES_CONNECTION_URL not found in .env file"
assert os.environ.get(
"OPENAI_API_KEY"
), "POSTGRES_CONNECTION_URL not found in .env file"
# ---------------- Constants ----------------
DB_URL = os.environ.get("DATABASE_URL")
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
POSTGRES_TABLE_DEFINITIONS_CAP_REF = "TABLE_DEFINITIONS"
def main():
# ---------------- Parse '--prompt' CLI Parameter ----------------
parser = argparse.ArgumentParser()
parser.add_argument("--prompt", help="The prompt for the AI")
args = parser.parse_args()
if not args.prompt:
print("Please provide a prompt")
return
raw_prompt = args.prompt
prompt = f"Fulfill this database query: {raw_prompt}. "
session_id = rand.generate_session_id(raw_prompt)
# ---------------- Create Agent Instruments And Build Database Connection ----------------
with PostgresAgentInstruments(DB_URL, session_id) as (agent_instruments, db):
# ----------- Gate Team: Prevent bad prompts from running and burning your $$$ -------------
gate_orchestrator = agents.build_team_orchestrator(
"scrum_master",
agent_instruments,
validate_results=lambda: (True, ""),
)
gate_orchestrator: ConversationResult = (
gate_orchestrator.sequential_conversation(prompt)
)
print("gate_orchestrator.last_message_str", gate_orchestrator.last_message_str)
nlq_confidence = int(gate_orchestrator.last_message_str)
match nlq_confidence:
case (1 | 2):
print(f"❌ Gate Team Rejected - Confidence too low: {nlq_confidence}")
return
case (3 | 4 | 5):
print(f"✅ Gate Team Approved - Valid confidence: {nlq_confidence}")
case _:
print("❌ Gate Team Rejected - Invalid response")
return
# -------- BUILD TABLE DEFINITIONS -----------
map_table_name_to_table_def = db.get_table_definition_map_for_embeddings()
| """
Heads up: in v7 pyautogen doesn't work with the latest openai version so this file has been commented out via pyproject.toml
"""
# ---------------- Your Environment Variables ----------------
dotenv.load_dotenv()
assert os.environ.get("DATABASE_URL"), "POSTGRES_CONNECTION_URL not found in .env file"
assert os.environ.get(
"OPENAI_API_KEY"
), "POSTGRES_CONNECTION_URL not found in .env file"
# ---------------- Constants ----------------
DB_URL = os.environ.get("DATABASE_URL")
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
POSTGRES_TABLE_DEFINITIONS_CAP_REF = "TABLE_DEFINITIONS"
def main():
# ---------------- Parse '--prompt' CLI Parameter ----------------
parser = argparse.ArgumentParser()
parser.add_argument("--prompt", help="The prompt for the AI")
args = parser.parse_args()
if not args.prompt:
print("Please provide a prompt")
return
raw_prompt = args.prompt
prompt = f"Fulfill this database query: {raw_prompt}. "
session_id = rand.generate_session_id(raw_prompt)
# ---------------- Create Agent Instruments And Build Database Connection ----------------
with PostgresAgentInstruments(DB_URL, session_id) as (agent_instruments, db):
# ----------- Gate Team: Prevent bad prompts from running and burning your $$$ -------------
gate_orchestrator = agents.build_team_orchestrator(
"scrum_master",
agent_instruments,
validate_results=lambda: (True, ""),
)
gate_orchestrator: ConversationResult = (
gate_orchestrator.sequential_conversation(prompt)
)
print("gate_orchestrator.last_message_str", gate_orchestrator.last_message_str)
nlq_confidence = int(gate_orchestrator.last_message_str)
match nlq_confidence:
case (1 | 2):
print(f"❌ Gate Team Rejected - Confidence too low: {nlq_confidence}")
return
case (3 | 4 | 5):
print(f"✅ Gate Team Approved - Valid confidence: {nlq_confidence}")
case _:
print("❌ Gate Team Rejected - Invalid response")
return
# -------- BUILD TABLE DEFINITIONS -----------
map_table_name_to_table_def = db.get_table_definition_map_for_embeddings()
| database_embedder = embeddings.DatabaseEmbedder() | 6 | 2023-11-04 20:15:46+00:00 | 8k |
OpenBMB/ProAgent | ProAgent/n8n_tester/run_node.py | [
{
"identifier": "credentials",
"path": "ProAgent/n8n_tester/credential_loader.py",
"snippet": "class Credentials():\n def __init__(self, base_file_path= \"./ProAgent/n8n_tester/credentials\"):\n def get_workflow_id(self) -> str:\n def query(self, node_type):"
},
{
"identifier": "n8nPyth... | import subprocess
import tempfile
import json
import traceback
import uuid
from typing import Optional
from termcolor import colored
from ProAgent.n8n_tester.credential_loader import credentials
from ProAgent.n8n_parser.node import n8nPythonNode, n8nNodeMeta
from ProAgent.n8n_tester.pseudo_node.run_pseudo_node import run_pseudo_workflow
from ProAgent.utils import NodeType
from ProAgent.n8n_tester.prompts import success_prompt, error_prompt | 4,156 | "index": 0
}
]
]
}
})
workflow_nodes = [node_trigger,node_code, node_var]
workflow_versionId = str(uuid.uuid4())
workflow_name = "Simple Workflow"
workflow = {
# "id": workflow_id,
"versionId": workflow_versionId,
"name": workflow_name,
"nodes": workflow_nodes,
"connections": workflow_connection,
"active": False,
"settings": {
"executionOrder": "v1"
},
"tags": []
}
return workflow
def run_node(node: n8nPythonNode, input_data: list[dict] = [{}]) -> tuple[str, str]:
"""Execute a specified node.
Args:
workflow_id (Optional[str], optional): ID of the workflow in which the node is located. The workflow ID must be in your n8n workflow database. You could create a workflow and pick that id. If not provided, the default workflow will be used. Defaults to None.
node (Optional[dict], optional): n8n node json dictionary. If not provided, the default slack send message node will be used. Defaults to None.
input_data (list[dict], optional): Input data for the node. Defaults to [{}].
Returns:
tuple[str, str]: A tuple containing two strings. The first string represents the status of the node execution (e.g., "success", "failure"), and the second string provides additional information or error messages related to the execution.
"""
# problem: execute parallelly
constant_workflow = _get_constant_workflow(input_data=input_data)
constant_workflow["id"] = credentials.get_workflow_id()
node_var = constant_workflow["nodes"][-1]
node_var["type"] = "n8n-nodes-base." + node.node_meta.integration_name
if credentials.query(node.node_meta.integration_name) != None:
credential_item = credentials.query(node.node_meta.integration_name)
node_var["credentials"] = {
credential_item["type"]: {
"id": credential_item["id"],
"name": credential_item["name"],
}
}
param_json = {}
for key, value in node.params.items():
param = value.to_json()
if param != None:
param_json[key] = param
if 'json' in input_data[0].keys():
node_var['parameters'] = input_data[0]['json']
node_var["parameters"].update(param_json)
else:
node_var["parameters"] = param_json
node_var["parameters"]["operation"] = node.node_meta.operation_name
node_var["parameters"]["resource"] = node.node_meta.resource_name
if node.node_meta.integration_name == 'slack':
node_var["parameters"]["authentication"] = "oAuth2"
if node.node_meta.integration_name == 'googleSheets':
node_var["parameters"]["operation"] = node.node_meta.operation_name
node_var["typeVersion"] = 4
node_var["parameters"]["columns"] = {
"mappingMode": "autoMapInputData",
"value": {},
"matchingColumns": [
"id"
]
}
# handle workflow
if 'pseudoNode' in node.node_json.keys() and node.node_json['pseudoNode']:
try:
# import pdb; pdb.set_trace()
output = run_pseudo_workflow(input_data, constant_workflow)
error= ""
except BaseException as e:
traceback.print_exc()
print(e)
raise e
else:
temp_file = tempfile.NamedTemporaryFile(delete=False, mode="w", suffix=".json")
json.dump(constant_workflow, temp_file)
temp_file.close()
temp_file_path = temp_file.name
# import pdb; pdb.set_trace()
result = subprocess.run(["n8n", "execute", "--file", temp_file_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Get the standard output
output = result.stdout.decode('utf-8')
error = result.stderr.decode('utf-8')
print(colored("###OUTPUT###", color="green"))
print(colored(output, color="green"))
print(colored("###ERROR###", color="red"))
print(colored(error, color="red"))
output_data = ""
error = ""
# check input data
if input_data == None or len(input_data) == 0:
warning_prompt = "WARNING: There is nothing in input_data. This may cause the failure of current node execution.\n"
print(colored(warning_prompt, color='yellow'))
output_data += warning_prompt
|
class n8nRunningException(Exception):
"""封装报错类型,可以重载自己定义的错误类型,只需要说出来 error-message 比如:
1.mainWorkflow只能由trigger调用
2.所有action node的输入都是[{}]格式的
"""
def __init__(self, message ):
super().__init__(message)
self.code_stack = []
self.error_message = ""
def add_context_stack(self, code_context):
"""
Adds a code context to the code stack.
Parameters:
code_context (any): The code context to be added to the stack.
Returns:
None
"""
self.code_stack.append(code_context)
pass
class anonymous_class():
def __init__(self, node: n8nPythonNode,*args, **kwargs):
self.node = node
def run(self, input_data, params):
"""
Run the function with the given input data and parameters.
Args:
input_data (any): The input data for the function.
params (dict): The parameters for the function.
Returns:
any: The output data from the function.
Raises:
n8nRunningException: If there is an error while running the function.
"""
output_data, error = run_node(node=self.node, input_data=input_data)
if error != "":
my_error = n8nRunningException(error)
raise my_error
else:
return output_data
def _get_constant_workflow(input_data):
"""
Generates a constant workflow based on the provided input data.
Parameters:
input_data (Any): The input data to be used in the workflow.
Returns:
Dict: The generated workflow.
"""
# node trigger
node_trigger_id = str(uuid.uuid4())
node_trigger = {
"id": node_trigger_id,
"name": "Execute Workflow Trigger",
"type": "n8n-nodes-base.executeWorkflowTrigger",
"typeVersion": 1,
"position": [0, 0],
"parameters": {}
}
node_trigger_name = str(node_trigger["name"])
# node code
node_code_id = str(uuid.uuid4())
node_code_jsCode = f"return {json.dumps(input_data)}"
node_code = {
"id": node_code_id,
"name": "Code",
"type": "n8n-nodes-base.code",
"typeVersion": 2,
"position": [180, 0],
"parameters": {
"jsCode": node_code_jsCode
}
}
node_code_name = str(node_code["name"])
node_var = {
"id": str(uuid.uuid4()),
"name": "node_var",
"position": [360, 0],
}
workflow_connection = dict({
node_trigger_name: {
"main": [
[
{
"node": node_code_name,
"type": "main",
"index": 0
}
]
]
},
node_code_name: {
"main": [
[
{
"node": node_var["name"],
"type": "main",
"index": 0
}
]
]
}
})
workflow_nodes = [node_trigger,node_code, node_var]
workflow_versionId = str(uuid.uuid4())
workflow_name = "Simple Workflow"
workflow = {
# "id": workflow_id,
"versionId": workflow_versionId,
"name": workflow_name,
"nodes": workflow_nodes,
"connections": workflow_connection,
"active": False,
"settings": {
"executionOrder": "v1"
},
"tags": []
}
return workflow
def run_node(node: n8nPythonNode, input_data: list[dict] = [{}]) -> tuple[str, str]:
"""Execute a specified node.
Args:
workflow_id (Optional[str], optional): ID of the workflow in which the node is located. The workflow ID must be in your n8n workflow database. You could create a workflow and pick that id. If not provided, the default workflow will be used. Defaults to None.
node (Optional[dict], optional): n8n node json dictionary. If not provided, the default slack send message node will be used. Defaults to None.
input_data (list[dict], optional): Input data for the node. Defaults to [{}].
Returns:
tuple[str, str]: A tuple containing two strings. The first string represents the status of the node execution (e.g., "success", "failure"), and the second string provides additional information or error messages related to the execution.
"""
# problem: execute parallelly
constant_workflow = _get_constant_workflow(input_data=input_data)
constant_workflow["id"] = credentials.get_workflow_id()
node_var = constant_workflow["nodes"][-1]
node_var["type"] = "n8n-nodes-base." + node.node_meta.integration_name
if credentials.query(node.node_meta.integration_name) != None:
credential_item = credentials.query(node.node_meta.integration_name)
node_var["credentials"] = {
credential_item["type"]: {
"id": credential_item["id"],
"name": credential_item["name"],
}
}
param_json = {}
for key, value in node.params.items():
param = value.to_json()
if param != None:
param_json[key] = param
if 'json' in input_data[0].keys():
node_var['parameters'] = input_data[0]['json']
node_var["parameters"].update(param_json)
else:
node_var["parameters"] = param_json
node_var["parameters"]["operation"] = node.node_meta.operation_name
node_var["parameters"]["resource"] = node.node_meta.resource_name
if node.node_meta.integration_name == 'slack':
node_var["parameters"]["authentication"] = "oAuth2"
if node.node_meta.integration_name == 'googleSheets':
node_var["parameters"]["operation"] = node.node_meta.operation_name
node_var["typeVersion"] = 4
node_var["parameters"]["columns"] = {
"mappingMode": "autoMapInputData",
"value": {},
"matchingColumns": [
"id"
]
}
# handle workflow
if 'pseudoNode' in node.node_json.keys() and node.node_json['pseudoNode']:
try:
# import pdb; pdb.set_trace()
output = run_pseudo_workflow(input_data, constant_workflow)
error= ""
except BaseException as e:
traceback.print_exc()
print(e)
raise e
else:
temp_file = tempfile.NamedTemporaryFile(delete=False, mode="w", suffix=".json")
json.dump(constant_workflow, temp_file)
temp_file.close()
temp_file_path = temp_file.name
# import pdb; pdb.set_trace()
result = subprocess.run(["n8n", "execute", "--file", temp_file_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Get the standard output
output = result.stdout.decode('utf-8')
error = result.stderr.decode('utf-8')
print(colored("###OUTPUT###", color="green"))
print(colored(output, color="green"))
print(colored("###ERROR###", color="red"))
print(colored(error, color="red"))
output_data = ""
error = ""
# check input data
if input_data == None or len(input_data) == 0:
warning_prompt = "WARNING: There is nothing in input_data. This may cause the failure of current node execution.\n"
print(colored(warning_prompt, color='yellow'))
output_data += warning_prompt
| if success_prompt in output: | 5 | 2023-11-03 01:20:14+00:00 | 8k |
LLaVA-VL/LLaVA-Plus-Codebase | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if al... | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 6,833 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction | self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device) | 3 | 2023-11-07 13:06:02+00:00 | 8k |
bobby-he/simplified_transformers | simplified_transformers/train_utils.py | [
{
"identifier": "myGPT2Attention",
"path": "simplified_transformers/model_utils.py",
"snippet": "class myGPT2Attention(nn.Module):\n \"\"\"\n A customisable Attn sub-block that can implement Shaped Attention, and identity value/projection weights.\n \"\"\"\n def __init__(self, config, is_cro... | import torch
import wandb
from transformers import Trainer
from transformers.trainer_pt_utils import get_parameter_names
from .model_utils import myGPT2Attention, myGPT2MLP, MyConv1D, RMSNorm | 4,522 |
class MyTrainer(Trainer):
def create_optimizer(self):
"""
Identical to standard HF AdamW optimizer, but with no WD for gain parameters.
"""
opt_model = self.model
if self.optimizer is None:
decay_parameters = get_parameter_names(
opt_model, [torch.nn.LayerNorm, RMSNorm]
)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
gain_parameters = [name for name in decay_parameters if "gain" in name]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in opt_model.named_parameters()
if (
n in decay_parameters
and n not in gain_parameters
and p.requires_grad
)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p
for n, p in opt_model.named_parameters()
if (n in gain_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
{
"params": [
p
for n, p in opt_model.named_parameters()
if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(
self.args
)
self.optimizer = optimizer_cls(
optimizer_grouped_parameters, **optimizer_kwargs
)
return self.optimizer
def compute_loss(self, model, inputs, return_outputs=False):
"""
Identical to HF transformers compute_loss, but with extra logging.
"""
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if isinstance(outputs, dict) and "loss" not in outputs:
raise ValueError(
"The model did not return a loss from the inputs, only the following keys: "
f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
)
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
if self.state.global_step % 100 == 0 and "wandb" in self.args.report_to:
if self.args.report_gains:
to_report = {}
for i, block in enumerate(model.transformer.h):
if type(block.mlp) is myGPT2MLP:
to_report[
f"{i}.mlp_block_resid_gain"
] = block.mlp_block_resid_gain.data.norm()
if type(block.attn.v_attn) is MyConv1D:
to_report[
f"attn.{i}.value_skip_gain"
] = block.attn.v_attn.skip_gain.data
to_report[
f"attn.{i}.value_resid_gain"
] = block.attn.v_attn.resid_gain.data
if type(block.attn.c_proj) is MyConv1D:
to_report[
f"attn.{i}.proj_skip_gain"
] = block.attn.c_proj.skip_gain.data
to_report[
f"attn.{i}.proj_resid_gain"
] = block.attn.c_proj.resid_gain.data
|
class MyTrainer(Trainer):
def create_optimizer(self):
"""
Identical to standard HF AdamW optimizer, but with no WD for gain parameters.
"""
opt_model = self.model
if self.optimizer is None:
decay_parameters = get_parameter_names(
opt_model, [torch.nn.LayerNorm, RMSNorm]
)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
gain_parameters = [name for name in decay_parameters if "gain" in name]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in opt_model.named_parameters()
if (
n in decay_parameters
and n not in gain_parameters
and p.requires_grad
)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p
for n, p in opt_model.named_parameters()
if (n in gain_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
{
"params": [
p
for n, p in opt_model.named_parameters()
if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(
self.args
)
self.optimizer = optimizer_cls(
optimizer_grouped_parameters, **optimizer_kwargs
)
return self.optimizer
def compute_loss(self, model, inputs, return_outputs=False):
"""
Identical to HF transformers compute_loss, but with extra logging.
"""
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if isinstance(outputs, dict) and "loss" not in outputs:
raise ValueError(
"The model did not return a loss from the inputs, only the following keys: "
f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
)
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
if self.state.global_step % 100 == 0 and "wandb" in self.args.report_to:
if self.args.report_gains:
to_report = {}
for i, block in enumerate(model.transformer.h):
if type(block.mlp) is myGPT2MLP:
to_report[
f"{i}.mlp_block_resid_gain"
] = block.mlp_block_resid_gain.data.norm()
if type(block.attn.v_attn) is MyConv1D:
to_report[
f"attn.{i}.value_skip_gain"
] = block.attn.v_attn.skip_gain.data
to_report[
f"attn.{i}.value_resid_gain"
] = block.attn.v_attn.resid_gain.data
if type(block.attn.c_proj) is MyConv1D:
to_report[
f"attn.{i}.proj_skip_gain"
] = block.attn.c_proj.skip_gain.data
to_report[
f"attn.{i}.proj_resid_gain"
] = block.attn.c_proj.resid_gain.data | if type(block.attn) is myGPT2Attention: | 0 | 2023-11-01 14:28:43+00:00 | 8k |
garibida/cross-image-attention | models/stable_diffusion.py | [
{
"identifier": "Range",
"path": "config.py",
"snippet": "class Range(NamedTuple):\n start: int\n end: int"
},
{
"identifier": "FreeUUNet2DConditionModel",
"path": "models/unet_2d_condition.py",
"snippet": "class FreeUUNet2DConditionModel(UNet2DConditionModel):\n\n def forward(\... | from typing import Any, Callable, Dict, List, Optional, Union
from diffusers import StableDiffusionPipeline
from diffusers.models import AutoencoderKL
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import rescale_noise_cfg
from diffusers.schedulers import KarrasDiffusionSchedulers
from tqdm import tqdm
from transformers import CLIPTextModel, CLIPTokenizer, CLIPImageProcessor
from config import Range
from models.unet_2d_condition import FreeUUNet2DConditionModel
import numpy as np
import torch | 3,932 |
class CrossImageAttentionStableDiffusionPipeline(StableDiffusionPipeline):
""" A modification of the standard StableDiffusionPipeline to incorporate our cross-image attention."""
def __init__(self, vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
|
class CrossImageAttentionStableDiffusionPipeline(StableDiffusionPipeline):
""" A modification of the standard StableDiffusionPipeline to incorporate our cross-image attention."""
def __init__(self, vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer, | unet: FreeUUNet2DConditionModel, | 1 | 2023-11-04 19:28:41+00:00 | 8k |
ForceFledgling/proxyhub | examples/find_and_use.py | [
{
"identifier": "Broker",
"path": "proxyhub/api.py",
"snippet": "class Broker:\n \"\"\"The Broker.\n\n | One broker to rule them all, one broker to find them,\n | One broker to bring them all and in the darkness bind them.\n\n :param asyncio.Queue queue: (optional) Queue of found/checked pro... | import asyncio
import aiohttp
from urllib.parse import urlparse
from proxyhub import Broker, ProxyPool
from proxyhub.errors import NoProxyError | 5,761 | """Find working proxies and use them concurrently.
Note: Pay attention to Broker.serve(), instead of the code listed below.
Perhaps it will be much useful and friendlier.
"""
async def fetch(url, proxy_pool, timeout, loop):
resp, proxy = None, None
try:
print('Waiting a proxy...')
proxy = await proxy_pool.get(scheme=urlparse(url).scheme)
print('Found proxy:', proxy)
proxy_url = 'http://%s:%d' % (proxy.host, proxy.port)
_timeout = aiohttp.ClientTimeout(total=timeout)
async with aiohttp.ClientSession(
timeout=_timeout, loop=loop
) as session, session.get(url, proxy=proxy_url) as response:
resp = await response.text()
except (
aiohttp.errors.ClientOSError,
aiohttp.errors.ClientResponseError,
aiohttp.errors.ServerDisconnectedError,
asyncio.TimeoutError,
NoProxyError,
) as e:
print('Error!\nURL: %s;\nError: %r\n', url, e)
finally:
if proxy:
proxy_pool.put(proxy)
return (url, resp)
async def get_pages(urls, proxy_pool, timeout=10, loop=None):
tasks = [fetch(url, proxy_pool, timeout, loop) for url in urls]
for task in asyncio.as_completed(tasks):
url, content = await task
print('%s\nDone!\nURL: %s;\nContent: %s' % ('-' * 20, url, content))
def main():
loop = asyncio.get_event_loop()
proxies = asyncio.Queue()
proxy_pool = ProxyPool(proxies)
judges = [
'http://httpbin.org/get?show_env',
'https://httpbin.org/get?show_env',
]
providers = [
'http://www.proxylists.net/',
'http://ipaddress.com/proxy-list/',
'https://www.sslproxies.org/',
]
| """Find working proxies and use them concurrently.
Note: Pay attention to Broker.serve(), instead of the code listed below.
Perhaps it will be much useful and friendlier.
"""
async def fetch(url, proxy_pool, timeout, loop):
resp, proxy = None, None
try:
print('Waiting a proxy...')
proxy = await proxy_pool.get(scheme=urlparse(url).scheme)
print('Found proxy:', proxy)
proxy_url = 'http://%s:%d' % (proxy.host, proxy.port)
_timeout = aiohttp.ClientTimeout(total=timeout)
async with aiohttp.ClientSession(
timeout=_timeout, loop=loop
) as session, session.get(url, proxy=proxy_url) as response:
resp = await response.text()
except (
aiohttp.errors.ClientOSError,
aiohttp.errors.ClientResponseError,
aiohttp.errors.ServerDisconnectedError,
asyncio.TimeoutError,
NoProxyError,
) as e:
print('Error!\nURL: %s;\nError: %r\n', url, e)
finally:
if proxy:
proxy_pool.put(proxy)
return (url, resp)
async def get_pages(urls, proxy_pool, timeout=10, loop=None):
tasks = [fetch(url, proxy_pool, timeout, loop) for url in urls]
for task in asyncio.as_completed(tasks):
url, content = await task
print('%s\nDone!\nURL: %s;\nContent: %s' % ('-' * 20, url, content))
def main():
loop = asyncio.get_event_loop()
proxies = asyncio.Queue()
proxy_pool = ProxyPool(proxies)
judges = [
'http://httpbin.org/get?show_env',
'https://httpbin.org/get?show_env',
]
providers = [
'http://www.proxylists.net/',
'http://ipaddress.com/proxy-list/',
'https://www.sslproxies.org/',
]
| broker = Broker( | 0 | 2023-11-05 13:28:57+00:00 | 8k |
WithSecureLabs/IceKube | icekube/icekube.py | [
{
"identifier": "attack_paths",
"path": "icekube/attack_paths.py",
"snippet": "WORKLOAD_TYPES = [\n \"ReplicationController\",\n \"DaemonSet\",\n \"Deployment\",\n \"ReplicaSet\",\n \"StatefulSet\",\n \"CronJob\",\n \"Job\",\n]\ndef create_workload_query(workloads: List[str] = WORKL... | import logging
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import List, Optional
from icekube.attack_paths import attack_paths
from icekube.kube import (
all_resources,
api_resources,
context_name,
kube_version,
)
from icekube.models import Cluster, Signer
from icekube.models.base import Resource
from icekube.neo4j import create, find, get, get_driver
from neo4j import BoltDriver
from tqdm import tqdm | 4,531 |
logger = logging.getLogger(__name__)
def create_indices():
for resource in api_resources():
if "list" not in resource.verbs:
continue
kind = resource.kind
namespace = resource.namespaced
cmd = f"CREATE INDEX {kind.lower()} IF NOT EXISTS "
cmd += f"FOR (n:{kind}) ON (n.name"
if namespace:
cmd += ", n.namespace"
cmd += ")"
with get_driver().session() as session:
session.run(cmd)
def enumerate_resource_kind(
ignore: Optional[List[str]] = None,
):
if ignore is None:
ignore = []
with get_driver().session() as session:
cluster = Cluster(apiVersion="N/A", name=context_name(), version=kube_version())
cmd, kwargs = create(cluster)
session.run(cmd, **kwargs)
signers = [
"kubernetes.io/kube-apiserver-client",
"kubernetes.io/kube-apiserver-client-kubelet",
"kubernetes.io/kubelet-serving",
"kubernetes.io/legacy-unknown",
]
for signer in signers:
s = Signer(name=signer)
cmd, kwargs = create(s)
session.run(cmd, **kwargs)
for resource in all_resources(ignore=ignore):
cmd, kwargs = create(resource)
session.run(cmd, **kwargs)
def relationship_generator(
driver: BoltDriver,
initial: bool,
resource: Resource,
):
with driver.session() as session:
logger.info(f"Generating relationships for {resource}")
for source, relationship, target in resource.relationships(initial):
if isinstance(source, Resource):
src_cmd, src_kwargs = get(source, prefix="src")
else:
src_cmd = source[0].format(prefix="src")
src_kwargs = {f"src_{key}": value for key, value in source[1].items()}
if isinstance(target, Resource):
dst_cmd, dst_kwargs = get(target, prefix="dst")
else:
dst_cmd = target[0].format(prefix="dst")
dst_kwargs = {f"dst_{key}": value for key, value in target[1].items()}
cmd = src_cmd + "WITH src " + dst_cmd
if isinstance(relationship, str):
relationship = [relationship]
cmd += "".join(f"MERGE (src)-[:{x}]->(dst) " for x in relationship)
kwargs = {**src_kwargs, **dst_kwargs}
logger.debug(f"Starting neo4j query: {cmd}, {kwargs}")
session.run(cmd, kwargs)
def generate_relationships(threaded: bool = False) -> None:
logger.info("Generating relationships")
logger.info("Fetching resources from neo4j")
driver = get_driver()
|
logger = logging.getLogger(__name__)
def create_indices():
for resource in api_resources():
if "list" not in resource.verbs:
continue
kind = resource.kind
namespace = resource.namespaced
cmd = f"CREATE INDEX {kind.lower()} IF NOT EXISTS "
cmd += f"FOR (n:{kind}) ON (n.name"
if namespace:
cmd += ", n.namespace"
cmd += ")"
with get_driver().session() as session:
session.run(cmd)
def enumerate_resource_kind(
ignore: Optional[List[str]] = None,
):
if ignore is None:
ignore = []
with get_driver().session() as session:
cluster = Cluster(apiVersion="N/A", name=context_name(), version=kube_version())
cmd, kwargs = create(cluster)
session.run(cmd, **kwargs)
signers = [
"kubernetes.io/kube-apiserver-client",
"kubernetes.io/kube-apiserver-client-kubelet",
"kubernetes.io/kubelet-serving",
"kubernetes.io/legacy-unknown",
]
for signer in signers:
s = Signer(name=signer)
cmd, kwargs = create(s)
session.run(cmd, **kwargs)
for resource in all_resources(ignore=ignore):
cmd, kwargs = create(resource)
session.run(cmd, **kwargs)
def relationship_generator(
driver: BoltDriver,
initial: bool,
resource: Resource,
):
with driver.session() as session:
logger.info(f"Generating relationships for {resource}")
for source, relationship, target in resource.relationships(initial):
if isinstance(source, Resource):
src_cmd, src_kwargs = get(source, prefix="src")
else:
src_cmd = source[0].format(prefix="src")
src_kwargs = {f"src_{key}": value for key, value in source[1].items()}
if isinstance(target, Resource):
dst_cmd, dst_kwargs = get(target, prefix="dst")
else:
dst_cmd = target[0].format(prefix="dst")
dst_kwargs = {f"dst_{key}": value for key, value in target[1].items()}
cmd = src_cmd + "WITH src " + dst_cmd
if isinstance(relationship, str):
relationship = [relationship]
cmd += "".join(f"MERGE (src)-[:{x}]->(dst) " for x in relationship)
kwargs = {**src_kwargs, **dst_kwargs}
logger.debug(f"Starting neo4j query: {cmd}, {kwargs}")
session.run(cmd, kwargs)
def generate_relationships(threaded: bool = False) -> None:
logger.info("Generating relationships")
logger.info("Fetching resources from neo4j")
driver = get_driver() | resources = find() | 9 | 2023-11-02 13:54:21+00:00 | 8k |
IAAR-Shanghai/UHGEval | run_uhgeval_future.py | [
{
"identifier": "XinhuaHallucinations",
"path": "uhgeval/dataset/xinhua.py",
"snippet": "class XinhuaHallucinations(BaseDataset):\n def __init__(self, path: str, shuffle: bool = False, seed: int = 22):\n self.data = []\n if os.path.isfile(path):\n with open(path, encoding='ut... | import sys
import argparse
from loguru import logger
from uhgeval.dataset.xinhua import XinhuaHallucinations
from uhgeval.evaluator.discriminative import (
DiscriminativeEvaluatorKeywordLevel,
DiscriminativeEvaluatorSentenceLevel
)
from uhgeval.evaluator.generative import GenerativeEvaluator
from uhgeval.evaluator.selective import SelectiveEvaluator
from uhgeval.core.analyst import save_overalls, save_overalls_by_type
from uhgeval.core.experiment import experiment_in_blocks
from uhgeval.llm.api import (
Baichuan2_53B_Chat,
GPT,
)
from uhgeval.llm.remote import (
Aquila_34B_Chat,
Baichuan2_13B_Chat,
ChatGLM2_6B_Chat,
InternLM_20B_Chat,
Xinyu_7B_Chat,
Xinyu_70B_Chat,
Qwen_14B_Chat,
GPT_transit,
) | 6,917 | # @Author : Shichao Song
# @Email : song.shichao@outlook.com
def parse_args(arguments: str = None):
parser = argparse.ArgumentParser(description='UHGEval: Benchmarking the Hallucination of Chinese Large Language Models via Unconstrained Generation')
parser.add_argument('--seed', dest='seed', type=int, default=22, help='Random seed')
parser.add_argument('--enable-log-saving', dest='enable_log_saving', default=False, action='store_true', help='Enable log saving')
parser.add_argument('--dataset-path', dest='dataset_path', default='data/Xinhua/XinhuaHallucinations.json', help='Path to the dataset')
parser.add_argument('--llms', dest='llms', nargs='+', default=['GPT'], help='List of LLMs to be evaluated')
parser.add_argument('--evaluators', dest='evaluators', nargs='+', default=['DiscriminativeEvaluatorKeywordLevel', 'DiscriminativeEvaluatorSentenceLevel', 'GenerativeEvaluator', 'SelectiveEvaluator'], help='List of evaluators to use')
parser.add_argument('--processes', dest='processes', type=int, default=3, help='Number of processes for the experiment')
parser.add_argument('--num-blocks', dest='num_blocks', type=int, default=1700, help='Number of blocks for the experiment')
parser.add_argument('--start-block', dest='start_block', type=int, default=0, help='Starting block number')
parser.add_argument('--save-results', dest='save_results', default=True, action='store_true', help='Save experiment results')
return parser.parse_args()
# TODO: Currently, this script does not support initialize llm parameters
def run(args):
logger.remove() # Remove all logger handlers including the stderr logger handler
logger.add(sys.stderr, level=40) # Update stderr logger
logger.add('logs/uhgeval_{time}.log', level=0) if args.enable_log_saving else ...
# TODO: Currently, loguru does not support log settings above when using the 'spawn' method in multiprocessing.
| # @Author : Shichao Song
# @Email : song.shichao@outlook.com
def parse_args(arguments: str = None):
parser = argparse.ArgumentParser(description='UHGEval: Benchmarking the Hallucination of Chinese Large Language Models via Unconstrained Generation')
parser.add_argument('--seed', dest='seed', type=int, default=22, help='Random seed')
parser.add_argument('--enable-log-saving', dest='enable_log_saving', default=False, action='store_true', help='Enable log saving')
parser.add_argument('--dataset-path', dest='dataset_path', default='data/Xinhua/XinhuaHallucinations.json', help='Path to the dataset')
parser.add_argument('--llms', dest='llms', nargs='+', default=['GPT'], help='List of LLMs to be evaluated')
parser.add_argument('--evaluators', dest='evaluators', nargs='+', default=['DiscriminativeEvaluatorKeywordLevel', 'DiscriminativeEvaluatorSentenceLevel', 'GenerativeEvaluator', 'SelectiveEvaluator'], help='List of evaluators to use')
parser.add_argument('--processes', dest='processes', type=int, default=3, help='Number of processes for the experiment')
parser.add_argument('--num-blocks', dest='num_blocks', type=int, default=1700, help='Number of blocks for the experiment')
parser.add_argument('--start-block', dest='start_block', type=int, default=0, help='Starting block number')
parser.add_argument('--save-results', dest='save_results', default=True, action='store_true', help='Save experiment results')
return parser.parse_args()
# TODO: Currently, this script does not support initialize llm parameters
def run(args):
logger.remove() # Remove all logger handlers including the stderr logger handler
logger.add(sys.stderr, level=40) # Update stderr logger
logger.add('logs/uhgeval_{time}.log', level=0) if args.enable_log_saving else ...
# TODO: Currently, loguru does not support log settings above when using the 'spawn' method in multiprocessing.
| dataset = XinhuaHallucinations(args.dataset_path, shuffle=True, seed=args.seed).load() | 0 | 2023-11-06 11:46:22+00:00 | 8k |
mobiusml/hqq | hqq/models/vllm/llama.py | [
{
"identifier": "BasePatch",
"path": "hqq/models/base.py",
"snippet": "class BasePatch():\n\t#Override these OR override the main patch_model() function\n\t############################################\n\t#This method iterates through layers of the model that are NOT nn.Linear and processes them via new_... | from typing import Any, Dict, List, Optional, Tuple
from torch import nn
from transformers import LlamaConfig
from vllm.model_executor.input_metadata import InputMetadata
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.attention import PagedAttentionWithRoPE
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (LinearMethodBase,
MergedColumnParallelLinear,
QKVParallelLinear,
RowParallelLinear)
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
VocabParallelEmbedding, ParallelLMHead)
from vllm.model_executor.parallel_utils.parallel_state import (
get_tensor_model_parallel_world_size)
from vllm.model_executor.weight_utils import (default_weight_loader,
hf_model_weights_iterator)
from vllm.sequence import SamplerOutput
from tqdm import tqdm
from ..base import BasePatch
from .base import BaseHQQVLLMModel
import torch
import gc
import transformers | 4,799 |
class LlamaModel(nn.Module):
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase] = None,) -> None:
super().__init__()
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = VocabParallelEmbedding(config.vocab_size,config.hidden_size,)
self.layers = nn.ModuleList([LlamaDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)])
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
input_ids: torch.Tensor,
positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
residual = None
for i in range(len(self.layers)):
cache_event = None if cache_events is None else cache_events[i]
layer = self.layers[i]
hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, cache_event, residual)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class LlamaForCausalLM(nn.Module):
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase] = None, dummy_load: bool = True) -> None:
super().__init__()
self.config = config
self.linear_method = linear_method
#Dummy loading Added
self.dummy_load = dummy_load
if(self.dummy_load): return
self.model = LlamaModel(config, linear_method)
self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
self.sampler = Sampler(config.vocab_size)
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata, cache_events: Optional[List[torch.cuda.Event]]) -> SamplerOutput:
if(self.dummy_load): return torch.empty([0]) #Added
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata, cache_events)
next_tokens = self.sampler(self.lm_head.weight, hidden_states, input_metadata)
return next_tokens
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str] = None, load_format: str = "auto", revision: Optional[str] = None):
if(self.dummy_load): return #Added
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(self.named_parameters())
for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision):
if "rotary_emb.inv_freq" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
if weight_name not in name:
continue
param = params_dict[name.replace(weight_name, param_name)]
weight_loader = param.weight_loader
weight_loader(param, loaded_weight, shard_id)
break
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
#############################################################################################################################################
#############################################################################################################################################
class LLamaPatch(BasePatch):
@classmethod
def get_linear_tags(cls):
return ['self_attn.qkv_proj', 'self_attn.o_proj', 'mlp.gate_up_proj', 'mlp.down_proj']
@classmethod
def patch_nonlinearlayers(cls, model, patch_fct, verbose=True):
base_model = model.model
model.sampler = patch_fct(model.sampler)
model.lm_head = patch_fct(model.lm_head)
base_model.embed_tokens = patch_fct(base_model.embed_tokens)
base_model.norm = patch_fct(base_model.norm)
layers = base_model.layers
for i in tqdm(range(len(base_model.layers)), disable=not verbose):
#rotary embed
layers[i].self_attn.attn.rotary_emb.cos_sin_cache = torch.nn.Parameter(layers[i].self_attn.attn.rotary_emb.cos_sin_cache, requires_grad=False)
layers[i].self_attn.attn.rotary_emb = patch_fct(layers[i].self_attn.attn.rotary_emb)
layers[i].mlp.act_fn = patch_fct(layers[i].mlp.act_fn)
layers[i].input_layernorm = patch_fct(layers[i].input_layernorm)
layers[i].post_attention_layernorm = patch_fct(layers[i].post_attention_layernorm)
@classmethod
def patch_linearlayers(cls, model, patch_fct, patch_params, verbose=True):
base_model = model.model
layers = base_model.layers
for i in tqdm(range(len(layers)), disable=not verbose):
layers[i].self_attn.qkv_proj = patch_fct(layers[i].self_attn.qkv_proj, patch_params['self_attn.qkv_proj'])
layers[i].self_attn.o_proj = patch_fct(layers[i].self_attn.o_proj, patch_params['self_attn.o_proj'])
layers[i].mlp.gate_up_proj = patch_fct(layers[i].mlp.gate_up_proj, patch_params['mlp.gate_up_proj'])
layers[i].mlp.down_proj = patch_fct(layers[i].mlp.down_proj, patch_params['mlp.down_proj'])
#from ..models.hf.base import init_empty_weights
| # coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only LLaMA model compatible with HuggingFace weights."""
KVCache = Tuple[torch.Tensor, torch.Tensor]
class LlamaMLP(nn.Module):
def __init__(self, hidden_size: int, intermediate_size: int, hidden_act: str, linear_method: Optional[LinearMethodBase] = None,) -> None:
super().__init__()
##############################################################################################
self.gate_up_proj = MergedColumnParallelLinear(hidden_size, [intermediate_size] * 2, bias=False, linear_method=linear_method)
self.down_proj = RowParallelLinear(intermediate_size, hidden_size, bias=False, linear_method=linear_method)
self.gate_up_proj = self.gate_up_proj.cpu()
self.down_proj = self.down_proj.cpu()
torch.cuda.empty_cache()
##############################################################################################
if hidden_act != "silu":
raise ValueError(f"Unsupported activation: {hidden_act}. ""Only silu is supported for now.")
self.act_fn = SiluAndMul()
def forward(self, x):
gate_up, _ = self.gate_up_proj(x)
x = self.act_fn(gate_up)
x, _ = self.down_proj(x)
return x
class LlamaAttention(nn.Module):
def __init__(self, hidden_size: int, num_heads: int, num_kv_heads: int, rope_theta: float = 10000, rope_scaling: Optional[Dict[str, Any]] = None,
max_position_embeddings: int = 8192, linear_method: Optional[LinearMethodBase] = None,) -> None:
super().__init__()
self.hidden_size = hidden_size
tp_size = get_tensor_model_parallel_world_size()
self.total_num_heads = num_heads
assert self.total_num_heads % tp_size == 0
self.num_heads = self.total_num_heads // tp_size
self.total_num_kv_heads = num_kv_heads
if self.total_num_kv_heads >= tp_size:
# Number of KV heads is greater than TP size, so we partition
# the KV heads across multiple tensor parallel GPUs.
assert self.total_num_kv_heads % tp_size == 0
else:
# Number of KV heads is less than TP size, so we replicate
# the KV heads across multiple tensor parallel GPUs.
assert tp_size % self.total_num_kv_heads == 0
self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
self.head_dim = hidden_size // self.total_num_heads
self.q_size = self.num_heads * self.head_dim
self.kv_size = self.num_kv_heads * self.head_dim
self.scaling = self.head_dim**-0.5
self.rope_theta = rope_theta
self.max_position_embeddings = max_position_embeddings
##############################################################################################
self.qkv_proj = QKVParallelLinear(hidden_size, self.head_dim, self.total_num_heads, self.total_num_kv_heads, bias=False, linear_method=linear_method)
self.o_proj = RowParallelLinear(self.total_num_heads * self.head_dim, hidden_size, bias=False, linear_method=linear_method)
self.attn = PagedAttentionWithRoPE(self.num_heads, self.head_dim, self.scaling, base=self.rope_theta,
max_position=self.max_position_embeddings, rotary_dim=self.head_dim,
num_kv_heads=self.num_kv_heads, rope_scaling=rope_scaling)
self.qkv_proj = self.qkv_proj.cpu()
self.o_proj = self.o_proj.cpu()
torch.cuda.empty_cache()
##############################################################################################
def forward(self, positions: torch.Tensor, hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata, cache_event: Optional[torch.cuda.Event]) -> torch.Tensor:
qkv, _ = self.qkv_proj(hidden_states)
q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
k_cache, v_cache = kv_cache
attn_output = self.attn(positions, q, k, v, k_cache, v_cache, input_metadata, cache_event)
output, _ = self.o_proj(attn_output)
return output
class LlamaDecoderLayer(nn.Module):
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase] = None, ) -> None:
super().__init__()
self.hidden_size = config.hidden_size
rope_theta = getattr(config, "rope_theta", 10000)
rope_scaling = getattr(config, "rope_scaling", None)
max_position_embeddings = getattr(config, "max_position_embeddings", 8192)
self.self_attn = LlamaAttention(
hidden_size=self.hidden_size,
num_heads=config.num_attention_heads,
num_kv_heads=config.num_key_value_heads,
rope_theta=rope_theta,
rope_scaling=rope_scaling,
max_position_embeddings=max_position_embeddings,
linear_method=linear_method,
)
self.mlp = LlamaMLP(
hidden_size=self.hidden_size,
intermediate_size=config.intermediate_size,
hidden_act=config.hidden_act,
linear_method=linear_method,
)
self.input_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(self,positions: torch.Tensor,hidden_states: torch.Tensor, kv_cache: KVCache, input_metadata: InputMetadata,
cache_event: Optional[torch.cuda.Event], residual: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
# Self Attention
if residual is None:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
else:
hidden_states, residual = self.input_layernorm(hidden_states, residual)
hidden_states = self.self_attn(positions=positions, hidden_states=hidden_states, kv_cache=kv_cache, input_metadata=input_metadata, cache_event=cache_event)
# Fully Connected
hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)
hidden_states = self.mlp(hidden_states)
return hidden_states, residual
class LlamaModel(nn.Module):
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase] = None,) -> None:
super().__init__()
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = VocabParallelEmbedding(config.vocab_size,config.hidden_size,)
self.layers = nn.ModuleList([LlamaDecoderLayer(config, linear_method) for _ in range(config.num_hidden_layers)])
self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
input_ids: torch.Tensor,
positions: torch.Tensor,
kv_caches: List[KVCache],
input_metadata: InputMetadata,
cache_events: Optional[List[torch.cuda.Event]],
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
residual = None
for i in range(len(self.layers)):
cache_event = None if cache_events is None else cache_events[i]
layer = self.layers[i]
hidden_states, residual = layer(positions, hidden_states, kv_caches[i], input_metadata, cache_event, residual)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class LlamaForCausalLM(nn.Module):
def __init__(self, config: LlamaConfig, linear_method: Optional[LinearMethodBase] = None, dummy_load: bool = True) -> None:
super().__init__()
self.config = config
self.linear_method = linear_method
#Dummy loading Added
self.dummy_load = dummy_load
if(self.dummy_load): return
self.model = LlamaModel(config, linear_method)
self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
self.sampler = Sampler(config.vocab_size)
def forward(self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[KVCache], input_metadata: InputMetadata, cache_events: Optional[List[torch.cuda.Event]]) -> SamplerOutput:
if(self.dummy_load): return torch.empty([0]) #Added
hidden_states = self.model(input_ids, positions, kv_caches, input_metadata, cache_events)
next_tokens = self.sampler(self.lm_head.weight, hidden_states, input_metadata)
return next_tokens
def load_weights(self, model_name_or_path: str, cache_dir: Optional[str] = None, load_format: str = "auto", revision: Optional[str] = None):
if(self.dummy_load): return #Added
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(self.named_parameters())
for name, loaded_weight in hf_model_weights_iterator(model_name_or_path, cache_dir, load_format, revision):
if "rotary_emb.inv_freq" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
if weight_name not in name:
continue
param = params_dict[name.replace(weight_name, param_name)]
weight_loader = param.weight_loader
weight_loader(param, loaded_weight, shard_id)
break
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
#############################################################################################################################################
#############################################################################################################################################
class LLamaPatch(BasePatch):
@classmethod
def get_linear_tags(cls):
return ['self_attn.qkv_proj', 'self_attn.o_proj', 'mlp.gate_up_proj', 'mlp.down_proj']
@classmethod
def patch_nonlinearlayers(cls, model, patch_fct, verbose=True):
base_model = model.model
model.sampler = patch_fct(model.sampler)
model.lm_head = patch_fct(model.lm_head)
base_model.embed_tokens = patch_fct(base_model.embed_tokens)
base_model.norm = patch_fct(base_model.norm)
layers = base_model.layers
for i in tqdm(range(len(base_model.layers)), disable=not verbose):
#rotary embed
layers[i].self_attn.attn.rotary_emb.cos_sin_cache = torch.nn.Parameter(layers[i].self_attn.attn.rotary_emb.cos_sin_cache, requires_grad=False)
layers[i].self_attn.attn.rotary_emb = patch_fct(layers[i].self_attn.attn.rotary_emb)
layers[i].mlp.act_fn = patch_fct(layers[i].mlp.act_fn)
layers[i].input_layernorm = patch_fct(layers[i].input_layernorm)
layers[i].post_attention_layernorm = patch_fct(layers[i].post_attention_layernorm)
@classmethod
def patch_linearlayers(cls, model, patch_fct, patch_params, verbose=True):
base_model = model.model
layers = base_model.layers
for i in tqdm(range(len(layers)), disable=not verbose):
layers[i].self_attn.qkv_proj = patch_fct(layers[i].self_attn.qkv_proj, patch_params['self_attn.qkv_proj'])
layers[i].self_attn.o_proj = patch_fct(layers[i].self_attn.o_proj, patch_params['self_attn.o_proj'])
layers[i].mlp.gate_up_proj = patch_fct(layers[i].mlp.gate_up_proj, patch_params['mlp.gate_up_proj'])
layers[i].mlp.down_proj = patch_fct(layers[i].mlp.down_proj, patch_params['mlp.down_proj'])
#from ..models.hf.base import init_empty_weights
| class LlamaHQQ(LLamaPatch, BaseHQQVLLMModel): | 1 | 2023-11-07 20:15:00+00:00 | 8k |
TheFunny/ArisuAutoSweeper | tasks/cafe/cafe.py | [
{
"identifier": "Config",
"path": "module/base/decorator.py",
"snippet": "class Config:\n \"\"\"\n Decorator that calls different function with a same name according to config.\n\n func_list likes:\n func_list = {\n 'func1': [\n {'options': {'ENABLE': True}, 'func': 1},\n ... | from enum import Enum
from module.base.decorator import Config
from module.base.timer import Timer
from module.logger import logger
from module.ui.switch import Switch
from tasks.base.page import page_cafe
from tasks.cafe.assets.assets_cafe import *
from tasks.cafe.invitation import handle_invitation
from tasks.cafe.ui import CafeUI | 4,380 |
SWITCH_CAFE = Switch('Cafe_switch')
SWITCH_CAFE.add_state('off', CHANGE_CAFE_NOT_SELECTED)
SWITCH_CAFE.add_state('on', CHANGE_CAFE_SELECTED)
SWITCH_CAFE_SELECT = Switch('Cafe_switch_select')
SWITCH_CAFE_SELECT.add_state('1', CAFE_FIRST)
SWITCH_CAFE_SELECT.add_state('2', CAFE_SECOND)
class CafeStatus(Enum):
STUDENT_LIST = 0
OCR = 1
REWARD = 2
GOT = 3
INVITATION = 4
CLICK = 5
CHECK = 6
FINISHED = -1
class Cafe(CafeUI):
@Config.when(Emulator_GameLanguage='jp')
def _is_second_cafe_on(self):
return self.config.Cafe_SecondCafe
@Config.when(Emulator_GameLanguage=None)
def _is_second_cafe_on(self):
return False
is_second_cafe_on = property(_is_second_cafe_on)
def _handle_cafe(self, status):
match status:
case CafeStatus.STUDENT_LIST:
self.appear_then_click(STUDENT_LIST)
if not self.appear(STUDENT_LIST):
return CafeStatus.OCR
case CafeStatus.OCR:
reward = self.get_reward_num()
if reward == 0:
return CafeStatus.GOT
if self.appear_then_click(CHECK_REWARD):
return CafeStatus.REWARD
case CafeStatus.REWARD:
if not self.appear(GET_REWARD_CLOSE):
self.click_with_interval(CHECK_REWARD)
return status
if self.match_color(GOT_REWARD):
self.device.click(GET_REWARD_CLOSE)
return CafeStatus.GOT
if self.match_color(GET_REWARD):
self.click_with_interval(GET_REWARD)
case CafeStatus.GOT:
|
SWITCH_CAFE = Switch('Cafe_switch')
SWITCH_CAFE.add_state('off', CHANGE_CAFE_NOT_SELECTED)
SWITCH_CAFE.add_state('on', CHANGE_CAFE_SELECTED)
SWITCH_CAFE_SELECT = Switch('Cafe_switch_select')
SWITCH_CAFE_SELECT.add_state('1', CAFE_FIRST)
SWITCH_CAFE_SELECT.add_state('2', CAFE_SECOND)
class CafeStatus(Enum):
STUDENT_LIST = 0
OCR = 1
REWARD = 2
GOT = 3
INVITATION = 4
CLICK = 5
CHECK = 6
FINISHED = -1
class Cafe(CafeUI):
@Config.when(Emulator_GameLanguage='jp')
def _is_second_cafe_on(self):
return self.config.Cafe_SecondCafe
@Config.when(Emulator_GameLanguage=None)
def _is_second_cafe_on(self):
return False
is_second_cafe_on = property(_is_second_cafe_on)
def _handle_cafe(self, status):
match status:
case CafeStatus.STUDENT_LIST:
self.appear_then_click(STUDENT_LIST)
if not self.appear(STUDENT_LIST):
return CafeStatus.OCR
case CafeStatus.OCR:
reward = self.get_reward_num()
if reward == 0:
return CafeStatus.GOT
if self.appear_then_click(CHECK_REWARD):
return CafeStatus.REWARD
case CafeStatus.REWARD:
if not self.appear(GET_REWARD_CLOSE):
self.click_with_interval(CHECK_REWARD)
return status
if self.match_color(GOT_REWARD):
self.device.click(GET_REWARD_CLOSE)
return CafeStatus.GOT
if self.match_color(GET_REWARD):
self.click_with_interval(GET_REWARD)
case CafeStatus.GOT: | logger.info('Cafe reward have been got') | 2 | 2023-11-01 07:09:45+00:00 | 8k |
dtiesling/flask-muck | tests/test.py | [
{
"identifier": "GuardianModel",
"path": "tests/app.py",
"snippet": "class GuardianModel(db.Model):\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String, nullable=False, unique=True)\n age = db.Column(db.Integer, nullable=True)\n family_id = db.Colu... | import json
import pytest
from unittest.mock import patch
from pydantic import BaseModel, ConfigDict
from flask_muck.exceptions import MuckImplementationError
from flask_muck.utils import (
get_url_rule,
get_fk_column,
get_query_filters_from_request_path,
get_join_models_from_parent_views,
)
from tests.app import (
GuardianModel,
ToyApiView,
ChildModel,
ToyModel,
BaseApiView,
PreCallback,
PostCallback,
GuardianApiView,
) | 3,716 | ]
assert get(
f"/guardians/{marge.id}/children/?sort=name__asc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
def test_sort_desc(self, get, marge, lisa, maggie, bart):
assert get(
f"/guardians/{marge.id}/children/?sort=age__desc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
assert get(
f"/guardians/{marge.id}/children/?sort=name__desc",
) == [{"name": maggie.name}, {"name": lisa.name}, {"name": bart.name}]
def test_nested_sort(self, get):
assert get(f"/guardians/?sort=family.surname") == [
{"name": "Bob"},
{"name": "Marge"},
]
def test_bad_sort(self, get):
get(f"/guardians/?sort=name__fail", expected_status_code=400)
get(f"/guardians/?sort=fail", expected_status_code=400)
get(f"/guardians/?sort=family.fail", expected_status_code=400)
get(f"/guardians/?sort=double.fail", expected_status_code=400)
def test_change_operator_separator(
self, get, monkeypatch, marge, lisa, bart, maggie
):
monkeypatch.setattr(BaseApiView, "operator_separator", "|")
assert get(
f"/guardians/{marge.id}/children/?sort=age|desc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
assert get(
f"/guardians/{marge.id}/children/?sort=name|desc",
) == [{"name": maggie.name}, {"name": lisa.name}, {"name": bart.name}]
@pytest.mark.usefixtures("simpsons", "belchers")
class TestSearch:
def test_search(self, get, marge):
assert get(f"/guardians/?search=marge") == [{"name": "Marge"}]
assert get(f"/guardians/?search=nobody") == []
assert get(f"/guardians/{marge.id}/children/?search=bart") == [{"name": "Bart"}]
assert get(f"/guardians/{marge.id}/children/?search=nope") == []
def test_unsupported_search(self, get, marge, bart, monkeypatch):
monkeypatch.setattr(GuardianApiView, "searchable_columns", [])
get(f"/guardians/?search=marge", expected_status_code=400)
class TestCallbacks:
@pytest.fixture
def pre_callback_patch(self):
with patch.object(PreCallback, "execute") as patched:
yield patched
@pytest.fixture
def post_callback_patch(self):
with patch.object(PostCallback, "execute") as patched:
yield patched
def test_create_callbacks(
self, post, user, pre_callback_patch, post_callback_patch
):
post("/guardians/", json={"name": "Jill"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_update_callbacks(
self, put, guardian, pre_callback_patch, post_callback_patch
):
put(f"/guardians/{guardian.id}/", json={"name": "updated"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_patch_callbacks(
self, put, patch, guardian, pre_callback_patch, post_callback_patch
):
patch(f"/guardians/{guardian.id}/", json={"name": "patched"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_delete_callbacks(
self, client, guardian, pre_callback_patch, post_callback_patch
):
client.delete(f"/guardians/{guardian.id}/")
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
@pytest.mark.usefixtures("simpsons", "belchers")
class TestNestedApis:
def test_get(self, get, bart, maggie, lisa, marge, skateboard, bob):
children = (bart, maggie, lisa)
assert get(f"/guardians/") == [{"name": marge.name}, {"name": bob.name}]
assert get(f"/guardians/{marge.id}/children/") == [
{"name": child.name} for child in children
]
assert get(f"/guardians/{marge.id}/children/{bart.id}/toy/") == {
"name": skateboard.name
}
class TestBlueprintRegistering:
def test_str_pk_patch_creation(self):
return
def test_int_pk_patch_update(self):
return
class TestUtils:
def test_get_url_rule(self):
assert (
get_url_rule(ToyApiView, None)
== "guardians/<int:guardians_id>/children/<int:children_id>/toy/"
)
def test_get_fk_column(self):
assert (
|
class TestBasicCrud:
def test_create(self, post, user):
response = post("/guardians/", json={"name": "Jill"})
parent = GuardianModel.query.one()
assert response == {"name": parent.name}
# Verify integrity errors are handled.
post("/guardians/", json={"name": "Jill"}, expected_status_code=409)
def test_read(self, get, user, guardian, child):
assert get(f"/guardians/") == [{"name": guardian.name}]
assert get(f"/guardians/{guardian.id}/") == {
"name": "Samantha",
"children": [{"name": "Tamara"}],
}
def test_update(self, put, patch, guardian):
assert put(f"/guardians/{guardian.id}/", json={"name": "updated"}) == {
"name": "updated"
}
assert patch(f"/guardians/{guardian.id}/", json={"name": "patched"}) == {
"name": "patched"
}
def test_delete(self, client, guardian):
client.delete(f"/guardians/{guardian.id}/")
assert GuardianModel.query.count() == 0
class TestAllowedMethods:
def test_get_only(self, client, monkeypatch):
monkeypatch.setattr(BaseApiView, "allowed_methods", {"GET"})
assert client.get("/guardians/").status_code == 200
assert client.post("/guardians/").status_code == 405
assert client.put("/guardians/").status_code == 405
assert client.patch("/guardians/").status_code == 405
assert client.delete("/guardians/").status_code == 405
def test_no_methods(self, client, monkeypatch):
monkeypatch.setattr(BaseApiView, "allowed_methods", {})
assert client.get("/guardians/").status_code == 405
assert client.post("/guardians/").status_code == 405
assert client.put("/guardians/").status_code == 405
assert client.patch("/guardians/").status_code == 405
assert client.delete("/guardians/").status_code == 405
@pytest.mark.usefixtures("simpsons", "belchers")
class TestPagination:
def test_offset(self, get):
assert get("/guardians/?offset=1") == {
"items": [{"name": "Bob"}],
"limit": 20,
"offset": 1,
"total": 2,
}
def test_limit(self, get):
assert get("/guardians/?limit=1") == {
"items": [{"name": "Marge"}],
"limit": 1,
"offset": 0,
"total": 2,
}
def test_limit_and_offset(self, get):
assert get("/guardians/?limit=10&offset=0") == {
"items": [{"name": "Marge"}, {"name": "Bob"}],
"limit": 10,
"offset": 0,
"total": 2,
}
@pytest.mark.usefixtures("simpsons", "belchers")
class TestFiltering:
@pytest.fixture
def filter_guardians(self, get):
def _filter_guardians(filters: dict, expected_status_code: int = 200):
return get(
f"/guardians/?filters={json.dumps(filters)}",
expected_status_code=expected_status_code,
)
return _filter_guardians
def test_equal(self, filter_guardians):
assert filter_guardians({"name": "Marge"}) == [{"name": "Marge"}]
assert filter_guardians({"name": "Bob"}) == [{"name": "Bob"}]
assert filter_guardians({"name": "Marge", "age": 34}) == [{"name": "Marge"}]
assert filter_guardians({"name": "Marge", "age": 45}) == []
def test_gt(self, filter_guardians):
assert filter_guardians({"age__gt": 18}) == [
{"name": "Marge"},
{"name": "Bob"},
]
assert filter_guardians({"age__gt": 34}) == [{"name": "Bob"}]
assert filter_guardians({"age__gt": 46}) == []
def test_gte(self, filter_guardians):
assert filter_guardians({"age__gte": 18}) == [
{"name": "Marge"},
{"name": "Bob"},
]
assert filter_guardians({"age__gte": 34}) == [
{"name": "Marge"},
{"name": "Bob"},
]
assert filter_guardians({"age__gte": 46}) == [{"name": "Bob"}]
assert filter_guardians({"age__gte": 47}) == []
def test_lt(self, filter_guardians):
assert filter_guardians({"age__lt": 18}) == []
assert filter_guardians({"age__lt": 34}) == []
assert filter_guardians({"age__lt": 46}) == [{"name": "Marge"}]
assert filter_guardians({"age__lt": 47}) == [{"name": "Marge"}, {"name": "Bob"}]
def test_lte(self, filter_guardians):
assert filter_guardians({"age__lte": 18}) == []
assert filter_guardians({"age__lte": 34}) == [{"name": "Marge"}]
assert filter_guardians({"age__lte": 46}) == [
{"name": "Marge"},
{"name": "Bob"},
]
assert filter_guardians({"age__lte": 47}) == [
{"name": "Marge"},
{"name": "Bob"},
]
def test_in(self, filter_guardians):
assert filter_guardians({"name__in": ["Marge", "Bob"]}) == [
{"name": "Bob"},
{"name": "Marge"},
]
assert filter_guardians({"name__in": ["Marge"]}) == [{"name": "Marge"}]
assert filter_guardians({"name__in": ["Bob"]}) == [{"name": "Bob"}]
assert filter_guardians({"name__in": ["Billy"]}) == []
def test_not_in(self, filter_guardians):
assert filter_guardians({"name__not_in": ["Marge", "Bob"]}) == []
assert filter_guardians({"name__not_in": ["Marge"]}) == [{"name": "Bob"}]
assert filter_guardians({"name__not_in": ["Bob"]}) == [{"name": "Marge"}]
assert filter_guardians({"name__not_in": ["Billy"]}) == [
{"name": "Marge"},
{"name": "Bob"},
]
def test_ne(self, filter_guardians):
assert filter_guardians({"name__ne": "Marge"}) == [{"name": "Bob"}]
assert filter_guardians({"name__ne": "Bob"}) == [{"name": "Marge"}]
assert filter_guardians({"name__ne": "Billy"}) == [
{"name": "Marge"},
{"name": "Bob"},
]
def test_change_operator_separator(self, filter_guardians, monkeypatch):
monkeypatch.setattr(BaseApiView, "operator_separator", "|")
assert filter_guardians({"name|ne": "Marge"}) == [{"name": "Bob"}]
assert filter_guardians({"name|in": ["Marge"]}) == [{"name": "Marge"}]
def test_nested_filter(self, filter_guardians, client):
assert filter_guardians({"children.name": "Bart"}) == [{"name": "Marge"}]
assert filter_guardians({"children.name": "Gene"}) == [{"name": "Bob"}]
def test_bad_json(self, get):
get("/guardians/?filters=notjson", expected_status_code=400)
def test_column_does_not_exist(self, filter_guardians):
filter_guardians({"nope": "fail"}, expected_status_code=400)
filter_guardians({"nope.nested": "fail"}, expected_status_code=400)
filter_guardians({"children.nope": "fail"}, expected_status_code=400)
@pytest.mark.usefixtures("simpsons", "belchers")
class TestSort:
def test_sort(self, get, marge, bart, maggie, lisa):
assert get(f"/guardians/{marge.id}/children/?sort=name") == [
{"name": bart.name},
{"name": lisa.name},
{"name": maggie.name},
]
assert get(f"/guardians/{marge.id}/children/?sort=age") == [
{"name": maggie.name},
{"name": lisa.name},
{"name": bart.name},
]
def test_sort_asc(self, get, marge, maggie, lisa, bart):
assert get(f"/guardians/{marge.id}/children/?sort=age__asc") == [
{"name": maggie.name},
{"name": lisa.name},
{"name": bart.name},
]
assert get(
f"/guardians/{marge.id}/children/?sort=name__asc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
def test_sort_desc(self, get, marge, lisa, maggie, bart):
assert get(
f"/guardians/{marge.id}/children/?sort=age__desc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
assert get(
f"/guardians/{marge.id}/children/?sort=name__desc",
) == [{"name": maggie.name}, {"name": lisa.name}, {"name": bart.name}]
def test_nested_sort(self, get):
assert get(f"/guardians/?sort=family.surname") == [
{"name": "Bob"},
{"name": "Marge"},
]
def test_bad_sort(self, get):
get(f"/guardians/?sort=name__fail", expected_status_code=400)
get(f"/guardians/?sort=fail", expected_status_code=400)
get(f"/guardians/?sort=family.fail", expected_status_code=400)
get(f"/guardians/?sort=double.fail", expected_status_code=400)
def test_change_operator_separator(
self, get, monkeypatch, marge, lisa, bart, maggie
):
monkeypatch.setattr(BaseApiView, "operator_separator", "|")
assert get(
f"/guardians/{marge.id}/children/?sort=age|desc",
) == [{"name": bart.name}, {"name": lisa.name}, {"name": maggie.name}]
assert get(
f"/guardians/{marge.id}/children/?sort=name|desc",
) == [{"name": maggie.name}, {"name": lisa.name}, {"name": bart.name}]
@pytest.mark.usefixtures("simpsons", "belchers")
class TestSearch:
def test_search(self, get, marge):
assert get(f"/guardians/?search=marge") == [{"name": "Marge"}]
assert get(f"/guardians/?search=nobody") == []
assert get(f"/guardians/{marge.id}/children/?search=bart") == [{"name": "Bart"}]
assert get(f"/guardians/{marge.id}/children/?search=nope") == []
def test_unsupported_search(self, get, marge, bart, monkeypatch):
monkeypatch.setattr(GuardianApiView, "searchable_columns", [])
get(f"/guardians/?search=marge", expected_status_code=400)
class TestCallbacks:
@pytest.fixture
def pre_callback_patch(self):
with patch.object(PreCallback, "execute") as patched:
yield patched
@pytest.fixture
def post_callback_patch(self):
with patch.object(PostCallback, "execute") as patched:
yield patched
def test_create_callbacks(
self, post, user, pre_callback_patch, post_callback_patch
):
post("/guardians/", json={"name": "Jill"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_update_callbacks(
self, put, guardian, pre_callback_patch, post_callback_patch
):
put(f"/guardians/{guardian.id}/", json={"name": "updated"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_patch_callbacks(
self, put, patch, guardian, pre_callback_patch, post_callback_patch
):
patch(f"/guardians/{guardian.id}/", json={"name": "patched"})
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
def test_delete_callbacks(
self, client, guardian, pre_callback_patch, post_callback_patch
):
client.delete(f"/guardians/{guardian.id}/")
pre_callback_patch.assert_called_once()
post_callback_patch.assert_called_once()
@pytest.mark.usefixtures("simpsons", "belchers")
class TestNestedApis:
def test_get(self, get, bart, maggie, lisa, marge, skateboard, bob):
children = (bart, maggie, lisa)
assert get(f"/guardians/") == [{"name": marge.name}, {"name": bob.name}]
assert get(f"/guardians/{marge.id}/children/") == [
{"name": child.name} for child in children
]
assert get(f"/guardians/{marge.id}/children/{bart.id}/toy/") == {
"name": skateboard.name
}
class TestBlueprintRegistering:
def test_str_pk_patch_creation(self):
return
def test_int_pk_patch_update(self):
return
class TestUtils:
def test_get_url_rule(self):
assert (
get_url_rule(ToyApiView, None)
== "guardians/<int:guardians_id>/children/<int:children_id>/toy/"
)
def test_get_fk_column(self):
assert ( | get_fk_column(parent_model=GuardianModel, child_model=ChildModel) | 2 | 2023-11-07 03:44:49+00:00 | 8k |
BrianPugh/cyclopts | cyclopts/bind.py | [
{
"identifier": "token_count",
"path": "cyclopts/_convert.py",
"snippet": "def token_count(type_: Union[Type, inspect.Parameter]) -> Tuple[int, bool]:\n \"\"\"The number of tokens after a keyword the parameter should consume.\n\n Parameters\n ----------\n type_: Type\n A type hint/ann... | import inspect
import itertools
import os
import shlex
import sys
from typing import Any, Dict, Iterable, List, Tuple, Union, get_origin
from cyclopts._convert import token_count
from cyclopts.exceptions import (
CoercionError,
CycloptsError,
MissingArgumentError,
RepeatArgumentError,
ValidationError,
)
from cyclopts.parameter import get_hint_parameter, validate_command
from cyclopts.resolve import ResolvedCommand
from cyclopts.utils import ParameterDict | 5,889 | except KeyError:
pass
else:
mapping.setdefault(iparam, [])
mapping[iparam].append(env_var_value)
break
def _is_required(parameter: inspect.Parameter) -> bool:
return parameter.default is parameter.empty
def _bind(
command: ResolvedCommand,
mapping: ParameterDict,
):
"""Bind the mapping to the function signature.
Better than directly using ``signature.bind`` because this can handle
intermingled keywords.
"""
f_pos, f_kwargs = [], {}
use_pos = True
def f_pos_append(p):
nonlocal use_pos
assert use_pos
try:
f_pos.append(mapping[p])
except KeyError:
if _is_required(p):
raise MissingArgumentError(parameter=p, tokens_so_far=[]) from None
use_pos = False
for iparam in command.iparam_to_cparam.keys():
if use_pos and iparam.kind in (iparam.POSITIONAL_ONLY, iparam.POSITIONAL_OR_KEYWORD):
f_pos_append(iparam)
elif use_pos and iparam.kind is iparam.VAR_POSITIONAL: # ``*args``
f_pos.extend(mapping.get(iparam, []))
use_pos = False
elif iparam.kind is iparam.VAR_KEYWORD:
f_kwargs.update(mapping.get(iparam, {}))
else:
try:
f_kwargs[iparam.name] = mapping[iparam]
except KeyError:
if _is_required(iparam):
raise MissingArgumentError(parameter=iparam, tokens_so_far=[]) from None
bound = command.bind(*f_pos, **f_kwargs)
return bound
def _convert(command: ResolvedCommand, mapping: ParameterDict) -> ParameterDict:
coerced = ParameterDict()
for iparam, parameter_tokens in mapping.items():
cparam = command.iparam_to_cparam[iparam]
type_ = get_hint_parameter(iparam)[0]
# Checking if parameter_token is a string is a little jank,
# but works for all current use-cases.
for parameter_token in parameter_tokens:
if not isinstance(parameter_token, str):
# A token would be non-string if it's the implied-value (from a flag).
coerced[iparam] = parameter_tokens[0]
break
else:
try:
if iparam.kind == iparam.VAR_KEYWORD:
coerced[iparam] = {}
for key, values in parameter_tokens.items():
val = cparam.converter(type_, *values)
for validator in cparam.validator:
validator(type_, val)
coerced[iparam][key] = val
elif iparam.kind == iparam.VAR_POSITIONAL:
val = cparam.converter(List[type_], *parameter_tokens)
for validator in cparam.validator:
for v in val:
validator(type_, v)
coerced[iparam] = val
else:
val = cparam.converter(type_, *parameter_tokens)
for validator in cparam.validator:
validator(type_, val)
coerced[iparam] = val
except CoercionError as e:
e.parameter = iparam
raise
except (AssertionError, ValueError, TypeError) as e:
new_exception = ValidationError(value=e.args[0], parameter=iparam)
raise new_exception from e
return coerced
def create_bound_arguments(
command: ResolvedCommand,
tokens: List[str],
) -> Tuple[inspect.BoundArguments, List[str]]:
"""Parse and coerce CLI tokens to match a function's signature.
Parameters
----------
command: ResolvedCommand
tokens: List[str]
CLI tokens to parse and coerce to match ``f``'s signature.
Returns
-------
bound: inspect.BoundArguments
The converted and bound positional and keyword arguments for ``f``.
unused_tokens: List[str]
Remaining tokens that couldn't be matched to ``f``'s signature.
"""
# Note: mapping is updated inplace
mapping = ParameterDict() # Each value should be a list
c2p, p2c = None, None
unused_tokens = []
|
def normalize_tokens(tokens: Union[None, str, Iterable[str]]) -> List[str]:
if tokens is None:
tokens = sys.argv[1:] # Remove the executable
elif isinstance(tokens, str):
tokens = shlex.split(tokens)
else:
tokens = list(tokens)
return tokens
def cli2parameter(command: ResolvedCommand) -> Dict[str, Tuple[inspect.Parameter, Any]]:
"""Creates a dictionary mapping CLI keywords to python keywords.
Typically the mapping is something like::
{"--foo": (<Parameter "foo">, None)}
Each value is a tuple containing:
1. The corresponding ``inspect.Parameter``.
2. A predefined value. If this value is ``None``, the value should be
inferred from subsequent tokens.
"""
# The tuple's second element is an implicit value for flags.
mapping: Dict[str, Tuple[inspect.Parameter, Any]] = {}
for iparam, cparam in command.iparam_to_cparam.items():
if iparam.kind is iparam.VAR_KEYWORD:
# Don't directly expose the kwarg variable name
continue
hint = get_hint_parameter(iparam)[0]
for name in cparam.name:
mapping[name] = (iparam, True if hint is bool else None)
for name in cparam.get_negatives(hint, *cparam.name):
mapping[name] = (iparam, (get_origin(hint) or hint)())
return mapping
def parameter2cli(command: ResolvedCommand) -> ParameterDict:
c2p = cli2parameter(command)
p2c = ParameterDict()
for cli, tup in c2p.items():
iparam = tup[0]
p2c.setdefault(iparam, [])
p2c[iparam].append(cli)
for iparam, cparam in command.iparam_to_cparam.items():
# POSITIONAL_OR_KEYWORD and KEYWORD_ONLY already handled in cli2parameter
if iparam.kind in (iparam.POSITIONAL_ONLY, iparam.VAR_KEYWORD, iparam.VAR_POSITIONAL):
p2c[iparam] = list(cparam.name)
return p2c
def _cli_kw_to_f_kw(cli_key: str):
"""Only used for converting unknown CLI key/value keys for ``**kwargs``."""
assert cli_key.startswith("--")
cli_key = cli_key[2:] # strip off leading "--"
cli_key = cli_key.replace("-", "_")
return cli_key
def _parse_kw_and_flags(command: ResolvedCommand, tokens, mapping):
cli2kw = cli2parameter(command)
kwargs_iparam = next((x for x in command.iparam_to_cparam.keys() if x.kind == x.VAR_KEYWORD), None)
if kwargs_iparam:
mapping[kwargs_iparam] = {}
unused_tokens = []
skip_next_iterations = 0
for i, token in enumerate(tokens):
# If the previous argument was a keyword, then this is its value
if skip_next_iterations > 0:
skip_next_iterations -= 1
continue
if not token.startswith("-"):
unused_tokens.append(token)
continue
cli_values = []
kwargs_key = None
consume_count = 0
if "=" in token:
cli_key, cli_value = token.split("=", 1)
cli_values.append(cli_value)
consume_count -= 1
else:
cli_key = token
try:
iparam, implicit_value = cli2kw[cli_key]
except KeyError:
if kwargs_iparam:
iparam = kwargs_iparam
kwargs_key = _cli_kw_to_f_kw(cli_key)
implicit_value = None
else:
unused_tokens.append(token)
continue
cparam = command.iparam_to_cparam[iparam]
if implicit_value is not None:
# A flag was parsed
if cli_values:
# A value was parsed from "--key=value", and the ``value`` is in ``cli_values``.
if implicit_value: # Only accept values to the positive flag
pass
else:
raise ValidationError(value=f'Cannot assign value to negative flag "{cli_key}".')
else:
cli_values.append(implicit_value)
tokens_per_element, consume_all = 0, False
else:
tokens_per_element, consume_all = token_count(iparam)
if consume_all:
try:
for j in itertools.count():
token = tokens[i + 1 + j]
if not cparam.allow_leading_hyphen and _is_option_like(token):
break
cli_values.append(token)
skip_next_iterations += 1
except IndexError:
pass
else:
consume_count += tokens_per_element
try:
for j in range(consume_count):
token = tokens[i + 1 + j]
if not cparam.allow_leading_hyphen:
_validate_is_not_option_like(token)
cli_values.append(token)
skip_next_iterations += 1
except IndexError:
raise MissingArgumentError(parameter=iparam, tokens_so_far=cli_values) from None
# Update mapping
if iparam is kwargs_iparam:
assert kwargs_key is not None
if kwargs_key in mapping[iparam] and not consume_all:
raise RepeatArgumentError(parameter=iparam)
mapping[iparam].setdefault(kwargs_key, [])
mapping[iparam][kwargs_key].extend(cli_values)
else:
if iparam in mapping and not consume_all:
raise RepeatArgumentError(parameter=iparam)
mapping.setdefault(iparam, [])
mapping[iparam].extend(cli_values)
return unused_tokens
def _is_option_like(token: str) -> bool:
try:
complex(token)
return False
except ValueError:
pass
if token.startswith("-"):
return True
return False
def _validate_is_not_option_like(token):
if _is_option_like(token):
raise ValidationError(value=f'Unknown option: "{token}".')
def _parse_pos(
command: ResolvedCommand,
tokens: Iterable[str],
mapping: ParameterDict,
) -> List[str]:
tokens = list(tokens)
def remaining_parameters():
for iparam, cparam in command.iparam_to_cparam.items():
_, consume_all = token_count(iparam)
if iparam in mapping and not consume_all:
continue
if iparam.kind is iparam.KEYWORD_ONLY: # pragma: no cover
# the kwargs parameter should always be in mapping.
break
yield iparam, cparam
for iparam, cparam in remaining_parameters():
if not tokens:
break
if iparam.kind is iparam.VAR_POSITIONAL: # ``*args``
mapping.setdefault(iparam, [])
for token in tokens:
if not cparam.allow_leading_hyphen:
_validate_is_not_option_like(token)
mapping[iparam].append(token)
tokens = []
break
tokens_per_element, consume_all = token_count(iparam)
if consume_all:
# Prepend the positional values to the keyword values.
mapping.setdefault(iparam, [])
pos_tokens = []
for token in tokens:
if not cparam.allow_leading_hyphen:
_validate_is_not_option_like(token)
pos_tokens.append(token)
mapping[iparam] = pos_tokens + mapping[iparam]
tokens = []
break
tokens_per_element = max(1, tokens_per_element)
if len(tokens) < tokens_per_element:
raise MissingArgumentError(parameter=iparam, tokens_so_far=tokens)
mapping.setdefault(iparam, [])
for token in tokens[:tokens_per_element]:
if not cparam.allow_leading_hyphen:
_validate_is_not_option_like(token)
mapping[iparam].append(token)
tokens = tokens[tokens_per_element:]
return tokens
def _parse_env(command: ResolvedCommand, mapping):
"""Populate argument defaults from environment variables.
In cyclopts, arguments are parsed with the following priority:
1. CLI-provided values
2. Values parsed from ``Parameter.env_var``.
3. Default values from the function signature.
"""
for iparam, cparam in command.iparam_to_cparam.items():
if iparam in mapping:
# Don't check environment variables for already-parsed parameters.
continue
for env_var_name in cparam.env_var:
try:
env_var_value = os.environ[env_var_name]
except KeyError:
pass
else:
mapping.setdefault(iparam, [])
mapping[iparam].append(env_var_value)
break
def _is_required(parameter: inspect.Parameter) -> bool:
return parameter.default is parameter.empty
def _bind(
command: ResolvedCommand,
mapping: ParameterDict,
):
"""Bind the mapping to the function signature.
Better than directly using ``signature.bind`` because this can handle
intermingled keywords.
"""
f_pos, f_kwargs = [], {}
use_pos = True
def f_pos_append(p):
nonlocal use_pos
assert use_pos
try:
f_pos.append(mapping[p])
except KeyError:
if _is_required(p):
raise MissingArgumentError(parameter=p, tokens_so_far=[]) from None
use_pos = False
for iparam in command.iparam_to_cparam.keys():
if use_pos and iparam.kind in (iparam.POSITIONAL_ONLY, iparam.POSITIONAL_OR_KEYWORD):
f_pos_append(iparam)
elif use_pos and iparam.kind is iparam.VAR_POSITIONAL: # ``*args``
f_pos.extend(mapping.get(iparam, []))
use_pos = False
elif iparam.kind is iparam.VAR_KEYWORD:
f_kwargs.update(mapping.get(iparam, {}))
else:
try:
f_kwargs[iparam.name] = mapping[iparam]
except KeyError:
if _is_required(iparam):
raise MissingArgumentError(parameter=iparam, tokens_so_far=[]) from None
bound = command.bind(*f_pos, **f_kwargs)
return bound
def _convert(command: ResolvedCommand, mapping: ParameterDict) -> ParameterDict:
coerced = ParameterDict()
for iparam, parameter_tokens in mapping.items():
cparam = command.iparam_to_cparam[iparam]
type_ = get_hint_parameter(iparam)[0]
# Checking if parameter_token is a string is a little jank,
# but works for all current use-cases.
for parameter_token in parameter_tokens:
if not isinstance(parameter_token, str):
# A token would be non-string if it's the implied-value (from a flag).
coerced[iparam] = parameter_tokens[0]
break
else:
try:
if iparam.kind == iparam.VAR_KEYWORD:
coerced[iparam] = {}
for key, values in parameter_tokens.items():
val = cparam.converter(type_, *values)
for validator in cparam.validator:
validator(type_, val)
coerced[iparam][key] = val
elif iparam.kind == iparam.VAR_POSITIONAL:
val = cparam.converter(List[type_], *parameter_tokens)
for validator in cparam.validator:
for v in val:
validator(type_, v)
coerced[iparam] = val
else:
val = cparam.converter(type_, *parameter_tokens)
for validator in cparam.validator:
validator(type_, val)
coerced[iparam] = val
except CoercionError as e:
e.parameter = iparam
raise
except (AssertionError, ValueError, TypeError) as e:
new_exception = ValidationError(value=e.args[0], parameter=iparam)
raise new_exception from e
return coerced
def create_bound_arguments(
command: ResolvedCommand,
tokens: List[str],
) -> Tuple[inspect.BoundArguments, List[str]]:
"""Parse and coerce CLI tokens to match a function's signature.
Parameters
----------
command: ResolvedCommand
tokens: List[str]
CLI tokens to parse and coerce to match ``f``'s signature.
Returns
-------
bound: inspect.BoundArguments
The converted and bound positional and keyword arguments for ``f``.
unused_tokens: List[str]
Remaining tokens that couldn't be matched to ``f``'s signature.
"""
# Note: mapping is updated inplace
mapping = ParameterDict() # Each value should be a list
c2p, p2c = None, None
unused_tokens = []
| validate_command(command.command) | 7 | 2023-11-03 02:24:25+00:00 | 8k |
RoboFlamingo/RoboFlamingo | open_flamingo/open_flamingo/src/factory.py | [
{
"identifier": "Flamingo",
"path": "open_flamingo/open_flamingo/src/flamingo.py",
"snippet": "class Flamingo(nn.Module):\n def __init__(\n self,\n vision_encoder: nn.Module,\n lang_encoder: nn.Module,\n eoc_token_id: int,\n media_token_id: int,\n vis_dim: in... | from typing import Optional
from transformers import AutoModelForCausalLM, AutoTokenizer
from .flamingo import Flamingo
from .flamingo_lm import FlamingoLMMixin
from .utils import extend_instance
import open_clip | 5,284 |
def create_model_and_transforms(
clip_vision_encoder_path: str,
clip_vision_encoder_pretrained: str,
lang_encoder_path: str,
tokenizer_path: str,
cross_attn_every_n_layers: int = 1,
use_local_files: bool = False,
decoder_layers_attr_name: str = None,
freeze_lm_embeddings: bool = False,
cache_dir: Optional[str] = None,
**flamingo_kwargs,
):
"""
Initialize a Flamingo model from a pretrained vision encoder and language encoder.
Appends special tokens to the tokenizer and freezes backbones.
Args:
clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32")
clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k")
lang_encoder_path (str): path to pretrained language encoder
tokenizer_path (str): path to pretrained tokenizer
cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1.
use_local_files (bool, optional): whether to use local files. Defaults to False.
decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None.
freeze_lm_embeddings (bool, optional): whether to freeze LM input embeddings when configuring Perceiver.
cache_dir (str, optional): path to cache directory for downloading OpenClip/HF weights.
Returns:
Flamingo: Flamingo model from pretrained vision and language encoders
Image processor: Pipeline to preprocess input images
Tokenizer: A tokenizer for the language model
"""
vision_encoder, _, image_processor = open_clip.create_model_and_transforms(
clip_vision_encoder_path,
pretrained=clip_vision_encoder_pretrained,
cache_dir=cache_dir,
)
# set the vision encoder to output the visual features
vision_encoder.visual.output_tokens = True
text_tokenizer = AutoTokenizer.from_pretrained(
tokenizer_path,
local_files_only=use_local_files,
trust_remote_code=True,
cache_dir=cache_dir,
)
# add Flamingo special tokens to the tokenizer
text_tokenizer.add_special_tokens(
{"additional_special_tokens": ["<|endofchunk|>", "<image>", "<action>"]}
)
if text_tokenizer.pad_token is None:
# Issue: GPT models don't have a pad token, which we use to
# modify labels for the loss.
text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})
lang_encoder = AutoModelForCausalLM.from_pretrained(
lang_encoder_path,
local_files_only=use_local_files,
trust_remote_code=True,
cache_dir=cache_dir,
)
# hacks for MPT-1B, which doesn't have a get_input_embeddings method
if "mpt-1b-redpajama-200b" in lang_encoder_path:
class EmbeddingFnMixin:
def get_input_embeddings(self):
return self.transformer.wte
def set_input_embeddings(self, new_embeddings):
self.transformer.wte = new_embeddings
|
def create_model_and_transforms(
clip_vision_encoder_path: str,
clip_vision_encoder_pretrained: str,
lang_encoder_path: str,
tokenizer_path: str,
cross_attn_every_n_layers: int = 1,
use_local_files: bool = False,
decoder_layers_attr_name: str = None,
freeze_lm_embeddings: bool = False,
cache_dir: Optional[str] = None,
**flamingo_kwargs,
):
"""
Initialize a Flamingo model from a pretrained vision encoder and language encoder.
Appends special tokens to the tokenizer and freezes backbones.
Args:
clip_vision_encoder_path (str): path to pretrained clip model (e.g. "ViT-B-32")
clip_vision_encoder_pretrained (str): name of pretraining dataset for clip model (e.g. "laion2b_s32b_b79k")
lang_encoder_path (str): path to pretrained language encoder
tokenizer_path (str): path to pretrained tokenizer
cross_attn_every_n_layers (int, optional): determines how often to add a cross-attention layer. Defaults to 1.
use_local_files (bool, optional): whether to use local files. Defaults to False.
decoder_layers_attr_name (str, optional): name of the decoder layers attribute. Defaults to None.
freeze_lm_embeddings (bool, optional): whether to freeze LM input embeddings when configuring Perceiver.
cache_dir (str, optional): path to cache directory for downloading OpenClip/HF weights.
Returns:
Flamingo: Flamingo model from pretrained vision and language encoders
Image processor: Pipeline to preprocess input images
Tokenizer: A tokenizer for the language model
"""
vision_encoder, _, image_processor = open_clip.create_model_and_transforms(
clip_vision_encoder_path,
pretrained=clip_vision_encoder_pretrained,
cache_dir=cache_dir,
)
# set the vision encoder to output the visual features
vision_encoder.visual.output_tokens = True
text_tokenizer = AutoTokenizer.from_pretrained(
tokenizer_path,
local_files_only=use_local_files,
trust_remote_code=True,
cache_dir=cache_dir,
)
# add Flamingo special tokens to the tokenizer
text_tokenizer.add_special_tokens(
{"additional_special_tokens": ["<|endofchunk|>", "<image>", "<action>"]}
)
if text_tokenizer.pad_token is None:
# Issue: GPT models don't have a pad token, which we use to
# modify labels for the loss.
text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})
lang_encoder = AutoModelForCausalLM.from_pretrained(
lang_encoder_path,
local_files_only=use_local_files,
trust_remote_code=True,
cache_dir=cache_dir,
)
# hacks for MPT-1B, which doesn't have a get_input_embeddings method
if "mpt-1b-redpajama-200b" in lang_encoder_path:
class EmbeddingFnMixin:
def get_input_embeddings(self):
return self.transformer.wte
def set_input_embeddings(self, new_embeddings):
self.transformer.wte = new_embeddings
| extend_instance(lang_encoder, EmbeddingFnMixin) | 2 | 2023-11-02 01:36:23+00:00 | 8k |
sanmusen214/BAAH | test.py | [
{
"identifier": "config",
"path": "modules/configs/MyConfig.py",
"snippet": "class MyConfigger:\n NOWVERSION=\"1.2.0\"\n USER_CONFIG_FOLDER=\"./BAAH_CONFIGS\"\n SOFTWARE_CONFIG_FOLDER=\"./DATA/CONFIGS\"\n LANGUAGE_PACKAGE_FOLDER=\"./DATA/i18n\"\n SOFTWARE_CONFIG_NAME=\"software_config.jso... | import sys
import logging
import threading
import requests
import cv2
import os
import time
import numpy as np
from modules.configs.MyConfig import config
from modules.AllTask.SubTask.RaidQuest import RaidQuest
from modules.AllTask.SubTask.ScrollSelect import ScrollSelect
from modules.AllTask.InCafe.InviteStudent import InviteStudent
from modules.AllTask.InCafe.TouchHead import TouchHead
from modules.utils import *
from DATA.assets.ButtonName import ButtonName
from DATA.assets.PageName import PageName
from DATA.assets.PopupName import PopupName
from modules.AllTask import *
from modules.AllTask.InCafe.CollectPower import CollectPower
from modules.AllPage.Page import Page | 7,133 | logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', encoding='utf-8')
if len(sys.argv) > 1:
configname = sys.argv[1]
| logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', encoding='utf-8')
if len(sys.argv) > 1:
configname = sys.argv[1] | config = config.parse_user_config(configname) | 0 | 2023-11-09 22:28:39+00:00 | 8k |
QingruZhang/PASTA | scripts/eval_bias_gen.py | [
{
"identifier": "benchmarks",
"path": "evaluation/benchmarks.py",
"snippet": "DEFAULT_PROMPT_PREFIX = \"The following is an excerpt from a Wikipedia article:\\n\\n\"\nDEFAULT_PROMPT_TEMPLATE = \"{} is\"\nDEFAULT_MAX_LENGTH = 100\nDEFAULT_MAX_LENGTH_ERROR_CORRECTION = 150\nDEFAULT_TOP_K = 3\nDEFAULT_N_TO... | import argparse
import json
import logging
import torch
from pathlib import Path
from evaluation import benchmarks, data, models, precompute
from evaluation.utils import experiment_utils, logging_utils
from pastalib import pasta
from torch.utils.tensorboard import SummaryWriter | 6,056 | """Evaluate editor effects on generation for bias setting."""
logger = logging.getLogger(__name__)
def main(args: argparse.Namespace) -> None:
"""Run the evaluation for BiasBios prediction task."""
experiment = experiment_utils.setup_experiment(args)
logging_utils.configure(args=args)
data.disable_caching()
if args.debug:
experiment_utils.set_ipdb_trace()
# Load the model and tokenizer
device = args.device or "cuda" if torch.cuda.is_available() else "cpu"
| """Evaluate editor effects on generation for bias setting."""
logger = logging.getLogger(__name__)
def main(args: argparse.Namespace) -> None:
"""Run the evaluation for BiasBios prediction task."""
experiment = experiment_utils.setup_experiment(args)
logging_utils.configure(args=args)
data.disable_caching()
if args.debug:
experiment_utils.set_ipdb_trace()
# Load the model and tokenizer
device = args.device or "cuda" if torch.cuda.is_available() else "cpu" | mt = models.load_model(args.model, device=device, fp16=args.fp16) | 2 | 2023-11-06 05:36:05+00:00 | 8k |
MrXandbadas/MrX_OAI_Assistant_Manager | assistant_manager/runs_manager.py | [
{
"identifier": "dynamic_functions",
"path": "assistant_manager/functions/dynamic/dynamic_functions.py",
"snippet": "def get_arxiv_papers(query: str, max_results: int = 5, sort_by: str = 'relevance', sort_order: str = 'descending'):\ndef get_weather_forecast(latitude: float, longitude: float, current_we... | import inspect
import json
import time
from assistant_manager.functions.dynamic import dynamic_functions
from assistant_manager.utils import file_operations, special_functions
from assistant_manager.utils.special_functions import append_new_tool_function_and_metadata
from assistant_manager.interface_base import InterfaceBase
from assistant_manager.a_m_threads import OAI_Threads | 4,568 | timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.retrieve(
thread_id=thread_id,
run_id=run_id,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def update_run(self, thread_id, run_id, metadata=None, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Modifies a run.
Args:
thread_id: The ID of the thread the run belongs to.
run_id: The ID of the run to update.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.update(
thread_id=thread_id,
run_id=run_id,
metadata=metadata,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def list_runs(self, thread_id, limit=20, order="desc", after=None, before=None, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Returns a list of runs belonging to a thread.
Args:
thread_id: The ID of the thread to list runs from.
limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.
after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.list(
thread_id=thread_id,
limit=limit,
order=order,
after=after,
before=before,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def cancel_run(self, thread_id, run_id, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Cancels a run.
Args:
thread_id: The ID of the thread the run belongs to.
run_id: The ID of the run to cancel.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.cancel(
thread_id=thread_id,
run_id=run_id,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def process_run(self,thread_id, run_id):
while True:
run = self.retrieve_run(thread_id, run_id)
print(run.status)
if run.status == "completed":
message_list = self.list_messages(thread_id)
for message in message_list.data:
if message.id in self.chat_ids:
continue
else:
print(f'assistant: {message.content[0].text.value}')
self.chat_ids.append(message.id)
return message.content[0].text.value
break
elif run.status == "requires_action":
print("The run requires action.")
required_actions_json = run.required_action.submit_tool_outputs.model_dump_json(indent=4)
print(f"Required Actions: {required_actions_json}")
required_actions = json.loads(required_actions_json)
tools_output = []
for action in required_actions["tool_calls"]:
if action["function"]["name"] == "append_new_tool_function_and_metadata":
arguments = json.loads(action["function"]["arguments"])
# get the function name
function_name = arguments["function_name"]
# get the function code
function_code = arguments["function_code"]
# get the metadata dict
function_metadata = arguments["metadata_dict"]
function_meta_description = arguments["tool_meta_description"]
#Check if we need to json.loads the metadata
if isinstance(function_metadata, str):
function_metadata = json.loads(arguments["metadata_dict"])
#print(f"Function name: {function_name}")
self.logger.debug(f"Function code: {function_code}")
#print(f"Function metadata: {function_metadata}")
# append the function and metadata to the current assistant
| #oai base
class Run_Manager(OAI_Threads):
def __init__(self, api_key, organization, timeout, log_level) -> None:
super().__init__(api_key, organization, timeout, log_level)
def create_run(self, thread_id, assistant_id, model=None, instructions=None, tools=None, metadata=None, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Create a run.
Args:
thread_id: The ID of the thread to create a run in.
assistant_id: The ID of the assistant to use to execute this run.
model: The ID of the Model to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
instructions: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis.
tools: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.create(
thread_id=thread_id,
assistant_id=assistant_id,
model=model,
instructions=instructions,
tools=tools,
metadata=metadata,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def retrieve_run(self, thread_id, run_id, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Retrieves a run.
Args:
thread_id: The ID of the thread the run belongs to.
run_id: The ID of the run to retrieve.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.retrieve(
thread_id=thread_id,
run_id=run_id,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def update_run(self, thread_id, run_id, metadata=None, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Modifies a run.
Args:
thread_id: The ID of the thread the run belongs to.
run_id: The ID of the run to update.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.update(
thread_id=thread_id,
run_id=run_id,
metadata=metadata,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def list_runs(self, thread_id, limit=20, order="desc", after=None, before=None, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Returns a list of runs belonging to a thread.
Args:
thread_id: The ID of the thread to list runs from.
limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.
order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order.
after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list.
before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.list(
thread_id=thread_id,
limit=limit,
order=order,
after=after,
before=before,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def cancel_run(self, thread_id, run_id, extra_headers=None, extra_query=None, extra_body=None, timeout=None):
"""
Cancels a run.
Args:
thread_id: The ID of the thread the run belongs to.
run_id: The ID of the run to cancel.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self.client.threads.runs.cancel(
thread_id=thread_id,
run_id=run_id,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout
)
def process_run(self,thread_id, run_id):
while True:
run = self.retrieve_run(thread_id, run_id)
print(run.status)
if run.status == "completed":
message_list = self.list_messages(thread_id)
for message in message_list.data:
if message.id in self.chat_ids:
continue
else:
print(f'assistant: {message.content[0].text.value}')
self.chat_ids.append(message.id)
return message.content[0].text.value
break
elif run.status == "requires_action":
print("The run requires action.")
required_actions_json = run.required_action.submit_tool_outputs.model_dump_json(indent=4)
print(f"Required Actions: {required_actions_json}")
required_actions = json.loads(required_actions_json)
tools_output = []
for action in required_actions["tool_calls"]:
if action["function"]["name"] == "append_new_tool_function_and_metadata":
arguments = json.loads(action["function"]["arguments"])
# get the function name
function_name = arguments["function_name"]
# get the function code
function_code = arguments["function_code"]
# get the metadata dict
function_metadata = arguments["metadata_dict"]
function_meta_description = arguments["tool_meta_description"]
#Check if we need to json.loads the metadata
if isinstance(function_metadata, str):
function_metadata = json.loads(arguments["metadata_dict"])
#print(f"Function name: {function_name}")
self.logger.debug(f"Function code: {function_code}")
#print(f"Function metadata: {function_metadata}")
# append the function and metadata to the current assistant | function_output = append_new_tool_function_and_metadata(function_name, function_code, function_metadata, function_meta_description) | 3 | 2023-11-07 03:42:04+00:00 | 8k |
bigai-nlco/langsuite | langsuite/envs/iqa/iqa_task.py | [
{
"identifier": "Iqa2DEnv",
"path": "langsuite/envs/iqa/iqa_env.py",
"snippet": "class Iqa2DEnv(LangSuiteEnv):\n \"\"\"Iqa environment class\n\n This class provides functions to:\n - Load scenes, agents.\n - Apply agent actions and perform simulation steps.\n\n Args:\n conf... | import json
import random
import re
from copy import deepcopy
from pathlib import Path
from langsuite.envs.iqa import Iqa2DEnv
from langsuite.task import TASK_REGISTRY, BaseTask, TaskRunner
from langsuite.utils.logging import logger
from langsuite.utils.template_builder import TemplateBuilder | 6,681 | # Copyright (c) BIGAI Research. All rights reserved.
# Licensed under the MIT license.
from __future__ import annotations
__all__ = ["IqaTask"]
IqaPath = Path(__file__).parent.parent.parent.parent
def load_data(data_dir):
"""
Load IQA (IQA: Visual Question Answering in Interactive Environments) data from a specified directory.
Args:
data_dir (str): The directory containing IQA data files.
Returns:
list: A list of task data dictionaries, each containing world and question-answer pairs.
"""
iqa_data = json.load(open(Path(data_dir, "data", "iqa", "iqa_test", "iqa_test_1k.json")))
# iqa_data = json.load(open(Path(data_dir, "data", "iqa", "iqa_list_qa_counts_300.json")))
task_data = []
for _id, world_data in enumerate(iqa_data):
task_data.append(
dict(
name=f"Iqa:Iqa2DEnv:{_id}",
data=dict(world_data=world_data[0]),
task_definition="",
inputs=[],
targets=[],
qa=world_data[1],
)
)
return task_data
def success_or_not(info, gold_answer="True"):
"""
Check if the inferred answer matches the expected answer.
Args:
info: inferred answer to be checked.
gold_answer (str): The expected answer. Default is "True".
Returns:
bool: True if the inferred answer matches the expected answer, False otherwise.
"""
answer = extract_content(info)
if answer is None:
return False
if str(answer).lower() == str(gold_answer).lower():
return answer
return False
@TASK_REGISTRY.register(name="IqaTask:Iqa2DEnv")
class IqaTask(BaseTask):
"""IQA task class
This class provides functions to:
- Load environment, agents, question-answer pair.
"""
def __init__(self, *, env, template, name, **kwargs) -> None:
super().__init__(env=env, template=template, name=name, **kwargs)
self._is_successful: bool = False
self.success_criterions = [success_or_not]
self.stop_criterions = [lambda _: self._timesteps >= 100]
@classmethod
def create(cls, task_cfg, task_data=None):
if not task_data:
task_data = random.choice(load_data(IqaPath))
env = Iqa2DEnv.create(task_cfg["env"])
world_confg = deepcopy(task_cfg["world"])
if "world_data" in task_data.get("data"):
world_confg.update({"data": task_data["data"]["world_data"]})
env.create_world(world_confg)
env.set_feedback_builder(TemplateBuilder(task_cfg["template"]))
env.question_type = task_cfg["question_type"]
env.question = task_data["qa"][env.question_type]["question"]
env.answer = task_data["qa"][env.question_type]["answer"]
env.question_info["object_class"] = task_data["qa"][env.question_type][
"object_class"
]
if "recept" in task_data["qa"][env.question_type]:
env.question_info["recept"] = task_data["qa"][env.question_type]["recept"]
for agent in task_cfg["agents"]:
env.add_agent(agent)
task = cls(
env=env,
template=task_cfg["template"],
name=task_cfg.get("name", task_cfg["task"]),
)
return task
def start(self, render=True):
"""Return task introduction at beginning"""
self.env.reset()
if render:
# broadcast to all agents
for _, agent in self.env.agents.items():
self._task_guidance = self._feedback_builder.build(
"intro",
degree=agent.view_degree,
max_manipulation_steps=agent.max_manipulate_distance,
max_view_steps=agent.max_view_distance,
)
| # Copyright (c) BIGAI Research. All rights reserved.
# Licensed under the MIT license.
from __future__ import annotations
__all__ = ["IqaTask"]
IqaPath = Path(__file__).parent.parent.parent.parent
def load_data(data_dir):
"""
Load IQA (IQA: Visual Question Answering in Interactive Environments) data from a specified directory.
Args:
data_dir (str): The directory containing IQA data files.
Returns:
list: A list of task data dictionaries, each containing world and question-answer pairs.
"""
iqa_data = json.load(open(Path(data_dir, "data", "iqa", "iqa_test", "iqa_test_1k.json")))
# iqa_data = json.load(open(Path(data_dir, "data", "iqa", "iqa_list_qa_counts_300.json")))
task_data = []
for _id, world_data in enumerate(iqa_data):
task_data.append(
dict(
name=f"Iqa:Iqa2DEnv:{_id}",
data=dict(world_data=world_data[0]),
task_definition="",
inputs=[],
targets=[],
qa=world_data[1],
)
)
return task_data
def success_or_not(info, gold_answer="True"):
"""
Check if the inferred answer matches the expected answer.
Args:
info: inferred answer to be checked.
gold_answer (str): The expected answer. Default is "True".
Returns:
bool: True if the inferred answer matches the expected answer, False otherwise.
"""
answer = extract_content(info)
if answer is None:
return False
if str(answer).lower() == str(gold_answer).lower():
return answer
return False
@TASK_REGISTRY.register(name="IqaTask:Iqa2DEnv")
class IqaTask(BaseTask):
"""IQA task class
This class provides functions to:
- Load environment, agents, question-answer pair.
"""
def __init__(self, *, env, template, name, **kwargs) -> None:
super().__init__(env=env, template=template, name=name, **kwargs)
self._is_successful: bool = False
self.success_criterions = [success_or_not]
self.stop_criterions = [lambda _: self._timesteps >= 100]
@classmethod
def create(cls, task_cfg, task_data=None):
if not task_data:
task_data = random.choice(load_data(IqaPath))
env = Iqa2DEnv.create(task_cfg["env"])
world_confg = deepcopy(task_cfg["world"])
if "world_data" in task_data.get("data"):
world_confg.update({"data": task_data["data"]["world_data"]})
env.create_world(world_confg)
env.set_feedback_builder(TemplateBuilder(task_cfg["template"]))
env.question_type = task_cfg["question_type"]
env.question = task_data["qa"][env.question_type]["question"]
env.answer = task_data["qa"][env.question_type]["answer"]
env.question_info["object_class"] = task_data["qa"][env.question_type][
"object_class"
]
if "recept" in task_data["qa"][env.question_type]:
env.question_info["recept"] = task_data["qa"][env.question_type]["recept"]
for agent in task_cfg["agents"]:
env.add_agent(agent)
task = cls(
env=env,
template=task_cfg["template"],
name=task_cfg.get("name", task_cfg["task"]),
)
return task
def start(self, render=True):
"""Return task introduction at beginning"""
self.env.reset()
if render:
# broadcast to all agents
for _, agent in self.env.agents.items():
self._task_guidance = self._feedback_builder.build(
"intro",
degree=agent.view_degree,
max_manipulation_steps=agent.max_manipulate_distance,
max_view_steps=agent.max_view_distance,
) | logger.emit({"role": "system", "content": self.task_guidance}) | 4 | 2023-11-01 01:47:00+00:00 | 8k |
radekd91/inferno | inferno/models/video_emorec/VideoEmotionClassifier.py | [
{
"identifier": "TemporalFeatureEncoder",
"path": "inferno/models/temporal/Bases.py",
"snippet": "class TemporalFeatureEncoder(torch.nn.Module): \n\n def __init__(self):\n super().__init__() \n\n def forward(self, sample, train=False, desired_output_length=None, **kwargs): \n raise N... | import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Any, Optional, Dict, List
from inferno.models.temporal.Bases import TemporalFeatureEncoder, SequenceClassificationEncoder, Preprocessor, ClassificationHead
from inferno.models.temporal.AudioEncoders import Wav2Vec2Encoder
from inferno.models.temporal.SequenceModels import *
from pathlib import Path
from inferno.utils.other import get_path_to_assets
from omegaconf import OmegaConf
from inferno.models.EmoSwinModule import EmoSwinModule | 5,389 | return total_loss, losses, metrics
# def _compute_loss(self, sample, training, validation, loss_name, loss_cfg):
# raise NotImplementedError("Please implement this method in your child class")
def _compute_loss(self, sample, loss_name, loss_cfg):
# TODO: this could be done nicer (have a dict with name - loss functor)
loss_type = loss_name if 'loss_type' not in loss_cfg.keys() else loss_cfg['loss_type']
if "cross_entropy" in loss_type:
label = sample[loss_cfg["output_key"]]
if loss_cfg["output_key"] == "gt_expression_intensity":
label -= 1 # expression intensity is in 1-3 range, but we need 0-2 for cross entropy
loss_value = F.cross_entropy(sample[loss_cfg["input_key"]], label)
else:
raise ValueError(f"Unsupported loss type: '{loss_type}'")
return loss_value
def training_step(self, batch, batch_idx, *args, **kwargs):
training = True
# forward pass
sample = self.forward(batch, train=training, validation=False, **kwargs)
# sample = self.forward(batch, train=training, validation=False, teacher_forcing=False, **kwargs)
# loss
total_loss, losses, metrics = self.compute_loss(sample, training=training, validation=False, **kwargs)
losses_and_metrics_to_log = {**losses, **metrics}
# losses_and_metrics_to_log = {"train_" + k: v.item() for k, v in losses_and_metrics_to_log.items()}
losses_and_metrics_to_log = {"train/" + k: v.item() if isinstance(v, (torch.Tensor)) else v if isinstance(v, float) else 0. for k, v in losses_and_metrics_to_log.items()}
if self.logger is not None:
self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
# self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True, sync_dist=True) # log per epoch, # recommended
return total_loss
def validation_step(self, batch, batch_idx, *args, **kwargs):
training = False
# forward pass
sample = self.forward(batch, train=training, validation=True, **kwargs)
# loss
total_loss, losses, metrics = self.compute_loss(sample, training=training, validation=True, **kwargs)
losses_and_metrics_to_log = {**losses, **metrics}
# losses_and_metrics_to_log = {"val_" + k: v.item() for k, v in losses_and_metrics_to_log.items()}
losses_and_metrics_to_log = {"val/" + k: v.item() if isinstance(v, (torch.Tensor)) else v if isinstance(v, float) else 0. for k, v in losses_and_metrics_to_log.items()}
if self.logger is not None:
self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
# self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True, sync_dist=True) # log per epoch, # recommended
return total_loss, losses_and_metrics_to_log
def test_step(self, batch, batch_idx, *args, **kwargs):
training = False
# forward pass
sample = self.forward(batch, train=training, teacher_forcing=False, **kwargs)
# loss
total_loss, losses, metrics = self.compute_loss(sample, training, validation=False, **kwargs)
losses_and_metrics_to_log = {**losses, **metrics}
# losses_and_metrics_to_log = {"train_" + k: v.item() for k, v in losses_and_metrics_to_log.items()}
losses_and_metrics_to_log = {"test/" + k: v.item() if isinstance(v, (torch.Tensor,)) else v if isinstance(v, float) else 0. for k, v in losses_and_metrics_to_log.items()}
if self.logger is not None:
self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
if self.logger is not None:
# self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True, sync_dist=True) # log per epoch, # recommended
return total_loss
@classmethod
def instantiate(cls, cfg, stage, prefix, checkpoint, checkpoint_kwargs) -> 'VideoClassifierBase':
"""
Function that instantiates the model from checkpoint or config
"""
if checkpoint is None:
model = VideoClassifierBase(cfg, prefix)
else:
checkpoint_kwargs = checkpoint_kwargs or {}
model = VideoClassifierBase.load_from_checkpoint(
checkpoint_path=checkpoint,
strict=False,
**checkpoint_kwargs)
# if stage == 'train':
# mode = True
# else:
# mode = False
# model.reconfigure(cfg, prefix, downgrade_ok=True, train=mode)
return model
def sequence_encoder_from_cfg(cfg, feature_dim):
if cfg.type == "TransformerSequenceClassifier":
return TransformerSequenceClassifier(cfg, feature_dim)
elif cfg.type == "GRUSequenceClassifier":
return GRUSequenceClassifier(cfg, feature_dim)
else:
raise ValueError(f"Unknown sequence classifier model: {cfg.model}")
def classification_head_from_cfg(cfg, feature_size, num_classes):
if cfg.type == "LinearClassificationHead":
return LinearClassificationHead(cfg, feature_size, num_classes)
elif cfg.type == "MultiheadLinearClassificationHead":
return MultiheadLinearClassificationHead(cfg, feature_size, num_classes)
else:
raise ValueError(f"Unknown classification head model: {cfg.model}")
class EmoSwin(TemporalFeatureEncoder):
def __init__(self, cfg):
super().__init__()
swin_cfg_path = Path(cfg.model_path)
self.trainable = cfg.trainable
if not swin_cfg_path.is_absolute():
| """
Author: Radek Danecek
Copyright (c) 2023, Radek Danecek
All rights reserved.
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2022 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at emote@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
"""
class VideoClassifierBase(pl.LightningModule):
def __init__(self,
cfg,
preprocessor: Optional[Preprocessor] = None,
feature_model: Optional[TemporalFeatureEncoder] = None,
fusion_layer: Optional[nn.Module] = None,
sequence_encoder: Optional[SequenceClassificationEncoder] = None,
classification_head: Optional[ClassificationHead] = None,
) -> None:
super().__init__()
self.cfg = cfg
self.preprocessor = preprocessor
self.feature_model = feature_model
self.fusion_layer = fusion_layer
self.sequence_encoder = sequence_encoder
self.classification_head = classification_head
def get_trainable_parameters(self):
trainable_params = []
if self.feature_model is not None:
trainable_params += self.feature_model.get_trainable_parameters()
if self.sequence_encoder is not None:
trainable_params += self.sequence_encoder.get_trainable_parameters()
if self.classification_head is not None:
trainable_params += self.classification_head.get_trainable_parameters()
return trainable_params
@property
def max_seq_length(self):
return 5000
def configure_optimizers(self):
trainable_params = []
trainable_params += list(self.get_trainable_parameters())
if trainable_params is None or len(trainable_params) == 0:
print("[WARNING] No trainable parameters found.")
return
if self.cfg.learning.optimizer == 'Adam':
opt = torch.optim.Adam(
trainable_params,
lr=self.cfg.learning.learning_rate,
amsgrad=False)
elif self.cfg.learning.optimizer == 'SGD':
opt = torch.optim.SGD(
trainable_params,
lr=self.cfg.learning.learning_rate)
else:
raise ValueError(f"Unsupported optimizer: '{self.cfg.learning.optimizer}'")
optimizers = [opt]
schedulers = []
opt_dict = {}
opt_dict['optimizer'] = opt
if 'learning_rate_patience' in self.cfg.learning.keys():
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt,
patience=self.cfg.learning.learning_rate_patience,
factor=self.cfg.learning.learning_rate_decay,
mode=self.cfg.learning.lr_sched_mode)
schedulers += [scheduler]
opt_dict['lr_scheduler'] = scheduler
opt_dict['monitor'] = 'val_loss_total'
elif 'learning_rate_decay' in self.cfg.learning.keys():
scheduler = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=self.cfg.learning.learning_rate_decay)
opt_dict['lr_scheduler'] = scheduler
schedulers += [scheduler]
return opt_dict
@torch.no_grad()
def preprocess_input(self, sample: Dict, train=False, **kwargs: Any) -> Dict:
    """Run the (optional) preprocessor on the raw "video" input, without gradients.

    When no preprocessor is configured the sample is returned untouched.
    """
    if self.preprocessor is None:
        return sample
    # Keep the preprocessor on the same device as the model before running it.
    if self.device != self.preprocessor.device:
        self.preprocessor.to(self.device)
    return self.preprocessor(sample, input_key="video", train=train,
                             test_time=not train, **kwargs)
def signal_fusion(self, sample: Dict, train=False, **kwargs: Any) -> Dict:
    """Fuse per-modality feature tensors into a single ``hidden_feature``.

    Each entry named in ``cfg.model.modality_list`` is read from ``sample``
    (each presumably shaped (B, T, F_m) — the einsum subscripts "bti" assume
    3-D inputs). The strategy is chosen by ``cfg.model.fusion_type``; the
    fused tensor is written back to ``sample["hidden_feature"]``.

    :raises ValueError: for unsupported fusion types / modality counts.
    """
    modality_list = self.cfg.model.get('modality_list', None)
    modality_features = [sample[key] for key in modality_list]
    if self.cfg.model.fusion_type != "tensor_low_rank":
        assert self.fusion_layer is None
    if self.cfg.model.fusion_type in ["concat", "cat", "concatenate"]:
        fused_feature = torch.cat(modality_features, dim=2)  # b, t, sum(F_m)
    elif self.cfg.model.fusion_type in ["add", "sum"]:
        # BUGFIX: previously used torch.cat along dim 0 (which merges the
        # batch axis) before summing; stack on a new leading axis instead so
        # the result keeps shape (b, t, f).
        fused_feature = torch.stack(modality_features, dim=0)
        fused_feature = fused_feature.sum(dim=0)
    elif self.cfg.model.fusion_type in ["max"]:
        # BUGFIX: tensor.max(dim=...) returns a (values, indices) namedtuple;
        # keep only the values so downstream code receives a tensor.
        fused_feature = torch.stack(modality_features, dim=0).max(dim=0).values
    elif self.cfg.model.fusion_type in ["tensor"]:
        # Append a constant 1 to each feature so the outer products also
        # retain the unimodal (and lower-order) terms.
        for fi, feat in enumerate(modality_features):
            modality_features[fi] = torch.cat(
                [feat, torch.ones(*feat.shape[:-1], 1, device=feat.device)], dim=-1)
        if len(modality_features) == 1:
            raise ValueError(f"Unsupported fusion type {self.cfg.model.fusion_type} for {len(modality_features)}")
        elif len(modality_features) == 2:
            # Bilinear outer product, flattened per timestep.
            fused_feature = torch.einsum("bti,btj->btij", modality_features[0], modality_features[1])
            fused_feature = fused_feature.view(fused_feature.shape[0], fused_feature.shape[1], -1)
        elif len(modality_features) == 3:
            fusion_cfg = self.cfg.model.get("fusion_cfg", None)
            n_modal = fusion_cfg.get('num_rank', len(modality_features))
            if n_modal == 2:
                # All pairwise outer products between the three modalities.
                fused_01 = torch.einsum("bti,btj->btij", modality_features[0], modality_features[1])
                fused_12 = torch.einsum("bti,btj->btij", modality_features[1], modality_features[2])
                fused_20 = torch.einsum("bti,btj->btij", modality_features[2], modality_features[0])
                fused_feature = torch.stack([fused_01, fused_12, fused_20], dim=-1)
                fused_feature = fused_feature.view(fused_feature.shape[0], fused_feature.shape[1], -1)
            elif n_modal == 3:
                # Full trilinear outer product.
                fused_01 = torch.einsum("bti,btj->btij", modality_features[0], modality_features[1])
                # BUGFIX: previously referenced `fused_12`, which is undefined
                # in this branch (NameError); the pairwise product is fused_01.
                fused_012 = torch.einsum("btij,btk->btijk", fused_01, modality_features[2])
                fused_feature = fused_012.view(fused_012.shape[0], fused_012.shape[1], -1)
            else:
                raise ValueError(f"Unsupported fusion type {self.cfg.model.fusion_type} for {len(modality_features)} modalities and {n_modal} ranks")
        else:
            raise ValueError(f"Unsupported fusion type {self.cfg.model.fusion_type} for {len(modality_features)} modalities")
    elif self.cfg.model.fusion_type in ["tensor_low_rank"]:
        fused_feature = self.fusion_layer(modality_features)
    else:
        # BUGFIX: previously read `self.fusion_type`, which does not exist on
        # this class; the configured value lives in cfg.model.fusion_type.
        raise ValueError(f"Unknown fusion type {self.cfg.model.fusion_type}")
    sample["hidden_feature"] = fused_feature
    return sample
def is_multi_modal(self):
    """Return True when more than one input modality is configured."""
    modality_list = self.cfg.model.get('modality_list', None)
    if modality_list is None:
        return False
    return len(modality_list) > 1
def forward(self, sample: Dict, train=False, validation=False, **kwargs: Any) -> Dict:
    """Full classification pipeline over a batch ``sample`` dict.

    Stages: length-truncation -> preprocessing -> feature extraction ->
    (optional) modality fusion -> sequence encoding -> classification head.
    Intermediate results are written back into ``sample`` (notably
    ``"hidden_feature"``), and the augmented dict is returned.

    sample: Dict[str, torch.Tensor]
        - gt_emo_feature: (B, T, F)
    """
    # Determine the temporal length T from whichever input is present.
    # T = sample[input_key].shape[1]
    if "gt_emo_feature" in sample:
        T = sample['gt_emo_feature'].shape[1]
    else:
        T = sample['video'].shape[1]
    if self.max_seq_length < T: # truncate
        print("[WARNING] Truncating audio sequence from {} to {}".format(T, self.max_seq_length))
        sample = truncate_sequence_batch(sample, self.max_seq_length)
    # preprocess input (for instance get 3D pseudo-GT )
    sample = self.preprocess_input(sample, train=train, **kwargs)
    check_nan(sample)
    if self.feature_model is not None:
        sample = self.feature_model(sample, train=train, **kwargs)
        check_nan(sample)
    else:
        # No feature model: feed the ground-truth emotion features directly.
        input_key = "gt_emo_feature" # TODO: this needs to be redesigned
        sample["hidden_feature"] = sample[input_key]
    # Fuse only when more than one modality is configured.
    if self.is_multi_modal():
        sample = self.signal_fusion(sample, train=train, **kwargs)
    if self.sequence_encoder is not None:
        sample = self.sequence_encoder(sample) #, train=train, validation=validation, **kwargs)
        check_nan(sample)
    if self.classification_head is not None:
        sample = self.classification_head(sample)
        check_nan(sample)
    return sample
def compute_loss(self, sample, training, validation):
    """Evaluate every configured loss/metric term and form the weighted total.

    Returns ``(total_loss, losses, metrics)``. ``total_loss`` is the sum of
    ``term * weight`` over all usable loss terms, or ``None`` when no term
    could be computed. NaN terms are reported and excluded from the total.
    """
    losses, metrics = {}, {}
    for name, term_cfg in self.cfg.learning.losses.items():
        assert name not in losses.keys()
        losses["loss_" + name] = self._compute_loss(sample, name, term_cfg)
    for name, term_cfg in self.cfg.learning.metrics.items():
        assert name not in metrics.keys()
        # Metrics are evaluation-only; never backpropagate through them.
        with torch.no_grad():
            metrics["metric_" + name] = self._compute_loss(sample, name, term_cfg)
    total_loss = None
    for name, term_cfg in self.cfg.learning.losses.items():
        term = losses["loss_" + name]
        if term is None:
            continue
        if isinstance(term, torch.Tensor) and term.isnan().any():
            print(f"[WARNING]: loss '{name}' is NaN. Skipping this term.")
            continue
        if total_loss is None:
            total_loss = 0.
        weighted = term * term_cfg["weight"]
        total_loss = total_loss + weighted
        losses["loss_" + name + "_w"] = weighted
    losses["loss_total"] = total_loss
    return total_loss, losses, metrics
# def _compute_loss(self, sample, training, validation, loss_name, loss_cfg):
# raise NotImplementedError("Please implement this method in your child class")
def _compute_loss(self, sample, loss_name, loss_cfg):
# TODO: this could be done nicer (have a dict with name - loss functor)
loss_type = loss_name if 'loss_type' not in loss_cfg.keys() else loss_cfg['loss_type']
if "cross_entropy" in loss_type:
label = sample[loss_cfg["output_key"]]
if loss_cfg["output_key"] == "gt_expression_intensity":
label -= 1 # expression intensity is in 1-3 range, but we need 0-2 for cross entropy
loss_value = F.cross_entropy(sample[loss_cfg["input_key"]], label)
else:
raise ValueError(f"Unsupported loss type: '{loss_type}'")
return loss_value
def training_step(self, batch, batch_idx, *args, **kwargs):
    """Single optimization step: forward pass, loss computation, epoch-level logging."""
    sample = self.forward(batch, train=True, validation=False, **kwargs)
    total_loss, losses, metrics = self.compute_loss(sample, training=True, validation=False, **kwargs)
    combined = {**losses, **metrics}
    # Reduce every entry to a plain float for the logger (non-numeric -> 0.).
    to_log = {
        "train/" + key: (value.item() if isinstance(value, torch.Tensor)
                         else value if isinstance(value, float) else 0.)
        for key, value in combined.items()
    }
    if self.logger is not None:
        # Aggregate per epoch rather than per step.
        self.log_dict(to_log, on_step=False, on_epoch=True, sync_dist=True)
    return total_loss
def validation_step(self, batch, batch_idx, *args, **kwargs):
    """Single validation step: forward pass, loss, epoch-level logging.

    Returns both the total loss and the dict of logged scalars.
    """
    sample = self.forward(batch, train=False, validation=True, **kwargs)
    total_loss, losses, metrics = self.compute_loss(sample, training=False, validation=True, **kwargs)
    to_log = {}
    for key, value in {**losses, **metrics}.items():
        # Reduce each entry to a plain float for the logger (non-numeric -> 0.).
        if isinstance(value, torch.Tensor):
            scalar = value.item()
        elif isinstance(value, float):
            scalar = value
        else:
            scalar = 0.
        to_log["val/" + key] = scalar
    if self.logger is not None:
        # Aggregate per epoch rather than per step.
        self.log_dict(to_log, on_step=False, on_epoch=True, sync_dist=True)
    return total_loss, to_log
def test_step(self, batch, batch_idx, *args, **kwargs):
training = False
# forward pass
sample = self.forward(batch, train=training, teacher_forcing=False, **kwargs)
# loss
total_loss, losses, metrics = self.compute_loss(sample, training, validation=False, **kwargs)
losses_and_metrics_to_log = {**losses, **metrics}
# losses_and_metrics_to_log = {"train_" + k: v.item() for k, v in losses_and_metrics_to_log.items()}
losses_and_metrics_to_log = {"test/" + k: v.item() if isinstance(v, (torch.Tensor,)) else v if isinstance(v, float) else 0. for k, v in losses_and_metrics_to_log.items()}
if self.logger is not None:
self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
if self.logger is not None:
# self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended
self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True, sync_dist=True) # log per epoch, # recommended
return total_loss
@classmethod
def instantiate(cls, cfg, stage, prefix, checkpoint, checkpoint_kwargs) -> 'VideoClassifierBase':
    """Create a model from config, or restore it from ``checkpoint``.

    BUGFIX: previously hard-coded ``VideoClassifierBase`` instead of ``cls``,
    so subclasses calling ``instantiate`` got an instance of the base class.
    ``stage`` is currently unused (kept for interface compatibility).
    """
    if checkpoint is None:
        model = cls(cfg, prefix)
    else:
        checkpoint_kwargs = checkpoint_kwargs or {}
        model = cls.load_from_checkpoint(
            checkpoint_path=checkpoint,
            strict=False,
            **checkpoint_kwargs)
    return model
def sequence_encoder_from_cfg(cfg, feature_dim):
    """Factory: build the sequence-encoder module selected by ``cfg.type``.

    :param cfg: config whose ``type`` field names the encoder class.
    :param feature_dim: dimensionality of the per-frame input features.
    :raises ValueError: if ``cfg.type`` is not a known encoder type.
    """
    if cfg.type == "TransformerSequenceClassifier":
        return TransformerSequenceClassifier(cfg, feature_dim)
    elif cfg.type == "GRUSequenceClassifier":
        return GRUSequenceClassifier(cfg, feature_dim)
    else:
        # BUGFIX: the error previously reported `cfg.model`, but dispatch is
        # on `cfg.type` (and `cfg.model` may not even exist).
        raise ValueError(f"Unknown sequence classifier type: {cfg.type}")
def classification_head_from_cfg(cfg, feature_size, num_classes):
    """Factory: build the classification head selected by ``cfg.type``.

    :param cfg: config whose ``type`` field names the head class.
    :param feature_size: dimensionality of the incoming feature vector.
    :param num_classes: number of output classes.
    :raises ValueError: if ``cfg.type`` is not a known head type.
    """
    if cfg.type == "LinearClassificationHead":
        return LinearClassificationHead(cfg, feature_size, num_classes)
    elif cfg.type == "MultiheadLinearClassificationHead":
        return MultiheadLinearClassificationHead(cfg, feature_size, num_classes)
    else:
        # BUGFIX: the error previously reported `cfg.model`, but dispatch is
        # on `cfg.type` (and `cfg.model` may not even exist).
        raise ValueError(f"Unknown classification head type: {cfg.type}")
class EmoSwin(TemporalFeatureEncoder):
def __init__(self, cfg):
super().__init__()
swin_cfg_path = Path(cfg.model_path)
self.trainable = cfg.trainable
if not swin_cfg_path.is_absolute(): | swin_cfg_path = get_path_to_assets() / "EmotionRecognition" / "image_based_networks" / swin_cfg_path / "cfg.yaml" | 5 | 2023-11-07 20:13:32+00:00 | 8k |
hxz393/ConfigCenterComparer | ui/table_main.py | [
{
"identifier": "COL_INFO",
"path": "config/settings.py",
"snippet": "COL_INFO = {\n \"name\": {\"col\": 0},\n \"group\": {\"col\": 1},\n \"key\": {\"col\": 2},\n \"pro_value\": {\"col\": 3},\n \"pro_time\": {\"col\": 4},\n \"pre_value\": {\"col\": 5},\n \"pre_time\": {\"col\": 6},\... | import logging
from typing import List, Optional, Dict
from PyQt5.QtCore import Qt, QPoint, pyqtSignal
from PyQt5.QtGui import QBrush, QColor, QKeyEvent
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QMenu, QAction, QHeaderView
from config.settings import COL_INFO, COLOR_SKIP, COLOR_CONSISTENCY_FULLY, COLOR_CONSISTENCY_PARTIALLY, COLOR_EMPTY, COLOR_DEFAULT
from lib.log_time import log_time
from ui.action_copy import ActionCopy
from ui.action_save import ActionSave
from ui.action_skip import ActionSkip
from ui.action_unskip import ActionUnskip
from ui.config_manager import ConfigManager
from ui.lang_manager import LangManager | 6,859 | """
此文件定义了 TableMain 类,一个基于 PyQt5 的 QTableWidget 的高级实现。
TableMain 类主要用于显示和管理表格数据,提供了多种扩展功能,包括语言国际化支持、动态配置管理、右键菜单操作等。
该类与多个辅助类(如 LangManager 和 ConfigManager)集成,实现了复杂的功能逻辑。
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
logger = logging.getLogger(__name__)
class TableMain(QTableWidget):
"""
主表格类,用于展示和管理数据行。
此类继承自 PyQt5 的 QTableWidget,提供了丰富的数据展示和管理功能。包括但不限于数据的展示、行的颜色标记、右键菜单功能以及快捷键支持。
通过与 LangManager 和 ConfigManager 的集成,支持动态语言切换和配置管理。
:param lang_manager: 用于管理界面语言的 LangManager 实例。
:type lang_manager: LangManager
:param config_manager: 用于管理配置的 ConfigManager 实例。
:type config_manager: ConfigManager
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
status_updated = pyqtSignal(str)
filter_updated = pyqtSignal(list)
def __init__(self,
lang_manager: LangManager,
config_manager: ConfigManager):
super().__init__()
self.lang_manager = lang_manager
self.lang_manager.lang_updated.connect(self.update_lang)
self.config_manager = config_manager
# 实例化用到的组件
self.actionCopy = ActionCopy(self.lang_manager, self)
self.actionSave = ActionSave(self.lang_manager, self)
self.actionSkip = ActionSkip(self.lang_manager, self.config_manager, self)
| """
此文件定义了 TableMain 类,一个基于 PyQt5 的 QTableWidget 的高级实现。
TableMain 类主要用于显示和管理表格数据,提供了多种扩展功能,包括语言国际化支持、动态配置管理、右键菜单操作等。
该类与多个辅助类(如 LangManager 和 ConfigManager)集成,实现了复杂的功能逻辑。
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
logger = logging.getLogger(__name__)
class TableMain(QTableWidget):
"""
主表格类,用于展示和管理数据行。
此类继承自 PyQt5 的 QTableWidget,提供了丰富的数据展示和管理功能。包括但不限于数据的展示、行的颜色标记、右键菜单功能以及快捷键支持。
通过与 LangManager 和 ConfigManager 的集成,支持动态语言切换和配置管理。
:param lang_manager: 用于管理界面语言的 LangManager 实例。
:type lang_manager: LangManager
:param config_manager: 用于管理配置的 ConfigManager 实例。
:type config_manager: ConfigManager
:author: assassing
:contact: https://github.com/hxz393
:copyright: Copyright 2023, hxz393. 保留所有权利。
"""
status_updated = pyqtSignal(str)
filter_updated = pyqtSignal(list)
def __init__(self,
lang_manager: LangManager,
config_manager: ConfigManager):
super().__init__()
self.lang_manager = lang_manager
self.lang_manager.lang_updated.connect(self.update_lang)
self.config_manager = config_manager
# 实例化用到的组件
self.actionCopy = ActionCopy(self.lang_manager, self)
self.actionSave = ActionSave(self.lang_manager, self)
self.actionSkip = ActionSkip(self.lang_manager, self.config_manager, self) | self.actionUnskip = ActionUnskip(self.lang_manager, self.config_manager, self) | 10 | 2023-11-07 01:02:38+00:00 | 8k |
pytorch-labs/ao | torchao/quantization/quant_api.py | [
{
"identifier": "DynamicallyPerAxisQuantizedLinear",
"path": "torchao/quantization/dynamic_quant.py",
"snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetr... | import torch
from .dynamic_quant import (
DynamicallyPerAxisQuantizedLinear,
)
from .subclass import (
QuantizedLinearWeightBase,
Int8DynamicallyQuantizedLinearWeight,
Int8WeightOnlyQuantizedLinearWeight,
Int4WeightOnlyQuantizedLinearWeight,
)
from .weight_only import (
WeightOnlyInt8QuantLinear,
) | 6,024 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Quantization APIs
Generally these APIs can be applied directly to any model
with Linear modules to obtain quantized linear ops. The intended
usage involves applying torch.compile to the model afterwards
both because primitives were designed based on the fusions that
come along with it and because that is how we access the intended quantized
and mixed GEMM kernels
"""
__all__ = [
"apply_weight_only_int8_quant",
"apply_dynamic_quant",
"change_linear_weights_to_int8_dqtensors",
"change_linear_weights_to_int8_woqtensors",
"change_linear_weights_to_int4_woqtensors",
"swap_conv2d_1x1_to_linear"
]
def _replace_with_custom_fn_if_matches_filter(
model, replacement_fn, filter_fn, cur_fqn=""
) -> None:
"""
For each `child` in `model`, replaces it with `replacement_fn(child)`
if `filter_fn(child)` is `True`
"""
if filter_fn(model, cur_fqn[:-1]):
model = replacement_fn(model)
return model
else:
for name, child in model.named_children():
new_child = _replace_with_custom_fn_if_matches_filter(
child, replacement_fn, filter_fn, f"{cur_fqn}{name}."
)
if new_child is not child:
setattr(model, name, new_child)
return model
def _is_linear(mod, *args):
return (
isinstance(mod, torch.nn.Linear) and
hasattr(mod, "weight") and
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Quantization APIs
Generally these APIs can be applied directly to any model
with Linear modules to obtain quantized linear ops. The intended
usage involves applying torch.compile to the model afterwards
both because primitives were designed based on the fusions that
come along with it and because that is how we access the intended quantized
and mixed GEMM kernels
"""
__all__ = [
"apply_weight_only_int8_quant",
"apply_dynamic_quant",
"change_linear_weights_to_int8_dqtensors",
"change_linear_weights_to_int8_woqtensors",
"change_linear_weights_to_int4_woqtensors",
"swap_conv2d_1x1_to_linear"
]
def _replace_with_custom_fn_if_matches_filter(
model, replacement_fn, filter_fn, cur_fqn=""
) -> None:
"""
For each `child` in `model`, replaces it with `replacement_fn(child)`
if `filter_fn(child)` is `True`
"""
if filter_fn(model, cur_fqn[:-1]):
model = replacement_fn(model)
return model
else:
for name, child in model.named_children():
new_child = _replace_with_custom_fn_if_matches_filter(
child, replacement_fn, filter_fn, f"{cur_fqn}{name}."
)
if new_child is not child:
setattr(model, name, new_child)
return model
def _is_linear(mod, *args):
return (
isinstance(mod, torch.nn.Linear) and
hasattr(mod, "weight") and | not isinstance(mod.weight, QuantizedLinearWeightBase) | 1 | 2023-11-03 21:27:36+00:00 | 8k |
google-research/semivl | semivl.py | [
{
"identifier": "get_palette",
"path": "datasets/palettes.py",
"snippet": "def get_palette(dataset):\n if dataset == 'pascal':\n return VOC_PALETTE\n elif dataset == 'cityscapes':\n return CITYSCAPES_PALETTE\n elif dataset == 'coco':\n return COCO_PALETTE\n elif dataset ... | import argparse
import logging
import math
import os
import pprint
import shutil
import uuid
import time
import mmcv
import torch
import torch.backends.cudnn as cudnn
import yaml
from datetime import datetime
from matplotlib import pyplot as plt
from mmseg.core import build_optimizer
from torch import nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from datasets.palettes import get_palette
from experiments import get_git_revision
from model.builder import build_model
from third_party.unimatch.supervised import evaluate
from third_party.unimatch.dataset.semi import SemiDataset
from datasets.classes import CLASSES
from third_party.unimatch.util.ohem import ProbOhemCrossEntropy2d
from third_party.unimatch.util.dist_helper import setup_distributed
from third_party.unimatch.util.utils import count_params, count_training_params, init_log
from utils.gen_code_archive import gen_code_archive
from utils.plot_utils import plot_data
from utils.train_utils import (DictAverageMeter, confidence_weighted_loss,
cutmix_img_, cutmix_mask)
from version import __version__ | 6,608 | # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def compute_mc_loss(pred, mask, ign):
    """Multi-class loss with reduction chosen by the module-level `mcc_loss_reduce`.

    'mean_valid' averages the summed loss over non-ignored (!= 255) entries of
    `ign`; 'mean_all' averages over every element of `ign`; any other value
    returns the raw (unreduced) output of `criterion_mc`.
    """
    loss = criterion_mc(pred, mask)
    if mcc_loss_reduce == 'mean_valid':
        valid_count = (ign != 255).sum()
        loss = loss.sum() / valid_count
    elif mcc_loss_reduce == 'mean_all':
        loss = loss.sum() / ign.numel()
    return loss
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)
args = parser.parse_args()
with open(args.config, "r") as fp:
cfg = yaml.load(fp, Loader=yaml.Loader)
labeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/labeled.txt'
unlabeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/unlabeled.txt'
logger = init_log('global', logging.INFO)
logger.propagate = 0
mmcv.utils.get_logger('mmcv').setLevel('WARNING')
rank, world_size = setup_distributed(port=args.port)
if cfg['nccl_p2p_disable']:
os.environ["NCCL_P2P_DISABLE"] = str(1)
if rank == 0:
timestr = datetime.now().strftime("%y%m%d-%H%M")
uid = str(uuid.uuid4())[:5]
| # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def compute_mc_loss(pred, mask, ign):
l_mc = criterion_mc(pred, mask)
if mcc_loss_reduce == 'mean_valid':
l_mc = l_mc.sum() / (ign != 255).sum()
if mcc_loss_reduce == 'mean_all':
l_mc = l_mc.sum() / ign.numel()
return l_mc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--local_rank', default=0, type=int)
parser.add_argument('--port', default=None, type=int)
args = parser.parse_args()
with open(args.config, "r") as fp:
cfg = yaml.load(fp, Loader=yaml.Loader)
labeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/labeled.txt'
unlabeled_id_path = f'splits/{cfg["dataset"]}/{cfg["split"]}/unlabeled.txt'
logger = init_log('global', logging.INFO)
logger.propagate = 0
mmcv.utils.get_logger('mmcv').setLevel('WARNING')
rank, world_size = setup_distributed(port=args.port)
if cfg['nccl_p2p_disable']:
os.environ["NCCL_P2P_DISABLE"] = str(1)
if rank == 0:
timestr = datetime.now().strftime("%y%m%d-%H%M")
uid = str(uuid.uuid4())[:5] | run_name = f'{timestr}_{cfg["name"]}_v{__version__}_{uid}'.replace('.', '-') | 17 | 2023-11-02 14:49:38+00:00 | 8k |
softwaredoug/searcharray | test/test_solr.py | [
{
"identifier": "parse_min_should_match",
"path": "searcharray/solr.py",
"snippet": "def parse_min_should_match(num_clauses: int, spec: str) -> int:\n \"\"\"Parse Solr's min should match (ie mm) spec.\n\n See this ChatGPT translation of mm code from Solr's Java code for parsing this\n https://c... | import pytest
import pandas as pd
import numpy as np
from typing import List
from test_utils import w_scenarios
from searcharray.solr import parse_min_should_match, edismax
from searcharray.postings import SearchArray | 5,770 | """Tests for solr dsl helpers."""
def test_standard_percentage():
assert parse_min_should_match(10, "50%") == 5
def test_over_100_percentage():
assert parse_min_should_match(10, "150%") == 10
def test_negative_percentage():
assert parse_min_should_match(10, "-50%") == 5
def test_standard_integer():
assert parse_min_should_match(10, "3") == 3
def test_negative_integer():
assert parse_min_should_match(10, "-3") == 7
def test_integer_exceeding_clause_count():
assert parse_min_should_match(10, "15") == 10
def test_conditional_spec_less_than_clause_count():
assert parse_min_should_match(10, "5<70%") == 7
def test_conditional_spec_greater_than_clause_count():
assert parse_min_should_match(10, "15<70%") == 10
def test_complex_conditional_spec():
assert parse_min_should_match(10, "3<50% 5<30%") == 3
def test_invalid_spec_percentage():
with pytest.raises(ValueError):
parse_min_should_match(10, "five%")
def test_invalid_spec_integer():
with pytest.raises(ValueError):
parse_min_should_match(10, "five")
def test_invalid_spec_conditional():
with pytest.raises(ValueError):
parse_min_should_match(10, "5<")
def test_empty_spec():
with pytest.raises(ValueError):
parse_min_should_match(10, "")
def test_complex_conditional_spec_with_percentage():
assert parse_min_should_match(10, "2<2 5<3 7<40%") == 4
def everythings_a_b_tokenizer(text: str) -> List[str]:
    """Degenerate test tokenizer: emit the token "b" once per whitespace-separated word."""
    return ["b"] * len(text.split())
def just_lowercasing_tokenizer(text: str) -> List[str]:
    """Test tokenizer: return the whole lowercased text as a single token."""
    return [text.lower()]
edismax_scenarios = {
"base": {
"frame": {
| """Tests for solr dsl helpers."""
def test_standard_percentage():
assert parse_min_should_match(10, "50%") == 5
def test_over_100_percentage():
assert parse_min_should_match(10, "150%") == 10
def test_negative_percentage():
assert parse_min_should_match(10, "-50%") == 5
def test_standard_integer():
assert parse_min_should_match(10, "3") == 3
def test_negative_integer():
assert parse_min_should_match(10, "-3") == 7
def test_integer_exceeding_clause_count():
assert parse_min_should_match(10, "15") == 10
def test_conditional_spec_less_than_clause_count():
assert parse_min_should_match(10, "5<70%") == 7
def test_conditional_spec_greater_than_clause_count():
assert parse_min_should_match(10, "15<70%") == 10
def test_complex_conditional_spec():
assert parse_min_should_match(10, "3<50% 5<30%") == 3
def test_invalid_spec_percentage():
with pytest.raises(ValueError):
parse_min_should_match(10, "five%")
def test_invalid_spec_integer():
with pytest.raises(ValueError):
parse_min_should_match(10, "five")
def test_invalid_spec_conditional():
with pytest.raises(ValueError):
parse_min_should_match(10, "5<")
def test_empty_spec():
with pytest.raises(ValueError):
parse_min_should_match(10, "")
def test_complex_conditional_spec_with_percentage():
assert parse_min_should_match(10, "2<2 5<3 7<40%") == 4
def everythings_a_b_tokenizer(text: str) -> List[str]:
"""Split on whitespace and return a list of tokens."""
return ["b"] * len(text.split())
def just_lowercasing_tokenizer(text: str) -> List[str]:
"""Lowercase and return a list of tokens."""
return [text.lower()]
edismax_scenarios = {
"base": {
"frame": { | 'title': lambda: SearchArray.index(["foo bar bar baz", "data2", "data3 bar", "bunny funny wunny"]), | 2 | 2023-11-03 13:25:16+00:00 | 8k |
intellerce/controlanimate | scripts/vid2vid.py | [
{
"identifier": "Upscaler",
"path": "modules/upscaler.py",
"snippet": "class Upscaler():\n def __init__(self, scale, use_face_enhancer = True, upscale_first = False):\n model_name = 'RealESRGAN_x4plus_anime_6B' #'RealESRGAN_x4plus_anime_6B'RealESRNet_x4plus\n self.scale = scale\n ... | import os
import json
import time
import datetime
import numpy as np
from PIL import Image
from omegaconf import OmegaConf
from modules.upscaler import Upscaler
from modules.controlanimate_pipeline import ControlAnimatePipeline
from modules.utils import video_to_high_fps, get_fps_frame_count_width_height, FFMPEGProcessor
from modules.utils import match_colors | 4,657 | def vid2vid(
config_path
):
"""
This function converts an input video into an output video based on the
parameters provided in the config file.
PARAMS:
config_path: str -> Path to the config file.
"""
date_time = datetime.datetime.now()
date_time = date_time.strftime("%Y%m%d_%H%M%S_%f")
print(date_time)
config = OmegaConf.load(config_path)
has_input_video = (config.input_video_path != "")
total_frames = 0
if not has_input_video:
total_frames = int(config.total_frames)
save_frames = bool(config.save_frames)
upscaler = None
upscale = float(config.upscale)
use_face_enhancer = bool(config.use_face_enhancer)
##################################################
# Figuring out the number of frames to be processed
start_time = config.start_time.strip()
end_time = config.end_time.strip()
x = time.strptime(start_time,'%H:%M:%S')
x_seconds = datetime.timedelta(hours=x.tm_hour,minutes=x.tm_min,seconds=x.tm_sec).total_seconds()
y = time.strptime(end_time,'%H:%M:%S')
y_seconds = datetime.timedelta(hours=y.tm_hour,minutes=y.tm_min,seconds=y.tm_sec).total_seconds()
if has_input_video:
input_fps, input_frame_count, width, height = get_fps_frame_count_width_height(config.input_video_path)
input_duration = input_frame_count/input_fps
output_duration = min(input_duration, y_seconds - x_seconds)
intermediate_frame_count = config.fps * output_duration
print("Frames to be processed:", intermediate_frame_count)
if config.width != 0: width = config.width
if config.height != 0: height = config.height
width_64 = width - width%64
height_64 = height - height%64
config.W = width_64
config.H = height_64
###################################################
if start_time == "": start_time = "00:00:00"
if end_time == "00:00:00": end_time = ""
cmd_time_string = (f"-ss {start_time}" + f" -to {end_time}" if len(end_time) else "")
if has_input_video:
input_file_path = os.path.normpath(config.input_video_path.strip())
ffmpeg_decoder = FFMPEGProcessor(
" ".join(
[
str(config.ffmpeg_path) + " -y -loglevel error",
f'{cmd_time_string} -i "{input_file_path}"',
"-vf eq=brightness=0.06:saturation=4",
f"-s:v {width_64}x{height_64} -r {config.fps}",
"-f image2pipe -pix_fmt rgb24",
"-vcodec rawvideo -",
]
),
std_out=True,
)
output_file_name = f"Video_{os.path.basename(config.input_video_path).split('.')[0]}_{date_time}.mp4"
if not os.path.exists(config.output_video_dir):
os.makedirs(config.output_video_dir)
assert upscale >= 1, "Upscale factor should be greater than or equal to one."
width_64_out = int(upscale * width_64)
height_64_out = int(upscale * height_64)
ffmpeg_encoder = FFMPEGProcessor(
" ".join(
[
str(config.ffmpeg_path) + " -y -loglevel error",
"-f rawvideo -pix_fmt rgb24",
"-vcodec rawvideo",
f"-s:v {width_64_out}x{height_64_out}",
f"-r {config.fps}",
"-i - -c:v libx264 -preset fast",
f'-crf {config.crf} "{config.output_video_dir}/{output_file_name}"',
]
),
std_in=True,
)
read_byte_count = width_64 * height_64 * 3
frame_count = 1
in_frame_count = 1
raw_image = []
if has_input_video:
raw_image = ffmpeg_decoder.read(read_byte_count)
if config.seed == -1:
config.seed = np.random.randint(1,2**16)
print(">>>> SEED:", config.seed)
| ##############################################
# INTELLERCE LLC - Oct. - Nov. 2023
# This codebase is designed and written for research, test and demo purposes only
# and is not recommended for production purposes.
# The FFMPEG stream ecoding/decoding was ispired from:
# https://github.com/Filarius/video2video
# This code will work only when the repo's root is added to the PYTHONPATH.
# export PYTHONPATH=$PYTHONPATH:"./"
##############################################
# from typing import Any, Callable, Dict, List, Optional, Union # TODO
####################################################################
# The following is the main function of this program
def vid2vid(
config_path
):
"""
This function converts an input video into an output video based on the
parameters provided in the config file.
PARAMS:
config_path: str -> Path to the config file.
"""
date_time = datetime.datetime.now()
date_time = date_time.strftime("%Y%m%d_%H%M%S_%f")
print(date_time)
config = OmegaConf.load(config_path)
has_input_video = (config.input_video_path != "")
total_frames = 0
if not has_input_video:
total_frames = int(config.total_frames)
save_frames = bool(config.save_frames)
upscaler = None
upscale = float(config.upscale)
use_face_enhancer = bool(config.use_face_enhancer)
##################################################
# Figuring out the number of frames to be processed
start_time = config.start_time.strip()
end_time = config.end_time.strip()
x = time.strptime(start_time,'%H:%M:%S')
x_seconds = datetime.timedelta(hours=x.tm_hour,minutes=x.tm_min,seconds=x.tm_sec).total_seconds()
y = time.strptime(end_time,'%H:%M:%S')
y_seconds = datetime.timedelta(hours=y.tm_hour,minutes=y.tm_min,seconds=y.tm_sec).total_seconds()
if has_input_video:
input_fps, input_frame_count, width, height = get_fps_frame_count_width_height(config.input_video_path)
input_duration = input_frame_count/input_fps
output_duration = min(input_duration, y_seconds - x_seconds)
intermediate_frame_count = config.fps * output_duration
print("Frames to be processed:", intermediate_frame_count)
if config.width != 0: width = config.width
if config.height != 0: height = config.height
width_64 = width - width%64
height_64 = height - height%64
config.W = width_64
config.H = height_64
###################################################
if start_time == "": start_time = "00:00:00"
if end_time == "00:00:00": end_time = ""
cmd_time_string = (f"-ss {start_time}" + f" -to {end_time}" if len(end_time) else "")
if has_input_video:
input_file_path = os.path.normpath(config.input_video_path.strip())
ffmpeg_decoder = FFMPEGProcessor(
" ".join(
[
str(config.ffmpeg_path) + " -y -loglevel error",
f'{cmd_time_string} -i "{input_file_path}"',
"-vf eq=brightness=0.06:saturation=4",
f"-s:v {width_64}x{height_64} -r {config.fps}",
"-f image2pipe -pix_fmt rgb24",
"-vcodec rawvideo -",
]
),
std_out=True,
)
output_file_name = f"Video_{os.path.basename(config.input_video_path).split('.')[0]}_{date_time}.mp4"
if not os.path.exists(config.output_video_dir):
os.makedirs(config.output_video_dir)
assert upscale >= 1, "Upscale factor should be greater than or equal to one."
width_64_out = int(upscale * width_64)
height_64_out = int(upscale * height_64)
ffmpeg_encoder = FFMPEGProcessor(
" ".join(
[
str(config.ffmpeg_path) + " -y -loglevel error",
"-f rawvideo -pix_fmt rgb24",
"-vcodec rawvideo",
f"-s:v {width_64_out}x{height_64_out}",
f"-r {config.fps}",
"-i - -c:v libx264 -preset fast",
f'-crf {config.crf} "{config.output_video_dir}/{output_file_name}"',
]
),
std_in=True,
)
read_byte_count = width_64 * height_64 * 3
frame_count = 1
in_frame_count = 1
raw_image = []
if has_input_video:
raw_image = ffmpeg_decoder.read(read_byte_count)
if config.seed == -1:
config.seed = np.random.randint(1,2**16)
print(">>>> SEED:", config.seed)
| animate_pipeline = ControlAnimatePipeline(config) | 1 | 2023-11-04 01:35:44+00:00 | 8k |
Zaczero/openstreetmap-ng | src/models/db/element.py | [
{
"identifier": "updating_cached_property",
"path": "src/lib/updating_cached_property.py",
"snippet": "class updating_cached_property: # noqa: N801\n \"\"\"\n A decorator to cache the result of a property with an auto-update condition.\n\n If watch_field changes, the property is re-evaluated.\... | from collections.abc import Sequence
from datetime import datetime
from shapely import Point
from sqlalchemy import BigInteger, Boolean, DateTime, Enum, ForeignKey, UniqueConstraint
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from src.lib.updating_cached_property import updating_cached_property
from src.models.db.base import Base
from src.models.db.changeset import Changeset
from src.models.db.created_at_mixin import CreatedAtMixin
from src.models.db.user import User
from src.models.element_member import ElementMemberRef
from src.models.element_member_type import ElementMemberRefType
from src.models.element_type import ElementType
from src.models.geometry_type import PointType
from src.models.typed_element_ref import TypedElementRef
from src.models.versioned_element_ref import VersionedElementRef | 5,281 |
class Element(Base.Sequential, CreatedAtMixin):
__tablename__ = 'element'
user_id: Mapped[int] = mapped_column(ForeignKey(User.id), nullable=False)
user: Mapped[User] = relationship(lazy='raise')
changeset_id: Mapped[int] = mapped_column(ForeignKey(Changeset.id), nullable=False)
changeset: Mapped[Changeset] = relationship(back_populates='elements', lazy='raise')
type: Mapped[ElementType] = mapped_column(Enum(ElementType), nullable=False)
typed_id: Mapped[int] = mapped_column(BigInteger, nullable=False)
version: Mapped[int] = mapped_column(BigInteger, nullable=False)
visible: Mapped[bool] = mapped_column(Boolean, nullable=False)
tags: Mapped[dict[str, str]] = mapped_column(JSONB, nullable=False)
point: Mapped[Point | None] = mapped_column(PointType, nullable=True)
members: Mapped[list[ElementMemberRef]] = mapped_column(ElementMemberRefType, nullable=False)
# defaults
superseded_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None)
__table_args__ = (UniqueConstraint(type, typed_id, version),)
@validates('typed_id')
def validate_typed_id(self, _: str, value: int):
if value <= 0:
raise ValueError('typed_id must be positive')
return value
@validates('members')
def validate_members(self, _: str, value: Sequence[ElementMemberRef]):
if any(member.typed_id <= 0 for member in value):
raise ValueError('members typed_id must be positive')
return value
@updating_cached_property('typed_id')
|
class Element(Base.Sequential, CreatedAtMixin):
__tablename__ = 'element'
user_id: Mapped[int] = mapped_column(ForeignKey(User.id), nullable=False)
user: Mapped[User] = relationship(lazy='raise')
changeset_id: Mapped[int] = mapped_column(ForeignKey(Changeset.id), nullable=False)
changeset: Mapped[Changeset] = relationship(back_populates='elements', lazy='raise')
type: Mapped[ElementType] = mapped_column(Enum(ElementType), nullable=False)
typed_id: Mapped[int] = mapped_column(BigInteger, nullable=False)
version: Mapped[int] = mapped_column(BigInteger, nullable=False)
visible: Mapped[bool] = mapped_column(Boolean, nullable=False)
tags: Mapped[dict[str, str]] = mapped_column(JSONB, nullable=False)
point: Mapped[Point | None] = mapped_column(PointType, nullable=True)
members: Mapped[list[ElementMemberRef]] = mapped_column(ElementMemberRefType, nullable=False)
# defaults
superseded_at: Mapped[datetime | None] = mapped_column(DateTime, nullable=True, default=None)
__table_args__ = (UniqueConstraint(type, typed_id, version),)
@validates('typed_id')
def validate_typed_id(self, _: str, value: int):
if value <= 0:
raise ValueError('typed_id must be positive')
return value
@validates('members')
def validate_members(self, _: str, value: Sequence[ElementMemberRef]):
if any(member.typed_id <= 0 for member in value):
raise ValueError('members typed_id must be positive')
return value
@updating_cached_property('typed_id') | def typed_ref(self) -> TypedElementRef: | 9 | 2023-11-04 01:12:13+00:00 | 8k |
codefuse-ai/Collinear-Constrained-Attention | tokenizer/tokenizer.py | [
{
"identifier": "GPT2Tokenizer",
"path": "tokenizer/gpt2_tokenization.py",
"snippet": "class GPT2Tokenizer(object):\n \"\"\"\n GPT-2 BPE tokenizer. Peculiarities:\n - Byte-level BPE\n \"\"\"\n\n @classmethod\n def from_pretrained(\n cls, pretrained_model_name_or_path, cache_... | from abc import ABC
from abc import abstractmethod
from tokenizers import Tokenizer
from transformers import GPT2Tokenizer, GPT2TokenizerFast
from typing import List, Union
from .gpt2_tokenization import GPT2Tokenizer
from utils.common_utils import print_rank_0, is_old_version
from model.glm.tokenization_glm import GLMTokenizer
from model.glm.tokenization_glm_deprecated import GLMChineseTokenizer
import numpy as np
import sentencepiece as spm
import tiktoken | 4,154 | )
tokenizer = HFGPT2Tokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "CharLevelTokenizer".lower():
tokenizer = CharLevelTokenizer(vocab_size=512)
elif args.tokenizer_type.lower() == "TiktokenTokenizer".lower():
assert args.vocab_file is not None
tokenizer = TiktokenTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "GLMTokenizer".lower():
if is_old_version(args.pretrained_model_path):
print('is an old version')
args.glm_mask = '[sMASK]'
old_version_tokenizer = True
tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)
else:
print('is not an old version')
old_version_tokenizer = False
tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)
else:
raise NotImplementedError(
"{} tokenizer is not " "implemented.".format(args.tokenizer_type)
)
# Add vocab size.
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)
return tokenizer
def _vocab_size_with_padding(orig_vocab_size, args):
"""Pad vocab size so it is divisible by model parallel size and
still having GPU friendly size."""
after = orig_vocab_size
multiple = args.make_vocab_size_divisible_by * args.model_parallel_size
while (after % multiple) != 0:
after += 1
print_rank_0(
" > padded vocab (size: {}) with {} dummy tokens "
"(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after)
)
# if args.rank == 0:
# print(
# " > padded vocab (size: {}) with {} dummy tokens "
# "(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after),
# flush=True,
# )
return after
class AbstractTokenizer(ABC):
"""Abstract class for tokenizer."""
def __init__(self, name):
self.name = name
super().__init__()
@property
@abstractmethod
def vocab_size(self):
pass
@property
@abstractmethod
def vocab(self):
"""Dictionary from vocab text token to id token."""
pass
@property
@abstractmethod
def inv_vocab(self):
"""Dictionary from vocab id token to text token."""
pass
@abstractmethod
def tokenize(self, text):
pass
def detokenize(self, token_ids):
raise NotImplementedError(
"detokenizer is not implemented for {} " "tokenizer".format(self.name)
)
@property
def cls(self):
raise NotImplementedError(
"CLS is not provided for {} " "tokenizer".format(self.name)
)
@property
def sep(self):
raise NotImplementedError(
"SEP is not provided for {} " "tokenizer".format(self.name)
)
@property
def pad(self):
raise NotImplementedError(
"PAD is not provided for {} " "tokenizer".format(self.name)
)
@property
def eod(self):
raise NotImplementedError(
"EOD is not provided for {} " "tokenizer".format(self.name)
)
@property
def mask(self):
raise NotImplementedError(
"MASK is not provided for {} " "tokenizer".format(self.name)
)
class _GPT2BPETokenizer(AbstractTokenizer):
"""Original GPT2 BPE tokenizer."""
def __init__(self, vocab_file, merge_file):
name = "GPT2 BPE"
super().__init__(name)
| # Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron tokenizers."""
def build_tokenizer(args):
"""Initialize tokenizer."""
print_rank_0("> building {} tokenizer ...".format(args.tokenizer_type))
# if args.rank == 0:
# print("> building {} tokenizer ...".format(args.tokenizer_type), flush=True)
# Select and instantiate the tokenizer.
if args.tokenizer_type.lower() == "GPT2BPETokenizer".lower():
assert args.vocab_file is not None
assert args.merge_file is not None
tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)
elif args.tokenizer_type.lower() == "SPMTokenizer".lower():
assert args.vocab_file is not None
tokenizer = SentencePieceTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "HFTokenizer".lower():
assert args.vocab_file is not None
tokenizer = HFTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "HFGPT2Tokenizer".lower():
if args.vocab_file is None:
print(
"WARNING: No vocab file found, loading Huggingface's pretrained GPT2Tokenizer"
)
tokenizer = HFGPT2Tokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "CharLevelTokenizer".lower():
tokenizer = CharLevelTokenizer(vocab_size=512)
elif args.tokenizer_type.lower() == "TiktokenTokenizer".lower():
assert args.vocab_file is not None
tokenizer = TiktokenTokenizer(args.vocab_file)
elif args.tokenizer_type.lower() == "GLMTokenizer".lower():
if is_old_version(args.pretrained_model_path):
print('is an old version')
args.glm_mask = '[sMASK]'
old_version_tokenizer = True
tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)
else:
print('is not an old version')
old_version_tokenizer = False
tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)
else:
raise NotImplementedError(
"{} tokenizer is not " "implemented.".format(args.tokenizer_type)
)
# Add vocab size.
args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)
return tokenizer
def _vocab_size_with_padding(orig_vocab_size, args):
"""Pad vocab size so it is divisible by model parallel size and
still having GPU friendly size."""
after = orig_vocab_size
multiple = args.make_vocab_size_divisible_by * args.model_parallel_size
while (after % multiple) != 0:
after += 1
print_rank_0(
" > padded vocab (size: {}) with {} dummy tokens "
"(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after)
)
# if args.rank == 0:
# print(
# " > padded vocab (size: {}) with {} dummy tokens "
# "(new size: {})".format(orig_vocab_size, after - orig_vocab_size, after),
# flush=True,
# )
return after
class AbstractTokenizer(ABC):
"""Abstract class for tokenizer."""
def __init__(self, name):
self.name = name
super().__init__()
@property
@abstractmethod
def vocab_size(self):
pass
@property
@abstractmethod
def vocab(self):
"""Dictionary from vocab text token to id token."""
pass
@property
@abstractmethod
def inv_vocab(self):
"""Dictionary from vocab id token to text token."""
pass
@abstractmethod
def tokenize(self, text):
pass
def detokenize(self, token_ids):
raise NotImplementedError(
"detokenizer is not implemented for {} " "tokenizer".format(self.name)
)
@property
def cls(self):
raise NotImplementedError(
"CLS is not provided for {} " "tokenizer".format(self.name)
)
@property
def sep(self):
raise NotImplementedError(
"SEP is not provided for {} " "tokenizer".format(self.name)
)
@property
def pad(self):
raise NotImplementedError(
"PAD is not provided for {} " "tokenizer".format(self.name)
)
@property
def eod(self):
raise NotImplementedError(
"EOD is not provided for {} " "tokenizer".format(self.name)
)
@property
def mask(self):
raise NotImplementedError(
"MASK is not provided for {} " "tokenizer".format(self.name)
)
class _GPT2BPETokenizer(AbstractTokenizer):
"""Original GPT2 BPE tokenizer."""
def __init__(self, vocab_file, merge_file):
name = "GPT2 BPE"
super().__init__(name)
| self.tokenizer = GPT2Tokenizer( | 0 | 2023-11-02 01:37:01+00:00 | 8k |
Hritikbansal/videocon | training/pipeline_video/mplug_owl_video/modeling_mplug_owl.py | [
{
"identifier": "MplugOwlConfig",
"path": "training/pipeline_video/mplug_owl_video/configuration_mplug_owl.py",
"snippet": "class MplugOwlConfig(PretrainedConfig):\n r\"\"\"\n [`MplugOwlConfig`] is the configuration class to store the configuration of a [`MplugOwlForConditionalGeneration`]. It is\... | import logging
import math
import math
import torch
import torch.utils.checkpoint
import einops
from typing import Any, Optional, Tuple, Union
from flash_attn.flash_attn_interface import flash_attn_unpadded_func
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union
from torch import nn
from transformers.modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
BaseModelOutputWithPastAndCrossAttentions
)
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from transformers.utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.auto import AutoModelForCausalLM
from .configuration_mplug_owl import MplugOwlConfig, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig
from transformers import GenerationConfig | 6,114 | context_layer = flash_attn_func(
query_states,
key_states,
value_states,
cu_seqlens,
cu_seqlens,
seq_len,
seq_len,
self.dropout if self.training else 0.0,
softmax_scale=self.scale,
causal=False,
return_attn_probs=False,
)
# [b*sq, np, hn] => [b, sq, np, hn]
context_layer = context_layer.view(bsz, seq_len, context_layer.size(1), context_layer.size(2))
else:
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
attention_scores = attention_scores * self.scale
# Normalize the attention scores to probabilities.
attention_probs = torch.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
output = self.dense(context_layer)
outputs = (output, attention_probs) if output_attentions else (output, None)
return outputs
class MplugOwlMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = QuickGELU()
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class MplugOwlVisionEncoderLayer(nn.Module):
def __init__(self, config: MplugOwlVisionConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.temporal = MplugOwlVisionLocalTemporal(config)
self.self_attn = MplugOwlVisionAttention(config)
self.input_layernorm = LayerNormFp32(self.hidden_size, eps=config.layer_norm_eps)
self.mlp = MplugOwlMLP(config)
self.post_attention_layernorm = LayerNormFp32(self.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, time, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
B, T = hidden_states.size(0), hidden_states.size(1)
if T > 1:
hidden_states = hidden_states + self.temporal(hidden_states)
hidden_states = einops.rearrange(hidden_states, 'b t n d -> (b t) n d')
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
head_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = hidden_states + residual
hidden_states = einops.rearrange(hidden_states, '(b t) n d -> b t n d', b=B)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class MplugOwlPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
| # coding=utf-8
# Copyright 2022 x-plug The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch MplugOwl model. """
try:
flash_attn_func = flash_attn_unpadded_func
except:
flash_attn_func = None
print("install flash-attn first.")
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "MAGAer13/mplug-owl-llama-7b"
_CONFIG_FOR_DOC = "MplugOwlConfig"
MPLUG_OWL_PRETRAINED_MODEL_ARCHIVE_LIST = [
"MAGAer13/mplug-owl-llama-7b",
# See all MplugOwl models at https://huggingface.co/models?filter=mplug_owl
]
@dataclass
class MplugOwlForConditionalGenerationModelOutput(ModelOutput):
"""
Class defining the outputs of [`MPlugOwlForConditionalGeneration`].
Args:
loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Language modeling loss from the language model.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head of the language model.
vision_outputs (`BaseModelOutputWithPooling`):
Outputs of the vision encoder.
language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
Outputs of the language model.
"""
loss: Optional[Tuple[torch.FloatTensor]] = None
logits: Optional[Tuple[torch.FloatTensor]] = None
vision_outputs: Optional[torch.FloatTensor] = None
language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None
def to_tuple(self) -> Tuple[Any]:
return tuple(
self[k] if k not in ["vision_outputs", "language_model_outputs"] else getattr(self, k).to_tuple()
for k in self.keys()
)
def get_ltor_masks_and_position_ids_from_embeddings(data):
"""Build masks and position id for left to right model."""
# Extract batch size and sequence length.
micro_batch_size, seq_length = data.size()[:2]
# Attention mask (lower triangular).
att_mask_batch = 1
attention_mask = torch.tril(torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)).view(
att_mask_batch, 1, seq_length, seq_length
)
# Loss mask.
loss_mask = torch.ones(data.size()[:2], dtype=torch.float, device=data.device)
# Position ids.
position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
position_ids = position_ids.unsqueeze(0).expand_as(data[..., 0])
# Convert attention mask to binary:
attention_mask = attention_mask < 0.5
return attention_mask, loss_mask, position_ids
class MplugOwlVisionEmbeddings(nn.Module):
def __init__(self, config: MplugOwlVisionConfig):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.cls_token = nn.Parameter(torch.randn(1, 1, self.hidden_size))
self.patch_embed = nn.Conv2d(
in_channels=3,
out_channels=self.hidden_size,
kernel_size=self.patch_size,
stride=self.patch_size,
bias=False,
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.position_embedding = nn.Parameter(torch.randn(1, self.num_patches + 1, self.hidden_size))
self.pre_layernorm = LayerNormFp32(self.hidden_size, eps=config.layer_norm_eps)
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
# [B, C, T, H, W] or [B, C, H, W]
batch_size = pixel_values.size(0)
T = pixel_values.size(2) if pixel_values.dim() > 4 else 1
if T > 1:
pixel_values = einops.rearrange(pixel_values, 'b c t h w -> (b t) c h w')
image_embeds = self.patch_embed(pixel_values)
image_embeds = image_embeds.flatten(2).transpose(1, 2)
class_embeds = self.cls_token.expand(batch_size * T, 1, -1).to(image_embeds.dtype)
embeddings = torch.cat([class_embeds, image_embeds], dim=1)
embeddings = embeddings + self.position_embedding[:, : embeddings.size(1)].to(image_embeds.dtype)
embeddings = self.pre_layernorm(embeddings)
embeddings = einops.rearrange(embeddings, '(b t) n d -> b t n d', b=batch_size)
return embeddings
class LayerNormFp32(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back)."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, x: torch.Tensor):
output = torch.nn.functional.layer_norm(
x.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(x)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class MplugOwlVisionLocalTemporal(nn.Module):
def __init__(self, config):
super(MplugOwlVisionLocalTemporal, self).__init__()
self.image_size = config.image_size
self.patch_size = config.patch_size
self.num_patches = 1 + (self.image_size // self.patch_size) ** 2
self.hidden_size = config.hidden_size
d_bottleneck = self.hidden_size // 2
self.ln = LayerNormFp32(self.hidden_size)
self.down_proj = nn.Conv3d(self.hidden_size, d_bottleneck, kernel_size=1, stride=1, padding=0)
self.conv = nn.Conv3d(d_bottleneck, d_bottleneck, kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0), groups=d_bottleneck)
self.up_proj = nn.Conv3d(d_bottleneck, self.hidden_size, kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.up_proj.weight, 0)
nn.init.constant_(self.up_proj.bias, 0)
self.activation_func = QuickGELU()
def forward(self, x):
# [b, t, s, c]
T = x.size(1)
H = int((self.num_patches - 1)**0.5)
cls_token, x = x[:, :, 0:1], x[:, :, 1:]
x = self.ln(x)
x = einops.rearrange(x, 'b t (h w) c -> b c t h w', h=H)
x = self.down_proj(x)
_device = x.device
self = self.to('cpu') # hack: cpu offloading since bfloat16 on gpu gives error with conv_depthwise3d
x = x.to('cpu')
x = self.conv(x)
self = self.to(_device)
x = x.to(_device)
x = self.activation_func(x)
x = self.up_proj(x)
x = einops.rearrange(x, 'b c t h w -> b t (h w) c')
x = torch.cat([cls_token, x], dim = 2)
return x
class MplugOwlVisionAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
if self.head_dim * self.num_heads != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = nn.Dropout(config.attention_dropout)
self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size)
self.dense = nn.Linear(self.hidden_size, self.hidden_size)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, seq_len, embed_dim = hidden_states.size()
mixed_qkv = self.query_key_value(hidden_states)
mixed_qkv = mixed_qkv.reshape(bsz, seq_len, self.num_heads, 3, embed_dim // self.num_heads).permute(
3, 0, 2, 1, 4
) # [3, b, np, sq, hn]
query_states, key_states, value_states = (
mixed_qkv[0],
mixed_qkv[1],
mixed_qkv[2],
)
# if self.config.use_flash_attn and flash_attn_func is not None:
if False:
# [b*sq, np, hn]
query_states = query_states.permute(0, 2, 1, 3).contiguous()
query_states = query_states.view(query_states.size(0) * query_states.size(1), query_states.size(2), -1)
key_states = key_states.permute(0, 2, 1, 3).contiguous()
key_states = key_states.view(key_states.size(0) * key_states.size(1), key_states.size(2), -1)
value_states = value_states.permute(0, 2, 1, 3).contiguous()
value_states = value_states.view(value_states.size(0) * value_states.size(1), value_states.size(2), -1)
cu_seqlens = torch.arange(
0, (bsz + 1) * seq_len, step=seq_len, dtype=torch.int32, device=query_states.device
)
context_layer = flash_attn_func(
query_states,
key_states,
value_states,
cu_seqlens,
cu_seqlens,
seq_len,
seq_len,
self.dropout if self.training else 0.0,
softmax_scale=self.scale,
causal=False,
return_attn_probs=False,
)
# [b*sq, np, hn] => [b, sq, np, hn]
context_layer = context_layer.view(bsz, seq_len, context_layer.size(1), context_layer.size(2))
else:
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
attention_scores = attention_scores * self.scale
# Normalize the attention scores to probabilities.
attention_probs = torch.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size,)
context_layer = context_layer.reshape(new_context_layer_shape)
output = self.dense(context_layer)
outputs = (output, attention_probs) if output_attentions else (output, None)
return outputs
class MplugOwlMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = QuickGELU()
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class MplugOwlVisionEncoderLayer(nn.Module):
    """One vision-transformer layer: local temporal mixing, then pre-norm
    self-attention and a pre-norm MLP, each with a residual connection."""

    def __init__(self, config: MplugOwlVisionConfig):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.temporal = MplugOwlVisionLocalTemporal(config)
        self.self_attn = MplugOwlVisionAttention(config)
        self.input_layernorm = LayerNormFp32(self.hidden_size, eps=config.layer_norm_eps)
        self.mlp = MplugOwlMLP(config)
        self.post_attention_layernorm = LayerNormFp32(self.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): layer input of shape
                `(batch, time, seq_len, embed_dim)`.
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are
                indicated by very large negative values (passed to the
                attention module as its head mask).
            output_attentions (`bool`, *optional*): whether to also return
                the attention weights.
        """
        batch, frames = hidden_states.shape[0], hidden_states.shape[1]
        # Temporal mixing is only meaningful for multi-frame (video) input.
        if frames > 1:
            hidden_states = hidden_states + self.temporal(hidden_states)
        # Fold the time axis into the batch so attention runs per frame.
        hidden_states = einops.rearrange(hidden_states, 'b t n d -> (b t) n d')

        # Pre-norm self-attention with residual connection.
        shortcut = hidden_states
        attn_out, attn_weights = self.self_attn(
            hidden_states=self.input_layernorm(hidden_states),
            head_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = shortcut + attn_out

        # Pre-norm MLP with residual connection.
        shortcut = hidden_states
        hidden_states = shortcut + self.mlp(self.post_attention_layernorm(hidden_states))

        # Restore the (batch, time, seq, dim) layout.
        hidden_states = einops.rearrange(hidden_states, '(b t) n d -> b t n d', b=batch)
        if output_attentions:
            return (hidden_states, attn_weights)
        return (hidden_states,)
class MplugOwlPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
| config_class = MplugOwlConfig | 0 | 2023-11-07 06:28:09+00:00 | 8k |
XinyuanWangCS/PromptAgent | src/tasks/ncbi.py | [
{
"identifier": "BaseDataset",
"path": "src/tasks/base_task.py",
"snippet": "class BaseDataset(Dataset):\n def __init__(self, dataset):\n self.dataset = dataset\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, index):\n return self.dataset[index]"... | import re
import re
import string
import numpy as np
import random
import random
from .base_task import BaseDataset, BaseTask
from collections import defaultdict
from datasets import load_dataset | 4,443 | if verbose, print overall performance, as well as preformance per chunk type;
otherwise, simply return overall prec, rec, f1 scores
"""
# sum counts
sum_correct_chunks = sum(correct_chunks.values())
sum_true_chunks = sum(true_chunks.values())
sum_pred_chunks = sum(pred_chunks.values())
sum_correct_counts = sum(correct_counts.values())
sum_true_counts = sum(true_counts.values())
nonO_correct_counts = sum(v for k, v in correct_counts.items() if k != 'O')
nonO_true_counts = sum(v for k, v in true_counts.items() if k != 'O')
chunk_types = sorted(list(set(list(true_chunks) + list(pred_chunks))))
# compute overall precision, recall and FB1 (default values are 0.0)
prec, rec, f1 = calc_metrics(sum_correct_chunks, sum_pred_chunks, sum_true_chunks)
res = (prec, rec, f1)
if not verbose:
return res
# print overall performance, and performance per chunk type
print("processed %i tokens with %i phrases; " % (sum_true_counts, sum_true_chunks), end='')
print("found: %i phrases; correct: %i.\n" % (sum_pred_chunks, sum_correct_chunks), end='')
if nonO_correct_counts > 0:
print("accuracy: %6.2f%%; (non-O)" % (100 * nonO_correct_counts / nonO_true_counts))
print("accuracy: %6.2f%%; " % (100 * sum_correct_counts / sum_true_counts), end='')
print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" % (prec, rec, f1))
else:
print("accuracy: %6.2f%%; (non-O)" % 0)
print("accuracy: %6.2f%%; " % 0, end='')
print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" % (prec, rec, f1))
# for each chunk type, compute precision, recall and FB1 (default values are 0.0)
for t in chunk_types:
prec, rec, f1 = calc_metrics(correct_chunks[t], pred_chunks[t], true_chunks[t])
print("%17s: " % t, end='')
print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" %
(prec, rec, f1), end='')
print(" %d" % pred_chunks[t])
return res
# you can generate LaTeX output for tables like in
# http://cnts.uia.ac.be/conll2003/ner/example.tex
# but I'm not implementing this
def evaluate(true_seqs, pred_seqs, verbose=True):
    """Score predicted tag sequences against gold tags.

    Returns the overall (precision, recall, f1) triple; when *verbose* a
    per-chunk-type report is also printed.
    """
    counts = count_chunks(true_seqs, pred_seqs)
    return get_result(*counts, verbose)
def evaluate_conll_file(fileIterator):
    """Parse a CoNLL-format stream (gold tag in column -2, prediction in -1)
    and return its evaluation result."""
    true_seqs = []
    pred_seqs = []
    for line in fileIterator:
        cols = line.strip().split()
        if not cols:
            # Sentence separator: pad both sequences with the outside tag.
            true_seqs.append('O')
            pred_seqs.append('O')
            continue
        if len(cols) < 3:
            # Each non-empty line must contain >= 3 columns.
            raise IOError("conlleval: too few columns in line %s\n" % line)
        true_seqs.append(cols[-2])
        pred_seqs.append(cols[-1])
    return evaluate(true_seqs, pred_seqs)
class CustomDataLoader:
    """Minimal batching loader over a map-style dataset of
    {'question': ..., 'answer': ...} records."""

    def __init__(self, dataset, batch_size, shuffle=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __iter__(self):
        order = list(range(len(self.dataset)))
        if self.shuffle:
            random.shuffle(order)
        for start in range(0, len(order), self.batch_size):
            records = [self.dataset[j] for j in order[start:start + self.batch_size]]
            yield self._collate_fn(records)

    def _collate_fn(self, batch_data):
        # Transpose a list of records into a batch dict of tuples.
        questions = tuple(item['question'] for item in batch_data)
        answers = tuple(item['answer'] for item in batch_data)
        return {'question': questions, 'answer': answers, }

    def __len__(self):
        # Ceiling division: a trailing partial batch still counts.
        return (len(self.dataset) + self.batch_size - 1) // self.batch_size
# def split_hf_dataset(self, hf_dataset, train_frac, val_frac):
# total_samples = len(hf_dataset)
# train_end = int(total_samples * train_frac)
# val_end = train_end + int(total_samples * val_frac)
# train_set = hf_dataset[:train_end]
# val_set = hf_dataset[train_end:val_end]
# return train_set, val_set
# def set_datasets(self, hf_datasets, train_frac=0.8, val_frac=0.1):
# # split the huggingface train set into train and validation
# train_set, val_set = self.split_hf_dataset(hf_datasets['train'], train_frac, val_frac)
# self.dataset = {
# 'train': train_set,
# 'val': val_set,
# 'test': hf_datasets['test'],
# 'eval': hf_datasets['eval']
# }
| # define task prompts for various datasets
def split_tag(chunk_tag):
    """Split an IOBES chunk tag into its (prefix, chunk_type) pair.

    e.g.
        B-PER -> ('B', 'PER')
        O     -> ('O', None)

    Only the first '-' is significant, so chunk types containing dashes
    survive intact. Assumes tagged inputs contain a '-' (a bare 'B' would
    yield a 1-tuple and break two-way unpacking in callers, as before).
    """
    if chunk_tag == 'O':
        return ('O', None)
    # Return a tuple to match the 'O' branch and the documented contract
    # (the original returned a list here, a tuple there).
    return tuple(chunk_tag.split('-', maxsplit=1))
def is_chunk_end(prev_tag, tag):
    """Return True when the chunk open at *prev_tag* closes before *tag*.

    e.g.
        (B-PER, I-PER) -> False
        (B-LOC, O)     -> True

    Contradicting tags such as (B-PER, I-LOC) are treated as (B-PER, B-LOC).
    """
    prev_prefix, prev_type = split_tag(prev_tag)
    cur_prefix, cur_type = split_tag(tag)
    if prev_prefix == 'O':
        # Nothing was open, so nothing can end.
        return False
    if cur_prefix == 'O':
        return prev_prefix != 'O'
    if prev_type != cur_type:
        return True
    return cur_prefix in ('B', 'S') or prev_prefix in ('E', 'S')
def is_chunk_start(prev_tag, tag):
    """Return True when a new chunk opens between *prev_tag* and *tag*."""
    prev_prefix, prev_type = split_tag(prev_tag)
    cur_prefix, cur_type = split_tag(tag)
    if cur_prefix == 'O':
        # The outside tag never opens a chunk.
        return False
    if prev_prefix == 'O':
        return cur_prefix != 'O'
    if prev_type != cur_type:
        return True
    return cur_prefix in ('B', 'S') or prev_prefix in ('E', 'S')
def calc_metrics(tp, p, t, percent=False):
    """Compute (precision, recall, FB1) from counts.

    tp: true positives, p: total predicted, t: total true. A zero
    denominator yields 0 instead of raising; if *percent* is True each
    value is scaled by 100.
    """
    precision = tp / p if p else 0
    recall = tp / t if t else 0
    denom = precision + recall
    fb1 = 2 * precision * recall / denom if denom else 0
    scale = 100 if percent else 1
    return scale * precision, scale * recall, scale * fb1
def count_chunks(true_seqs, pred_seqs):
    """
    true_seqs: a list of true tags
    pred_seqs: a list of predicted tags

    return:
    correct_chunks: a dict (counter),
        key = chunk types,
        value = number of correctly identified chunks per type
    true_chunks: a dict, number of true chunks per type
    pred_chunks: a dict, number of identified chunks per type
    correct_counts, true_counts, pred_counts: similar to above, but for tags
    """
    correct_chunks = defaultdict(int)
    true_chunks = defaultdict(int)
    pred_chunks = defaultdict(int)
    correct_counts = defaultdict(int)
    true_counts = defaultdict(int)
    pred_counts = defaultdict(int)
    prev_true_tag, prev_pred_tag = 'O', 'O'
    # Type of the chunk currently open in *both* sequences and identical so
    # far; None when no such candidate chunk is in progress.
    correct_chunk = None
    for true_tag, pred_tag in zip(true_seqs, pred_seqs):
        # Token-level (tag) statistics.
        if true_tag == pred_tag:
            correct_counts[true_tag] += 1
        true_counts[true_tag] += 1
        pred_counts[pred_tag] += 1
        _, true_type = split_tag(true_tag)
        _, pred_type = split_tag(pred_tag)
        if correct_chunk is not None:
            true_end = is_chunk_end(prev_true_tag, true_tag)
            pred_end = is_chunk_end(prev_pred_tag, pred_tag)
            if pred_end and true_end:
                # Both chunks close at the same point: a fully correct chunk.
                correct_chunks[correct_chunk] += 1
                correct_chunk = None
            elif pred_end != true_end or true_type != pred_type:
                # Boundaries diverge (or types stop matching): chunk is wrong.
                correct_chunk = None
        true_start = is_chunk_start(prev_true_tag, true_tag)
        pred_start = is_chunk_start(prev_pred_tag, pred_tag)
        if true_start and pred_start and true_type == pred_type:
            # Both sequences open a chunk of the same type here: new candidate.
            correct_chunk = true_type
        if true_start:
            true_chunks[true_type] += 1
        if pred_start:
            pred_chunks[pred_type] += 1
        prev_true_tag, prev_pred_tag = true_tag, pred_tag
    if correct_chunk is not None:
        # A matching chunk may still be open at the end of the sequence.
        correct_chunks[correct_chunk] += 1
    return (correct_chunks, true_chunks, pred_chunks,
            correct_counts, true_counts, pred_counts)
def get_result(correct_chunks, true_chunks, pred_chunks,
               correct_counts, true_counts, pred_counts, verbose=True):
    """Aggregate chunk/tag counts into overall (precision, recall, FB1).

    If *verbose*, print overall performance as well as performance per chunk
    type; otherwise simply return the overall prec, rec, f1 scores.
    """
    # sum counts
    sum_correct_chunks = sum(correct_chunks.values())
    sum_true_chunks = sum(true_chunks.values())
    sum_pred_chunks = sum(pred_chunks.values())
    sum_correct_counts = sum(correct_counts.values())
    sum_true_counts = sum(true_counts.values())
    nonO_correct_counts = sum(v for k, v in correct_counts.items() if k != 'O')
    nonO_true_counts = sum(v for k, v in true_counts.items() if k != 'O')
    chunk_types = sorted(list(set(list(true_chunks) + list(pred_chunks))))
    # compute overall precision, recall and FB1 (default values are 0.0)
    # NOTE(review): calc_metrics defaults to fractions (percent=False), so
    # the "%6.2f%%" printouts below render 0-1 fractions — confirm intended.
    prec, rec, f1 = calc_metrics(sum_correct_chunks, sum_pred_chunks, sum_true_chunks)
    res = (prec, rec, f1)
    if not verbose:
        return res
    # print overall performance, and performance per chunk type
    print("processed %i tokens with %i phrases; " % (sum_true_counts, sum_true_chunks), end='')
    print("found: %i phrases; correct: %i.\n" % (sum_pred_chunks, sum_correct_chunks), end='')
    # Guard each ratio separately. The original printed 0%% for *both*
    # accuracies whenever no non-O token was correct, hiding the real
    # token-level accuracy (and shared one guard for two unrelated ratios).
    nonO_acc = 100 * nonO_correct_counts / nonO_true_counts if nonO_true_counts else 0
    overall_acc = 100 * sum_correct_counts / sum_true_counts if sum_true_counts else 0
    print("accuracy: %6.2f%%; (non-O)" % nonO_acc)
    print("accuracy: %6.2f%%; " % overall_acc, end='')
    print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" % (prec, rec, f1))
    # for each chunk type, compute precision, recall and FB1 (default values are 0.0)
    for t in chunk_types:
        prec, rec, f1 = calc_metrics(correct_chunks[t], pred_chunks[t], true_chunks[t])
        print("%17s: " % t, end='')
        print("precision: %6.2f%%; recall: %6.2f%%; FB1: %6.2f%%" %
              (prec, rec, f1), end='')
        print(" %d" % pred_chunks[t])
    return res
# you can generate LaTeX output for tables like in
# http://cnts.uia.ac.be/conll2003/ner/example.tex
# but I'm not implementing this
def evaluate(true_seqs, pred_seqs, verbose=True):
    """Score predicted tag sequences against gold tags.

    Returns the overall (precision, recall, f1) triple; when *verbose* a
    per-chunk-type report is also printed.
    """
    counts = count_chunks(true_seqs, pred_seqs)
    return get_result(*counts, verbose)
def evaluate_conll_file(fileIterator):
    """Parse a CoNLL-format stream (gold tag in column -2, prediction in -1)
    and return its evaluation result."""
    true_seqs = []
    pred_seqs = []
    for line in fileIterator:
        cols = line.strip().split()
        if not cols:
            # Sentence separator: pad both sequences with the outside tag.
            true_seqs.append('O')
            pred_seqs.append('O')
            continue
        if len(cols) < 3:
            # Each non-empty line must contain >= 3 columns.
            raise IOError("conlleval: too few columns in line %s\n" % line)
        true_seqs.append(cols[-2])
        pred_seqs.append(cols[-1])
    return evaluate(true_seqs, pred_seqs)
class CustomDataLoader:
    """Minimal batching loader over a map-style dataset of
    {'question': ..., 'answer': ...} records."""

    def __init__(self, dataset, batch_size, shuffle=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __iter__(self):
        order = list(range(len(self.dataset)))
        if self.shuffle:
            random.shuffle(order)
        for start in range(0, len(order), self.batch_size):
            records = [self.dataset[j] for j in order[start:start + self.batch_size]]
            yield self._collate_fn(records)

    def _collate_fn(self, batch_data):
        # Transpose a list of records into a batch dict of tuples.
        questions = tuple(item['question'] for item in batch_data)
        answers = tuple(item['answer'] for item in batch_data)
        return {'question': questions, 'answer': answers, }

    def __len__(self):
        # Ceiling division: a trailing partial batch still counts.
        return (len(self.dataset) + self.batch_size - 1) // self.batch_size
# def split_hf_dataset(self, hf_dataset, train_frac, val_frac):
# total_samples = len(hf_dataset)
# train_end = int(total_samples * train_frac)
# val_end = train_end + int(total_samples * val_frac)
# train_set = hf_dataset[:train_end]
# val_set = hf_dataset[train_end:val_end]
# return train_set, val_set
# def set_datasets(self, hf_datasets, train_frac=0.8, val_frac=0.1):
# # split the huggingface train set into train and validation
# train_set, val_set = self.split_hf_dataset(hf_datasets['train'], train_frac, val_frac)
# self.dataset = {
# 'train': train_set,
# 'val': val_set,
# 'test': hf_datasets['test'],
# 'eval': hf_datasets['eval']
# }
| class NCBIDataset(BaseDataset): | 0 | 2023-11-03 19:14:00+00:00 | 8k |
bytedance/cryostar | projects/star/miscs.py | [
{
"identifier": "ca_ca",
"path": "cryostar/common/residue_constants.py",
"snippet": "def load_stereo_chemical_props(\n) -> Tuple[Mapping[str, List[Bond]], Mapping[str, List[Bond]], Mapping[str, List[BondAngle]]]:\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(sequence: str, mapp... | from functools import lru_cache
from pathlib import Path
from torch import linalg as LA
from torch import nn
from cryostar.common.residue_constants import ca_ca
from cryostar.utils.misc import log_to_current
from cryostar.utils.ml_modules import VAEEncoder, Decoder, reparameterize
from cryostar.utils.ctf import parse_ctf_star
from lightning.pytorch.utilities import rank_zero_only
from typing import Union
import einops
import numpy as np
import cupy as cp # type: ignore
import torch
import torch.nn.functional as F | 5,056 |
chain_pairs = [[] for _ in range(len(chain2idx))]
pair_index_np = pair_index.cpu().numpy()
pair_chain_id = chain_id[pair_index_np]
for pair_idx, pair in enumerate(pair_chain_id):
if pair[0] == pair[1]:
chain_pairs[chain2idx[pair[0]]].append(pair_idx)
chain_pairs = [torch.tensor(ele, device=device) for ele in chain_pairs if len(ele) > 10]
return chain_pairs
def calc_pair_dist_loss(pred_struc, pair_index, target_dist, type="vanilla", chain_id=None):
    """Pairwise-distance restraint loss between predicted structures and reference distances.

    pred_struc: (bsz, num_atoms, 3) predicted coordinates.
    pair_index: (num_pair, 2) atom-index pairs to restrain.
    target_dist: (num_pair,) reference distances for those pairs.
    type: variant selector ("vanilla", "all-var-relax[@p..|@q..]",
        "chain-var-relax[@..]", "inverse", "dynamic[@ratio]", "90p");
        unknown values raise NotImplementedError.
        NOTE(review): shadows the builtin `type`; kept for interface compatibility.
    chain_id: per-atom chain labels, used only by "chain-var-relax".
    """
    bsz = pred_struc.shape[0]
    # Gather the two endpoints of every pair and take the Euclidean distance.
    pred_dist = pred_struc[:, pair_index]  # bsz, num_pair, 2, 3
    pred_dist = LA.vector_norm(torch.diff(pred_dist, dim=-2), axis=-1).squeeze(-1)  # bsz, num_pair
    if type == "vanilla":
        # Plain MSE between predicted and reference pair distances.
        return F.mse_loss(pred_dist, target_dist.repeat(bsz, 1))
    elif "all-var-relax" in type:
        # optional value:
        # all-var-relax@p0.99 keep bonds whose variance is the smallest 99%
        # all-var-relax@q1.0 keep bonds whose variance >= 1.0
        if "@" in type:
            arg = type.split("@")[1]
            assert arg[0] in ["p", "q"]
            use_percentile = arg[0] == "p"
            loss_filter = float(arg[1:])
        else:
            use_percentile = True
            loss_filter = 0.99
        loss = F.mse_loss(pred_dist, target_dist.repeat(bsz, 1), reduction="none")
        # Per-pair error variance across the batch; high-variance pairs are
        # treated as flexible and dropped from the restraint.
        loss_var = loss.var(0, keepdim=False).detach()
        # if "var-relax-ema" in type:
        #     other.running_variance = 0.9 * other.running_variance + 0.1 * loss_var
        #     loss_var = other.running_variance
        # Occasionally (p=0.001 per call) log variance statistics for monitoring.
        if np.random.rand() < 0.001:
            log_to_current("variance statistics:")
            q = [0.0, 0.9, 0.95, 0.97, 0.99, 0.999]
            v = torch.quantile(loss_var, torch.tensor(q, device=loss.device)).tolist()
            log_to_current("|".join([f" {q[i] * 100}%: {v[i]:.3f} " for i in range(len(q))]))
            p = [0.25, 1.0, 4.0, 16.0]
            v = [(loss_var > p[i]).sum() / len(loss_var) for i in range(len(p))]
            log_to_current("|".join([f" {p[i]}: {v[i] * 100:.1f}% " for i in range(len(p))]))
        if use_percentile:
            # Keep the fraction of pairs with the smallest variance.
            loss_ind = loss_var.sort(descending=False).indices
            loss = loss.index_select(1, loss_ind[:int(len(loss_var) * loss_filter)])
        else:
            # Keep pairs whose variance is below the absolute threshold.
            loss_mask = loss_var < loss_filter
            loss = loss[loss_mask[None, :].repeat(bsz, 1)]
        avg_loss = loss.mean()
        return avg_loss
    elif "chain-var-relax" in type:
        # Same variance-based filtering, but applied independently per chain.
        if "@" in type:
            arg = type.split("@")[1]
            # NOTE(review): arg[1:] drops the first character, so the value is
            # expected to carry a one-char prefix (e.g. "@p0.95") — confirm.
            loss_filter = float(arg[1:])
        else:
            loss_filter = 0.95
        loss = F.mse_loss(pred_dist, target_dist.repeat(bsz, 1), reduction="none")
        chain_pairs = prepare_dynamic_intra_chain_loss(tuple(chain_id), pair_index)
        chain_losses = []
        for i in range(len(chain_pairs)):
            chain_loss = loss.index_select(1, chain_pairs[i])
            chain_loss_var = chain_loss.var(0, keepdim=False).detach()
            chain_loss_ind = chain_loss_var.sort(descending=False).indices
            chain_loss = chain_loss.index_select(1, chain_loss_ind[:int(len(chain_loss_var) * loss_filter)])
            chain_losses.append(chain_loss)
        loss = torch.cat(chain_losses, 1)
        avg_loss = loss.mean()
        return avg_loss
    elif type == "inverse":
        # Full MSE below distance 6; beyond 6 the error is down-weighted by
        # 1 / (d - 5), so long-range restraints fade out smoothly.
        target_dist = target_dist.repeat(bsz, 1)
        loss = F.mse_loss(pred_dist, target_dist, reduction="none")
        lt6_loss = (loss[target_dist <= 6]).sum()
        gt6_loss = loss[target_dist > 6]
        gt6_weight = 1 / (target_dist[target_dist > 6].detach() - 5)
        gt6_loss = (gt6_loss * gt6_weight).sum()
        total_loss = lt6_loss + gt6_loss
        avg_loss = total_loss / target_dist.numel()
        return avg_loss
    elif "dynamic" in type:
        # Per-node filtering: for each node keep only the `ratio` fraction of
        # its incident pairs with the smallest error.
        if "@" in type:
            ratio = float(type.split("@")[1])
        else:
            ratio = 0.85
        num_nodes = pred_struc.shape[1]
        num_node_nbrs, reshape_indices, reshape_valid_mask, reshape_top_p_mask = prepare_dynamic_loss(
            num_nodes, pair_index, ratio)
        dist_mse = (pred_dist - target_dist)**2  # bsz x num_nodes
        dist_mse = dist_mse.index_select(1, reshape_indices.reshape(-1))  # bsz x (num_nodes * max_node_nbr)
        dist_mse = dist_mse.reshape(bsz, num_nodes, num_node_nbrs.max())
        # Padding slots get a huge value so they sort to the end and are then
        # excluded by the top-p mask.
        dist_mse = dist_mse.masked_fill(~reshape_valid_mask[None, ...], 10000.)
        dist_mse = dist_mse.sort(descending=False, dim=2).values  # bsz x num_nodes x max_node_nbr
        batch_mask = einops.repeat(reshape_top_p_mask, "num_nodes max_node_nbr -> bsz num_nodes max_node_nbr", bsz=bsz)
        avg_loss = dist_mse[batch_mask].sum() / batch_mask.sum()
        return avg_loss
    elif type == "90p":
        # Keep only the smallest 90% of per-pair errors within each batch row.
        target_dist = target_dist.repeat(bsz, 1)
        loss = F.mse_loss(pred_dist, target_dist, reduction="none")
        mask = torch.le(loss, torch.quantile(loss, 0.9, dim=1, keepdim=True))
        avg_loss = loss[mask].sum() / mask.float().sum()
        return avg_loss
    else:
        raise NotImplementedError
class VAE(nn.Module):
def __init__(
self,
encoder_cls: str,
decoder_cls: str,
in_dim: int,
e_hidden_dim: Union[int, list, tuple],
latent_dim: int,
d_hidden_dim: Union[int, list, tuple],
out_dim: int,
e_hidden_layers: int,
d_hidden_layers: int,
):
super().__init__()
if encoder_cls == "MLP":
|
try:
except ImportError:
cp = np
# Idealized CA-CA bond length from residue constants, rounded to 2 decimals
# for use as a distance target.
CA_CA = round(ca_ca, 2)
# Only log from rank 0 in distributed runs.
log_to_current = rank_zero_only(log_to_current)
def infer_ctf_params_from_config(cfg):
    """Build a CTF parameter dict from the star file referenced by *cfg*.

    Microscope parameters (columns 5-7 of the parsed CTF row: kV, spherical
    aberration, amplitude contrast) come from the first particle; size and
    resolution come from the (possibly downsampled) processing settings.
    """
    row = parse_ctf_star(
        Path(cfg.dataset_attr.starfile_path),
        side_shape=cfg.data_process.down_side_shape,
        apix=cfg.data_process.down_apix,
    )[0].tolist()
    return {
        "size": cfg.data_process.down_side_shape,
        "resolution": cfg.data_process.down_apix,
        "kV": row[5],
        "cs": row[6],
        "amplitudeContrast": row[7],
    }
def low_pass_mask3d(shape, apix=1., bandwidth=2):
    """Binary 3D low-pass mask in (fftshifted) frequency space.

    Voxels whose radial frequency is strictly below 1/bandwidth are 1.0,
    the rest 0.0. Returned as float32 of shape (shape, shape, shape).
    """
    f_sq = np.fft.fftshift(np.fft.fftfreq(shape, apix)) ** 2
    radius = np.sqrt(f_sq[:, None, None] + f_sq[None, :, None] + f_sq[None, None])
    mask = (radius < 1 / bandwidth).astype(np.float32)
    # Trick to avoid "ringing"; increase sigma to about 11 to remove the
    # artifact completely: gaussian_filter(mask, 3, output=mask)
    return mask
def low_pass_mask2d(shape, apix=1., bandwidth=2):
    """Binary 2D low-pass mask in (fftshifted) frequency space.

    Pixels whose radial frequency is strictly below 1/bandwidth are 1.0,
    the rest 0.0. Returned as float32 of shape (shape, shape).
    """
    f_sq = np.fft.fftshift(np.fft.fftfreq(shape, apix)) ** 2
    radius = np.sqrt(f_sq[:, None] + f_sq[None, :])
    return (radius < 1 / bandwidth).astype(np.float32)
def calc_clash_loss(pred_struc, pair_index, clash_cutoff=4.0):
    """Penalize atom pairs closer than *clash_cutoff*.

    pred_struc: (bsz, num_atoms, 3) coordinates; pair_index: (num_pair, 2)
    index pairs to check. Returns the mean squared shortfall over clashing
    pairs, or a zero scalar tensor when nothing clashes.
    """
    endpoints = pred_struc[:, pair_index]  # bsz, num_pair, 2, 3
    dist = LA.vector_norm(torch.diff(endpoints, dim=-2), axis=-1).squeeze(-1)  # bsz, num_pair
    clashing = dist[dist < clash_cutoff]
    if clashing.numel() == 0:
        return torch.tensor(0.0).to(pred_struc)
    return ((clash_cutoff - clashing) ** 2).mean()
@lru_cache(maxsize=None)
def prepare_dynamic_loss(
    num_nodes: int,
    pair_index: torch.LongTensor,  # shape: (edge, 2)
    top_p_ratio: float,
):
    """Precompute per-node neighbor bookkeeping for the dynamic pair loss.

    The left column of pair_index must be sorted in ascending order, e.g.
    [[0, _], [0, _], [1, _], [2, _], ...].

    Returns (num_node_nbrs, reshape_indices, reshape_valid_mask,
    reshape_top_p_mask), all padded to the maximum neighbor count. Cached on
    the *identity* of pair_index (tensors hash by id), so pass the same
    tensor object on repeated calls.
    """
    device = pair_index.device
    # Neighbor count per left node.
    counts = [0] * num_nodes
    for left in pair_index[:, 0].tolist():
        counts[left] += 1
    num_node_nbrs = torch.tensor(counts, device=device)

    width = max(counts)
    reshape_indices = torch.zeros(num_nodes, width, dtype=torch.long, device=device)
    reshape_valid_mask = torch.zeros(num_nodes, width, dtype=torch.bool, device=device)
    reshape_top_p_mask = torch.zeros(num_nodes, width, dtype=torch.bool, device=device)
    offset = 0
    for node, cnt in enumerate(counts):
        # Edge ids owned by this node occupy a contiguous run [offset, offset+cnt).
        reshape_indices[node, :cnt] = offset + torch.arange(cnt, device=device)
        reshape_valid_mask[node, :cnt] = True
        # Only the first floor(top_p_ratio * cnt) sorted slots are kept.
        reshape_top_p_mask[node, :int(top_p_ratio * cnt)] = True
        offset += cnt
    return num_node_nbrs, reshape_indices, reshape_valid_mask, reshape_top_p_mask
@lru_cache(maxsize=None)
def prepare_dynamic_intra_chain_loss(
    chain_id: tuple,  # shape: (node, ), converted from np.ndarray since it may be unhashable
    pair_index: torch.LongTensor,  # shape: (edge, 2)
):
    """Group pair indices by chain, keeping only intra-chain pairs.

    Returns one LongTensor of pair indices per chain that has more than 10
    intra-chain pairs; cross-chain pairs and tiny chains are dropped.
    Cached on the identity of pair_index (tensors hash by id).
    """
    device = pair_index.device
    labels = np.array(chain_id)
    chain2idx = {name: i for i, name in enumerate(set(labels))}

    buckets = [[] for _ in chain2idx]
    endpoint_chains = labels[pair_index.cpu().numpy()]
    for pair_idx, (left, right) in enumerate(endpoint_chains):
        if left == right:
            buckets[chain2idx[left]].append(pair_idx)
    return [torch.tensor(b, device=device) for b in buckets if len(b) > 10]
def calc_pair_dist_loss(pred_struc, pair_index, target_dist, type="vanilla", chain_id=None):
    """Pairwise-distance restraint loss between predicted structures and reference distances.

    pred_struc: (bsz, num_atoms, 3) predicted coordinates.
    pair_index: (num_pair, 2) atom-index pairs to restrain.
    target_dist: (num_pair,) reference distances for those pairs.
    type: variant selector ("vanilla", "all-var-relax[@p..|@q..]",
        "chain-var-relax[@..]", "inverse", "dynamic[@ratio]", "90p");
        unknown values raise NotImplementedError.
        NOTE(review): shadows the builtin `type`; kept for interface compatibility.
    chain_id: per-atom chain labels, used only by "chain-var-relax".
    """
    bsz = pred_struc.shape[0]
    # Gather the two endpoints of every pair and take the Euclidean distance.
    pred_dist = pred_struc[:, pair_index]  # bsz, num_pair, 2, 3
    pred_dist = LA.vector_norm(torch.diff(pred_dist, dim=-2), axis=-1).squeeze(-1)  # bsz, num_pair
    if type == "vanilla":
        # Plain MSE between predicted and reference pair distances.
        return F.mse_loss(pred_dist, target_dist.repeat(bsz, 1))
    elif "all-var-relax" in type:
        # optional value:
        # all-var-relax@p0.99 keep bonds whose variance is the smallest 99%
        # all-var-relax@q1.0 keep bonds whose variance >= 1.0
        if "@" in type:
            arg = type.split("@")[1]
            assert arg[0] in ["p", "q"]
            use_percentile = arg[0] == "p"
            loss_filter = float(arg[1:])
        else:
            use_percentile = True
            loss_filter = 0.99
        loss = F.mse_loss(pred_dist, target_dist.repeat(bsz, 1), reduction="none")
        # Per-pair error variance across the batch; high-variance pairs are
        # treated as flexible and dropped from the restraint.
        loss_var = loss.var(0, keepdim=False).detach()
        # if "var-relax-ema" in type:
        #     other.running_variance = 0.9 * other.running_variance + 0.1 * loss_var
        #     loss_var = other.running_variance
        # Occasionally (p=0.001 per call) log variance statistics for monitoring.
        if np.random.rand() < 0.001:
            log_to_current("variance statistics:")
            q = [0.0, 0.9, 0.95, 0.97, 0.99, 0.999]
            v = torch.quantile(loss_var, torch.tensor(q, device=loss.device)).tolist()
            log_to_current("|".join([f" {q[i] * 100}%: {v[i]:.3f} " for i in range(len(q))]))
            p = [0.25, 1.0, 4.0, 16.0]
            v = [(loss_var > p[i]).sum() / len(loss_var) for i in range(len(p))]
            log_to_current("|".join([f" {p[i]}: {v[i] * 100:.1f}% " for i in range(len(p))]))
        if use_percentile:
            # Keep the fraction of pairs with the smallest variance.
            loss_ind = loss_var.sort(descending=False).indices
            loss = loss.index_select(1, loss_ind[:int(len(loss_var) * loss_filter)])
        else:
            # Keep pairs whose variance is below the absolute threshold.
            loss_mask = loss_var < loss_filter
            loss = loss[loss_mask[None, :].repeat(bsz, 1)]
        avg_loss = loss.mean()
        return avg_loss
    elif "chain-var-relax" in type:
        # Same variance-based filtering, but applied independently per chain.
        if "@" in type:
            arg = type.split("@")[1]
            # NOTE(review): arg[1:] drops the first character, so the value is
            # expected to carry a one-char prefix (e.g. "@p0.95") — confirm.
            loss_filter = float(arg[1:])
        else:
            loss_filter = 0.95
        loss = F.mse_loss(pred_dist, target_dist.repeat(bsz, 1), reduction="none")
        chain_pairs = prepare_dynamic_intra_chain_loss(tuple(chain_id), pair_index)
        chain_losses = []
        for i in range(len(chain_pairs)):
            chain_loss = loss.index_select(1, chain_pairs[i])
            chain_loss_var = chain_loss.var(0, keepdim=False).detach()
            chain_loss_ind = chain_loss_var.sort(descending=False).indices
            chain_loss = chain_loss.index_select(1, chain_loss_ind[:int(len(chain_loss_var) * loss_filter)])
            chain_losses.append(chain_loss)
        loss = torch.cat(chain_losses, 1)
        avg_loss = loss.mean()
        return avg_loss
    elif type == "inverse":
        # Full MSE below distance 6; beyond 6 the error is down-weighted by
        # 1 / (d - 5), so long-range restraints fade out smoothly.
        target_dist = target_dist.repeat(bsz, 1)
        loss = F.mse_loss(pred_dist, target_dist, reduction="none")
        lt6_loss = (loss[target_dist <= 6]).sum()
        gt6_loss = loss[target_dist > 6]
        gt6_weight = 1 / (target_dist[target_dist > 6].detach() - 5)
        gt6_loss = (gt6_loss * gt6_weight).sum()
        total_loss = lt6_loss + gt6_loss
        avg_loss = total_loss / target_dist.numel()
        return avg_loss
    elif "dynamic" in type:
        # Per-node filtering: for each node keep only the `ratio` fraction of
        # its incident pairs with the smallest error.
        if "@" in type:
            ratio = float(type.split("@")[1])
        else:
            ratio = 0.85
        num_nodes = pred_struc.shape[1]
        num_node_nbrs, reshape_indices, reshape_valid_mask, reshape_top_p_mask = prepare_dynamic_loss(
            num_nodes, pair_index, ratio)
        dist_mse = (pred_dist - target_dist)**2  # bsz x num_nodes
        dist_mse = dist_mse.index_select(1, reshape_indices.reshape(-1))  # bsz x (num_nodes * max_node_nbr)
        dist_mse = dist_mse.reshape(bsz, num_nodes, num_node_nbrs.max())
        # Padding slots get a huge value so they sort to the end and are then
        # excluded by the top-p mask.
        dist_mse = dist_mse.masked_fill(~reshape_valid_mask[None, ...], 10000.)
        dist_mse = dist_mse.sort(descending=False, dim=2).values  # bsz x num_nodes x max_node_nbr
        batch_mask = einops.repeat(reshape_top_p_mask, "num_nodes max_node_nbr -> bsz num_nodes max_node_nbr", bsz=bsz)
        avg_loss = dist_mse[batch_mask].sum() / batch_mask.sum()
        return avg_loss
    elif type == "90p":
        # Keep only the smallest 90% of per-pair errors within each batch row.
        target_dist = target_dist.repeat(bsz, 1)
        loss = F.mse_loss(pred_dist, target_dist, reduction="none")
        mask = torch.le(loss, torch.quantile(loss, 0.9, dim=1, keepdim=True))
        avg_loss = loss[mask].sum() / mask.float().sum()
        return avg_loss
    else:
        raise NotImplementedError
class VAE(nn.Module):
def __init__(
self,
encoder_cls: str,
decoder_cls: str,
in_dim: int,
e_hidden_dim: Union[int, list, tuple],
latent_dim: int,
d_hidden_dim: Union[int, list, tuple],
out_dim: int,
e_hidden_layers: int,
d_hidden_layers: int,
):
super().__init__()
if encoder_cls == "MLP": | self.encoder = VAEEncoder(in_dim, e_hidden_dim, latent_dim, e_hidden_layers) | 2 | 2023-11-06 07:15:26+00:00 | 8k |
xyongLu/SBCFormer | main.py | [
{
"identifier": "Mixup",
"path": "mixup.py",
"snippet": "class Mixup:\n \"\"\" Mixup/Cutmix that applies different params to each element or whole batch\n\n Args:\n mixup_alpha (float): mixup alpha value, mixup is active if > 0.\n cutmix_alpha (float): cutmix alpha value, cutmix is a... | import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import utils
from pathlib import Path
from mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler, get_state_dict, ModelEma
from datasets import build_dataset
from engine import train_one_epoch, evaluate
from losses import DistillationLoss
from samplers import RASampler
from models import * | 7,144 | parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'),
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=False)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters distilled
parser.add_argument('--distilled', action='store_true', default=False, help='Perform distilled ')
parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default= '../../PythonWork_E/Data/ImageNet_2012',#'./data', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100' , 'IMNET'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='./outputs', help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default= '', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', default=False, help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
# test throught
parser.add_argument('--throughout', action='store_true', help='Perform throughout only')
return parser
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for _, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
def main(args):
utils.init_distributed_mode(args)
print('------------ Options -------------')
for key, value in sorted(vars(args).items()):
print('%16.16s: %16.16s' % (str(key), str(value)))
print('-------------- End ----------------')
if args.distillation_type != 'none' and args.finetune and not args.eval:
raise NotImplementedError("Finetuning with distillation not yet supported")
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
| # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
# from ptflops import get_model_complexity_info
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("running on {} device.".format(device))
def get_args_parser():
parser = argparse.ArgumentParser('SlenderViT training and evaluation script', add_help=False)
# Model parameters
parser.add_argument('--uni-note', default='', type=str, help='unique note on the name of model to train')
parser.add_argument('--model', default='SBCFormer_B', type=str, metavar='MODEL',
help='Name of model to train.')
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--in-chans', type=int, default=3, help='the channel of inputs ')
parser.add_argument('--batch-size', default=30, type=int)
parser.add_argument('--drop', type=float, default=0., metavar='PCT', help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
parser.set_defaults(model_ema=False)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (defaudevice = torch.device(args.device)ult: None, no clipping)')
parser.add_argument('--clip-grad', type=float, default=5, metavar='NORM', help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05, help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', help='LR scheduler (default: "cosine"')
parser.add_argument('--lr', type=float, default=2.5e-4, metavar='LR', help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N', help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'),
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=False)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters distilled
parser.add_argument('--distilled', action='store_true', default=False, help='Perform distilled ')
parser.add_argument('--teacher-model', default='regnety_200mf', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--data-path', default= '../../PythonWork_E/Data/ImageNet_2012',#'./data', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR10', 'CIFAR100' , 'IMNET'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='./outputs', help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda', help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default= '', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', default=False, help='Perform evaluation only')
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
# test throught
parser.add_argument('--throughout', action='store_true', help='Perform throughout only')
return parser
@torch.no_grad()
def throughput(data_loader, model, logger):
model.eval()
for _, (images, _) in enumerate(data_loader):
images = images.cuda(non_blocking=True)
batch_size = images.shape[0]
for i in range(50):
model(images)
torch.cuda.synchronize()
logger.info(f"throughput averaged with 30 times")
tic1 = time.time()
for i in range(30):
model(images)
torch.cuda.synchronize()
tic2 = time.time()
logger.info(f"batch_size {batch_size} throughput {30 * batch_size / (tic2 - tic1)}")
return
def main(args):
utils.init_distributed_mode(args)
print('------------ Options -------------')
for key, value in sorted(vars(args).items()):
print('%16.16s: %16.16s' % (str(key), str(value)))
print('-------------- End ----------------')
if args.distillation_type != 'none' and args.finetune and not args.eval:
raise NotImplementedError("Finetuning with distillation not yet supported")
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
cudnn.benchmark = True
| dataset_train, args.nb_classes = build_dataset(is_train=True, args=args) | 1 | 2023-11-06 03:31:47+00:00 | 8k |
mihirp1998/Diffusion-TTA | main.py | [
{
"identifier": "DatasetCatalog",
"path": "dataset/catalog.py",
"snippet": "class DatasetCatalog:\n def __init__(self, config):\n ########### Define image transformations ###########\n mean = config.input.mean\n std = config.input.std\n \n interpolation = Interpolat... | import os
import copy
import random
import warnings
import wandb
import hydra
import numpy as np
import pickle
import torch
import torch.backends.cudnn as cudnn
from hydra.utils import get_original_cwd
from omegaconf import OmegaConf, open_dict
from mergedeep import merge
from dataset.catalog import DatasetCatalog
from diff_tta import utils, engine
from diff_tta.vis_utils import (
visualize_classification_with_image,
visualize_diffusion_loss,
visualize_classification_improvements,
)
from diff_tta.models import build | 4,396 | """Main script for Diffusion-TTA"""
torch.backends.cudnn.benchmark = True
def tta_one_epoch(config, dataloader, tta_model, optimizer, scaler,
autoencoder, image_renormalizer):
"""Perform test time adaptation over the entire dataset.
Args:
config: configuration object for hyper-parameters.
dataloader: The dataloader for the dataset.
tta_model: A test-time adaptation wrapper model.
optimizer: A gradient-descent optimizer for updating classifier.
scaler: A gradient scaler used jointly with optimizer.
autoencoder: A pre-trained autoencoder model (e.g. VQVAE).
image_renormalizer: An object for renormalizing images.
"""
cwd = config.cwd
discrete_sampling_accuracy = []
tta_model.eval()
# Keep a copy of the original model state dict, so that we can reset the
# model after each image
tta_class_state_dict = copy.deepcopy(tta_model.state_dict())
# Enlarge batch size by accumulating gradients over multiple iterations
config.tta.gradient_descent.train_steps = (
config.tta.gradient_descent.train_steps
* config.tta.gradient_descent.accum_iter
)
# Start iterations
start_index = 0
last_index = len(dataloader.dataset)
for img_ind in range(start_index, last_index):
# Enable/disable to upload visualization to wandb
visualize = (
(config.log_freq > 0 and img_ind % config.log_freq == 0)
or img_ind == last_index - 1
)
# The dictionary for visualization
wandb_dict = {}
# Fetch data from the dataset
print(f"\n\n Example: {img_ind}/{last_index} \n\n")
batch = dataloader.dataset[img_ind]
batch = engine.preprocess_input(batch, config.gpu)
# We will classify before and after test-time adaptation via
# gradient descent. We run tta_model.evaluate(batch, after_tta=True) to
# save the classification results
# Step 1: Predict pre-TTA classification. The results are saved in
# `before_tta_stats_dict` and `tta_model.before_tta_acc`
before_tta_stats_dict = tta_model.evaluate(batch, before_tta=True)
# Step 2: TTA by gradient descent
losses, after_tta_outputs = engine.tta_one_image_by_gradient_descent(
batch, tta_model, optimizer, scaler,
autoencoder, image_renormalizer, config,
before_tta_stats_dict['pred_topk_idx']
)
# Step 3: Predict post-TTA classification. The results are saved in
# `after_tta_stats_dict` and `tta_model.after_tta_acc`
after_tta_stats_dict = tta_model.evaluate(batch, after_tta=True)
# Reload the original model state dict
if not config.tta.online:
tta_model.load_state_dict(tta_class_state_dict)
optimizer = build.load_optimizer(config, tta_model)
if visualize:
# wandb_dict is updated in-place
wandb_dict = visualize_classification_with_image(
batch, config, dataloader.dataset,
before_tta_stats_dict["before_tta_logits"],
before_tta_stats_dict["before_tta_topk_idx"],
before_tta_stats_dict["before_tta_pred_class_idx"],
before_tta_stats_dict["before_tta_topk_class_idx"],
wandb_dict
)
wandb_dict = visualize_diffusion_loss(losses, config, wandb_dict)
# Plot accuracy curve every image
| """Main script for Diffusion-TTA"""
torch.backends.cudnn.benchmark = True
def tta_one_epoch(config, dataloader, tta_model, optimizer, scaler,
autoencoder, image_renormalizer):
"""Perform test time adaptation over the entire dataset.
Args:
config: configuration object for hyper-parameters.
dataloader: The dataloader for the dataset.
tta_model: A test-time adaptation wrapper model.
optimizer: A gradient-descent optimizer for updating classifier.
scaler: A gradient scaler used jointly with optimizer.
autoencoder: A pre-trained autoencoder model (e.g. VQVAE).
image_renormalizer: An object for renormalizing images.
"""
cwd = config.cwd
discrete_sampling_accuracy = []
tta_model.eval()
# Keep a copy of the original model state dict, so that we can reset the
# model after each image
tta_class_state_dict = copy.deepcopy(tta_model.state_dict())
# Enlarge batch size by accumulating gradients over multiple iterations
config.tta.gradient_descent.train_steps = (
config.tta.gradient_descent.train_steps
* config.tta.gradient_descent.accum_iter
)
# Start iterations
start_index = 0
last_index = len(dataloader.dataset)
for img_ind in range(start_index, last_index):
# Enable/disable to upload visualization to wandb
visualize = (
(config.log_freq > 0 and img_ind % config.log_freq == 0)
or img_ind == last_index - 1
)
# The dictionary for visualization
wandb_dict = {}
# Fetch data from the dataset
print(f"\n\n Example: {img_ind}/{last_index} \n\n")
batch = dataloader.dataset[img_ind]
batch = engine.preprocess_input(batch, config.gpu)
# We will classify before and after test-time adaptation via
# gradient descent. We run tta_model.evaluate(batch, after_tta=True) to
# save the classification results
# Step 1: Predict pre-TTA classification. The results are saved in
# `before_tta_stats_dict` and `tta_model.before_tta_acc`
before_tta_stats_dict = tta_model.evaluate(batch, before_tta=True)
# Step 2: TTA by gradient descent
losses, after_tta_outputs = engine.tta_one_image_by_gradient_descent(
batch, tta_model, optimizer, scaler,
autoencoder, image_renormalizer, config,
before_tta_stats_dict['pred_topk_idx']
)
# Step 3: Predict post-TTA classification. The results are saved in
# `after_tta_stats_dict` and `tta_model.after_tta_acc`
after_tta_stats_dict = tta_model.evaluate(batch, after_tta=True)
# Reload the original model state dict
if not config.tta.online:
tta_model.load_state_dict(tta_class_state_dict)
optimizer = build.load_optimizer(config, tta_model)
if visualize:
# wandb_dict is updated in-place
wandb_dict = visualize_classification_with_image(
batch, config, dataloader.dataset,
before_tta_stats_dict["before_tta_logits"],
before_tta_stats_dict["before_tta_topk_idx"],
before_tta_stats_dict["before_tta_pred_class_idx"],
before_tta_stats_dict["before_tta_topk_class_idx"],
wandb_dict
)
wandb_dict = visualize_diffusion_loss(losses, config, wandb_dict)
# Plot accuracy curve every image | wandb_dict = visualize_classification_improvements( | 5 | 2023-11-07 21:09:50+00:00 | 8k |
VILA-Lab/GBLM-Pruner | lib/prune.py | [
{
"identifier": "SparseGPT",
"path": "lib/sparsegpt.py",
"snippet": "class SparseGPT:\n\n def __init__(self, layer):\n self.layer = layer\n self.dev = self.layer.weight.device\n W = layer.weight.data.clone()\n if isinstance(self.layer, nn.Conv2d):\n W = W.flatte... | import time
import heapq
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import gc
import csv
import os
from .sparsegpt import SparseGPT
from .layerwrapper import WrappedGPT
from .data import get_loaders
from torch.utils.data import DataLoader
from transformers import AdamW
from pdb import set_trace as st | 4,004 | device = model.hf_device_map["model.embed_tokens"]
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros((nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=device)
inps.requires_grad = False
cache = {'i': 0, 'attention_mask': None, "position_ids": None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['position_ids'] = kwargs['position_ids']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(device))
except ValueError:
pass
layers[0] = layers[0].module
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
position_ids = cache['position_ids']
model.config.use_cache = use_cache
return inps, outs, attention_mask, position_ids
def return_given_alpha(alpha, sort_res, W_metric, tmp_metric, sum_before):
thres_cumsum = sum_before * alpha
sort_mask = tmp_metric <= thres_cumsum.reshape((-1,1))
thres = torch.gather(sort_res[0], dim=1, index=sort_mask.sum(dim=1, keepdims=True)-1)
W_mask = (W_metric <= thres)
cur_sparsity = (W_mask==True).sum() / W_mask.numel()
return W_mask, cur_sparsity
def prune_magnitude(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
for name in subset:
W = subset[name].weight.data
W_metric = torch.abs(W)
if prune_n != 0:
W_mask = (torch.zeros_like(W)==1)
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
# thresh = torch.sort(W_metric.flatten().cuda())[0][int(W.numel()*args.sparsity_ratio)].cpu()
thresh = torch.sort(W_metric.flatten())[0][int(W_metric.numel()*args.sparsity_ratio)].cpu()
W_mask = (W_metric<=thresh)
W[W_mask] = 0
def prune_gradient(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
layers = model.model.layers
with open(args.gradient_path, 'rb') as file:
gradients = torch.load(args.gradient_path, map_location=torch.device('cpu'))
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
for name in subset:
indexed_name = f"{name}_layer_{i}"
W = subset[name].weight.data
W_metric = torch.abs(W)
if not args.gradient_inv:
W_metric = W_metric.to(dtype=torch.float32) * torch.abs(gradients[indexed_name].to(device=W_metric.device)).to(dtype=torch.float32)#+ small_value)
else:
small_value = torch.tensor(1e-8, dtype=gradients[indexed_name].dtype, device=gradients[indexed_name].device)
gradient_inv = 1 / (torch.abs(gradients[indexed_name]) + small_value)
W_metric = W_metric.to(dtype=torch.float32) * gradient_inv.to(device=W_metric.device).to(dtype=torch.float32)
W_mask = (torch.zeros_like(W)==1)
if prune_n != 0:
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
sort_res = torch.sort(W_metric, dim=-1, stable=True)
indices = sort_res[1][:,:int(W_metric.shape[1]*args.sparsity_ratio)]
W_mask.scatter_(1, indices, True)
W[W_mask] = 0
def prune_gblm(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
use_cache = model.config.use_cache
model.config.use_cache = False
with open(args.gradient_path, 'rb') as file:
gradients = torch.load(args.gradient_path, map_location=torch.device('cpu'))
print("loading calibdation data")
dataloader, _ = get_loaders("c4",nsamples=args.nsamples,seed=args.seed,seqlen=2048,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, args.nsamples, device)
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
if f"model.layers.{i}" in model.hf_device_map: ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset:
|
def no_zero(data):
zero_count = (data == 0).sum().item()
return zero_count
def plot_subsampled_matrix_and_save(matrix, output_prefix, subsample_factor):
odd_subsampled_matrix = matrix[::subsample_factor, ::subsample_factor]
even_subsampled_matrix = matrix[1::subsample_factor, 1::subsample_factor]
ones_matrix = np.ones_like(odd_subsampled_matrix)
zeros_matrix = np.zeros_like(even_subsampled_matrix)
# print(ones_matrix)
# print(zeros_matrix)
plt.figure(figsize=(20, 10))
plt.subplot(2, 2, 1)
plt.imshow(odd_subsampled_matrix, cmap='gray', interpolation='nearest')
plt.title('Odd Subsampling')
plt.grid(which='both', color='black', linewidth=1)
plt.xticks([])
plt.yticks([])
plt.subplot(2, 2, 2)
plt.imshow(even_subsampled_matrix, cmap='gray', interpolation='nearest')
plt.title('Even Subsampling')
plt.grid(which='both', color='black', linewidth=1)
plt.xticks([])
plt.yticks([])
plt.subplot(2, 2, 3)
plt.imshow(ones_matrix, cmap='gray', interpolation='nearest')
plt.title('All Ones')
plt.grid(which='both', color='black', linewidth=1)
plt.xticks([])
plt.yticks([])
plt.subplot(2, 2, 4)
plt.imshow(zeros_matrix, cmap='gray_r', interpolation='nearest')
plt.title('All Zeros')
plt.grid(which='both', color='black', linewidth=1)
plt.xticks([])
plt.yticks([])
plt.tight_layout()
plt.savefig(output_prefix + '_subsampled_plots.png', dpi=300)
plt.clf() # Clear the figure after saving
def find_layers(module, layers=[nn.Linear], name=''):
"""
Recursively find the layers of a certain type in a module.
Args:
module (nn.Module): PyTorch module.
layers (list): List of layer types to find.
name (str): Name of the module.
Returns:
dict: Dictionary of layers of the given type(s) within the module.
"""
if type(module) in layers:
return {name: module}
res = {}
for name1, child in module.named_children():
res.update(find_layers(
child, layers=layers, name=name + '.' + name1 if name != '' else name1
))
return res
def check_sparsity(model, args):
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
count = 0
total_params = 0
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
sub_count = 0
sub_params = 0
for name in subset:
W = subset[name].weight.data
count += (W==0).sum().item()
total_params += W.numel()
sub_count += (W==0).sum().item()
sub_params += W.numel()
print(f"layer {i} sparsity {float(sub_count)/sub_params:.6f}")
model.config.use_cache = use_cache
return float(count)/total_params
def prepare_calibration_input(model, dataloader, nsamples, device):
use_cache = model.config.use_cache
model.config.use_cache = False
layers = model.model.layers
# dev = model.hf_device_map["model.embed_tokens"]
if "model.embed_tokens" in model.hf_device_map:
device = model.hf_device_map["model.embed_tokens"]
dtype = next(iter(model.parameters())).dtype
inps = torch.zeros((nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=device)
inps.requires_grad = False
cache = {'i': 0, 'attention_mask': None, "position_ids": None}
class Catcher(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
def forward(self, inp, **kwargs):
inps[cache['i']] = inp
cache['i'] += 1
cache['attention_mask'] = kwargs['attention_mask']
cache['position_ids'] = kwargs['position_ids']
raise ValueError
layers[0] = Catcher(layers[0])
for batch in dataloader:
try:
model(batch[0].to(device))
except ValueError:
pass
layers[0] = layers[0].module
outs = torch.zeros_like(inps)
attention_mask = cache['attention_mask']
position_ids = cache['position_ids']
model.config.use_cache = use_cache
return inps, outs, attention_mask, position_ids
def return_given_alpha(alpha, sort_res, W_metric, tmp_metric, sum_before):
thres_cumsum = sum_before * alpha
sort_mask = tmp_metric <= thres_cumsum.reshape((-1,1))
thres = torch.gather(sort_res[0], dim=1, index=sort_mask.sum(dim=1, keepdims=True)-1)
W_mask = (W_metric <= thres)
cur_sparsity = (W_mask==True).sum() / W_mask.numel()
return W_mask, cur_sparsity
def prune_magnitude(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
for name in subset:
W = subset[name].weight.data
W_metric = torch.abs(W)
if prune_n != 0:
W_mask = (torch.zeros_like(W)==1)
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
# thresh = torch.sort(W_metric.flatten().cuda())[0][int(W.numel()*args.sparsity_ratio)].cpu()
thresh = torch.sort(W_metric.flatten())[0][int(W_metric.numel()*args.sparsity_ratio)].cpu()
W_mask = (W_metric<=thresh)
W[W_mask] = 0
def prune_gradient(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
layers = model.model.layers
with open(args.gradient_path, 'rb') as file:
gradients = torch.load(args.gradient_path, map_location=torch.device('cpu'))
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
for name in subset:
indexed_name = f"{name}_layer_{i}"
W = subset[name].weight.data
W_metric = torch.abs(W)
if not args.gradient_inv:
W_metric = W_metric.to(dtype=torch.float32) * torch.abs(gradients[indexed_name].to(device=W_metric.device)).to(dtype=torch.float32)#+ small_value)
else:
small_value = torch.tensor(1e-8, dtype=gradients[indexed_name].dtype, device=gradients[indexed_name].device)
gradient_inv = 1 / (torch.abs(gradients[indexed_name]) + small_value)
W_metric = W_metric.to(dtype=torch.float32) * gradient_inv.to(device=W_metric.device).to(dtype=torch.float32)
W_mask = (torch.zeros_like(W)==1)
if prune_n != 0:
for ii in range(W_metric.shape[1]):
if ii % prune_m == 0:
tmp = W_metric[:,ii:(ii+prune_m)].float()
W_mask.scatter_(1,ii+torch.topk(tmp, prune_n,dim=1, largest=False)[1], True)
else:
sort_res = torch.sort(W_metric, dim=-1, stable=True)
indices = sort_res[1][:,:int(W_metric.shape[1]*args.sparsity_ratio)]
W_mask.scatter_(1, indices, True)
W[W_mask] = 0
def prune_gblm(args, model, tokenizer, device=torch.device("cuda:0"), prune_n=0, prune_m=0, layer_no=-1):
use_cache = model.config.use_cache
model.config.use_cache = False
with open(args.gradient_path, 'rb') as file:
gradients = torch.load(args.gradient_path, map_location=torch.device('cpu'))
print("loading calibdation data")
dataloader, _ = get_loaders("c4",nsamples=args.nsamples,seed=args.seed,seqlen=2048,tokenizer=tokenizer)
print("dataset loading complete")
with torch.no_grad():
inps, outs, attention_mask, position_ids = prepare_calibration_input(model, dataloader, args.nsamples, device)
layers = model.model.layers
for i in range(len(layers)):
layer = layers[i]
subset = find_layers(layer)
if f"model.layers.{i}" in model.hf_device_map: ## handle the case for llama-30B and llama-65B, when the device map has multiple GPUs;
dev = model.hf_device_map[f"model.layers.{i}"]
inps, outs, attention_mask, position_ids = inps.to(dev), outs.to(dev), attention_mask.to(dev), position_ids.to(dev)
wrapped_layers = {}
for name in subset: | wrapped_layers[name] = WrappedGPT(subset[name], layer_id=i, layer_name=name) | 1 | 2023-11-08 20:10:51+00:00 | 8k |
zamaniamin/fastapi-shop | apps/products/tests/test_product.py | [
{
"identifier": "FakeUser",
"path": "apps/accounts/faker/data.py",
"snippet": "class FakeUser(BaseFakeAccount):\n\n @classmethod\n def populate_members(cls):\n \"\"\"\n Create an admin and a user.\n \"\"\"\n\n # --- admin ---\n user, access_token = FakeAccount.ve... | import asyncio
import pytest
from fastapi import status
from fastapi.testclient import TestClient
from apps.accounts.faker.data import FakeUser
from apps.accounts.models import User
from apps.core.base_test_case import BaseTestCase
from apps.main import app
from apps.products.faker.data import FakeProduct
from apps.products.services import ProductService
from config.database import DatabaseManager | 6,834 |
class ProductTestBase(BaseTestCase):
product_endpoint = '/products/'
# --- members ---
admin: User | None = None
admin_authorization = {}
@classmethod
def setup_class(cls):
cls.client = TestClient(app)
# Initialize the test database and session before the test class starts
DatabaseManager.create_test_database()
# --- create an admin ---
cls.admin, access_token = FakeUser.populate_admin()
cls.admin_authorization = {"Authorization": f"Bearer {access_token}"}
@classmethod
def teardown_class(cls):
# Drop the test database after all tests in the class have finished
DatabaseManager.drop_all_tables()
class TestCreateProduct(ProductTestBase):
"""
Test create a product on the multi scenario
"""
def test_access_permission(self):
"""
Test permissions as admin and non-admin user for CRUD methods of create product.
"""
# TODO admin permission can access to all CRUD of a product also list of products
# TODO non admin users only can use read a product or read a list of products if it status is
# 'active or archive'
...
def test_create_product(self):
"""
Test create a product by assuming valid data.
* every time we create product, the media should be None, because the Media after creating a product will be
attached to it.
"""
# --- request ---
|
class ProductTestBase(BaseTestCase):
product_endpoint = '/products/'
# --- members ---
admin: User | None = None
admin_authorization = {}
@classmethod
def setup_class(cls):
cls.client = TestClient(app)
# Initialize the test database and session before the test class starts
DatabaseManager.create_test_database()
# --- create an admin ---
cls.admin, access_token = FakeUser.populate_admin()
cls.admin_authorization = {"Authorization": f"Bearer {access_token}"}
@classmethod
def teardown_class(cls):
# Drop the test database after all tests in the class have finished
DatabaseManager.drop_all_tables()
class TestCreateProduct(ProductTestBase):
"""
Test create a product on the multi scenario
"""
def test_access_permission(self):
"""
Test permissions as admin and non-admin user for CRUD methods of create product.
"""
# TODO admin permission can access to all CRUD of a product also list of products
# TODO non admin users only can use read a product or read a list of products if it status is
# 'active or archive'
...
def test_create_product(self):
"""
Test create a product by assuming valid data.
* every time we create product, the media should be None, because the Media after creating a product will be
attached to it.
"""
# --- request --- | payload = FakeProduct.get_payload() | 4 | 2023-11-06 04:46:03+00:00 | 8k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.