id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
7,781 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from .norm import LPLayerNorm
def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):
    """Return the shape of the attention-bias tensor for the given config.

    Returns ``None`` when no bias tensor is needed (flash implementation,
    or no alibi / prefix-LM / sequence-id masking); otherwise a 4-tuple
    broadcastable against (batch, heads, query, key) attention scores.
    """
    if attn_impl == "flash":
        return None
    if attn_impl in ("torch", "triton"):
        if alibi:
            # A full (seq_len x seq_len) bias is needed whenever positions
            # must be distinguished per query row.
            needs_full = prefix_lm or not causal or use_sequence_id
            if needs_full:
                return (1, n_heads, seq_len, seq_len)
            return (1, n_heads, 1, seq_len)
        if prefix_lm or use_sequence_id:
            return (1, 1, seq_len, seq_len)
        return None
    raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.")
7,782 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from .norm import LPLayerNorm
def build_alibi_bias(n_heads, seq_len, full=False, alibi_bias_max=8, device=None, dtype=None):
    """Construct an ALiBi positional bias tensor.

    Shape is (1, n_heads, 1, seq_len) when ``full`` is False, otherwise
    (1, n_heads, seq_len, seq_len) with negative absolute distances, each
    head scaled by its slope from ``gen_slopes``.
    """
    # Relative key positions 1-seq_len ... 0, laid out along the last axis.
    bias = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, 1, seq_len)
    if full:
        # Subtract the per-query offset and negate the absolute distance so
        # farther key positions receive a larger penalty.
        query_pos = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, seq_len, 1)
        bias = (bias - query_pos).abs().mul(-1)
    slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
    return (bias * slopes).to(dtype=dtype)
def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):
    """Populate the attention-bias tensor for the given implementation.

    For the torch/triton paths with ``alibi`` enabled, ALiBi penalties are
    added to ``attn_bias`` (matching its device/dtype); flash attention
    takes no explicit bias, so ``None`` is returned.
    """
    if attn_impl == "flash":
        return None
    if attn_impl in ("torch", "triton"):
        if alibi:
            alibi_bias = build_alibi_bias(
                n_heads,
                seq_len,
                full=not causal,  # non-causal attention needs the full matrix
                alibi_bias_max=alibi_bias_max,
                device=attn_bias.device,
                dtype=attn_bias.dtype,
            )
            attn_bias = attn_bias.add(alibi_bias)
        return attn_bias
    raise ValueError(f"attn_impl={attn_impl!r} is an invalid setting.")
7,783 | import torch
def _cast_if_autocast_enabled(tensor):
if torch.is_autocast_enabled():
if tensor.device.type == "cuda":
dtype = torch.get_autocast_gpu_dtype()
elif tensor.device.type == "cpu":
dtype = torch.get_autocast_cpu_dtype()
else:
raise NotImplementedError()
return tensor.to(dtype=dtype)
return tensor | null |
7,785 | import math
import torch
import triton_pre_mlir as triton
import triton_pre_mlir.language as tl
def init_to_zero(name):
    """Return a hook that zeroes the kernel argument called ``name``.

    Presumably used as a Triton autotune ``pre_hook`` so an accumulator
    buffer starts from zero before each launch — confirm against callers.
    """
    def _zero_arg(nargs):
        return nargs[name].zero_()
    return _zero_arg
7,786 | import math
import torch
import triton_pre_mlir as triton
import triton_pre_mlir.language as tl
def _fwd_kernel(
    Q,
    K,
    V,
    Bias,
    Out,
    Lse,
    TMP,
    softmax_scale,
    stride_qb,
    stride_qh,
    stride_qm,
    stride_kb,
    stride_kh,
    stride_kn,
    stride_vb,
    stride_vh,
    stride_vn,
    stride_bb,
    stride_bh,
    stride_bm,
    stride_ob,
    stride_oh,
    stride_om,
    nheads,
    seqlen_q,
    seqlen_k,
    seqlen_q_rounded,
    headdim,
    CACHE_KEY_SEQLEN_Q,
    CACHE_KEY_SEQLEN_K,
    BIAS_TYPE: tl.constexpr,
    IS_CAUSAL: tl.constexpr,
    BLOCK_HEADDIM: tl.constexpr,
    EVEN_M: tl.constexpr,
    EVEN_N: tl.constexpr,
    EVEN_HEADDIM: tl.constexpr,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
):
    """FlashAttention forward Triton kernel.

    Each program instance computes one BLOCK_M x BLOCK_HEADDIM tile of the
    output for a single (batch, head) pair, streaming over the key/value
    sequence in BLOCK_N chunks while maintaining a numerically stable
    online softmax (running max ``m_i`` and log-sum-exp ``lse_i``).
    Writes the attention output to ``Out`` and the per-row log-sum-exp to
    ``Lse``; ``TMP`` is scratch space (see NOTE below).

    The EVEN_M / EVEN_N / EVEN_HEADDIM constexpr flags pick unmasked
    loads/stores when the corresponding dimension is an exact multiple of
    its block size. BIAS_TYPE is "none", "vector" (one value per key
    position) or "matrix" (full query-by-key bias).

    NOTE(review): this function is indexed like a @triton.jit kernel by its
    caller; the decorator appears to have been stripped from this dump.
    """
    # Grid axis 0 tiles query rows; axis 1 enumerates (batch, head) pairs.
    start_m = tl.program_id(0)
    off_hb = tl.program_id(1)
    off_b = off_hb // nheads
    off_h = off_hb % nheads
    # Row / column / head-dim offsets for this tile.
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    # Base pointers into Q/K/V for this (batch, head); the head dim is
    # assumed contiguous (stride 1) — the launcher asserts this.
    q_ptrs = Q + off_b * stride_qb + off_h * stride_qh + (offs_m[:, None] * stride_qm + offs_d[None, :])
    k_ptrs = K + off_b * stride_kb + off_h * stride_kh + (offs_n[:, None] * stride_kn + offs_d[None, :])
    v_ptrs = V + off_b * stride_vb + off_h * stride_vh + (offs_n[:, None] * stride_vn + offs_d[None, :])
    if BIAS_TYPE == "vector":
        # One bias value per key position, broadcast over query rows.
        b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + offs_n
    elif BIAS_TYPE == "matrix":
        # Full (query, key) bias matrix.
        b_ptrs = Bias + off_b * stride_bb + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :])
    # Per-row scratch slot for this tile inside TMP.
    t_ptrs = TMP + off_hb * seqlen_q_rounded + offs_m
    # Online-softmax state: running log-sum-exp, running max, and the
    # (unnormalized) fp32 output accumulator.
    lse_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    acc_o = tl.zeros([BLOCK_M, BLOCK_HEADDIM], dtype=tl.float32)
    # Load the Q tile once, masking ragged rows / head dim as needed.
    if EVEN_M & EVEN_N:
        if EVEN_HEADDIM:
            q = tl.load(q_ptrs)
        else:
            q = tl.load(q_ptrs, mask=offs_d[None, :] < headdim, other=0.0)
    elif EVEN_HEADDIM:
        q = tl.load(q_ptrs, mask=offs_m[:, None] < seqlen_q, other=0.0)
    else:
        q = tl.load(
            q_ptrs,
            mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
            other=0.0,
        )
    # Causal attention only needs key blocks up to this tile's diagonal.
    end_n = seqlen_k if not IS_CAUSAL else tl.minimum((start_m + 1) * BLOCK_M, seqlen_k)
    for start_n in range(0, end_n, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        # --- load the K block (masked variants mirror the Q load above) ---
        if EVEN_N & EVEN_M:
            if EVEN_HEADDIM:
                k = tl.load(k_ptrs + start_n * stride_kn)
            else:
                k = tl.load(
                    k_ptrs + start_n * stride_kn,
                    mask=offs_d[None, :] < headdim,
                    other=0.0,
                )
        elif EVEN_HEADDIM:
            k = tl.load(
                k_ptrs + start_n * stride_kn,
                mask=(start_n + offs_n)[:, None] < seqlen_k,
                other=0.0,
            )
        else:
            k = tl.load(
                k_ptrs + start_n * stride_kn,
                mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
                other=0.0,
            )
        # Raw attention scores for this (M, N) tile.
        qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
        qk += tl.dot(q, k, trans_b=True)
        # Mask out-of-range key columns with -inf so they vanish in softmax.
        if not EVEN_N:
            qk += tl.where((start_n + offs_n)[None, :] < seqlen_k, 0, float("-inf"))
        # Causal mask: query row m may only attend to key positions <= m.
        if IS_CAUSAL:
            qk += tl.where(offs_m[:, None] >= (start_n + offs_n)[None, :], 0, float("-inf"))
        if BIAS_TYPE != "none":
            if BIAS_TYPE == "vector":
                if EVEN_N:
                    bias = tl.load(b_ptrs + start_n).to(tl.float32)
                else:
                    bias = tl.load(b_ptrs + start_n, mask=start_n + offs_n < seqlen_k, other=0.0).to(tl.float32)
                bias = bias[None, :]
            elif BIAS_TYPE == "matrix":
                if EVEN_M & EVEN_N:
                    bias = tl.load(b_ptrs + start_n).to(tl.float32)
                else:
                    bias = tl.load(
                        b_ptrs + start_n,
                        mask=(offs_m[:, None] < seqlen_q) & ((start_n + offs_n)[None, :] < seqlen_k),
                        other=0.0,
                    ).to(tl.float32)
            # With a bias, scale first and add the bias before the softmax.
            qk = qk * softmax_scale + bias
            m_ij = tl.maximum(tl.max(qk, 1), lse_i)
            p = tl.exp(qk - m_ij[:, None])
        else:
            # No bias: fold the scale into the max/exp directly.
            m_ij = tl.maximum(tl.max(qk, 1) * softmax_scale, lse_i)
            p = tl.exp(qk * softmax_scale - m_ij[:, None])
        l_ij = tl.sum(p, 1)
        # Rescale the accumulator for the new running max.
        acc_o_scale = tl.exp(m_i - m_ij)
        # NOTE(review): the store/load round-trip through TMP looks like a
        # workaround for a Triton compiler issue — confirm before removing.
        tl.store(t_ptrs, acc_o_scale)
        acc_o_scale = tl.load(t_ptrs)
        acc_o = acc_o * acc_o_scale[:, None]
        # --- load the V block and accumulate P @ V ---
        if EVEN_N & EVEN_M:
            if EVEN_HEADDIM:
                v = tl.load(v_ptrs + start_n * stride_vn)
            else:
                v = tl.load(
                    v_ptrs + start_n * stride_vn,
                    mask=offs_d[None, :] < headdim,
                    other=0.0,
                )
        elif EVEN_HEADDIM:
            v = tl.load(
                v_ptrs + start_n * stride_vn,
                mask=(start_n + offs_n)[:, None] < seqlen_k,
                other=0.0,
            )
        else:
            v = tl.load(
                v_ptrs + start_n * stride_vn,
                mask=((start_n + offs_n)[:, None] < seqlen_k) & (offs_d[None, :] < headdim),
                other=0.0,
            )
        p = p.to(v.dtype)
        acc_o += tl.dot(p, v)
        # Update the online-softmax statistics.
        m_i = m_ij
        l_i_new = tl.exp(lse_i - m_ij) + l_ij
        lse_i = m_ij + tl.log(l_i_new)
    # Final normalization of the accumulated output.
    o_scale = tl.exp(m_i - lse_i)
    # NOTE(review): same TMP store/load round-trip as above.
    tl.store(t_ptrs, o_scale)
    o_scale = tl.load(t_ptrs)
    acc_o = acc_o * o_scale[:, None]
    # Recompute row offsets (redundant with the prologue; preserved as-is).
    start_m = tl.program_id(0)
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    # Write the per-row log-sum-exp (used by the backward pass).
    lse_ptrs = Lse + off_hb * seqlen_q_rounded + offs_m
    tl.store(lse_ptrs, lse_i)
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    out_ptrs = Out + off_b * stride_ob + off_h * stride_oh + (offs_m[:, None] * stride_om + offs_d[None, :])
    # Store the output tile, masking ragged rows / head dim as needed.
    if EVEN_M:
        if EVEN_HEADDIM:
            tl.store(out_ptrs, acc_o)
        else:
            tl.store(out_ptrs, acc_o, mask=offs_d[None, :] < headdim)
    elif EVEN_HEADDIM:
        tl.store(out_ptrs, acc_o, mask=offs_m[:, None] < seqlen_q)
    else:
        tl.store(
            out_ptrs,
            acc_o,
            mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
        )
def _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):
    """Validate inputs and launch the Triton FlashAttention forward kernel.

    Args:
        q, k, v: CUDA tensors of shape (batch, seqlen, nheads, headdim),
            fp16 or bf16, with headdim <= 128.
        bias: optional 4-d attention bias whose last two dims are
            (1, seqlen_k) ("vector") or (seqlen_q, seqlen_k) ("matrix");
            expanded to (batch, nheads, seqlen_q, seqlen_k) before launch.
        causal: apply a causal mask when True.
        softmax_scale: score scaling factor; defaults to 1/sqrt(headdim).
            NOTE(review): an explicit 0.0 is falsy and would be replaced by
            the default — confirm that is intended.

    Returns:
        (o, lse, softmax_scale): output shaped like q, per-row log-sum-exp
        of shape (batch, nheads, seqlen_q_rounded), and the scale used.
    """
    (batch, seqlen_q, nheads, d) = q.shape
    (_, seqlen_k, _, _) = k.shape
    assert k.shape == (batch, seqlen_k, nheads, d)
    assert v.shape == (batch, seqlen_k, nheads, d)
    assert d <= 128, "FlashAttention only support head dimensions up to 128"
    assert q.dtype == k.dtype == v.dtype, "All tensors must have the same type"
    assert q.dtype in [torch.float16, torch.bfloat16], "Only support fp16 and bf16"
    assert q.is_cuda and k.is_cuda and v.is_cuda
    softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
    has_bias = bias is not None
    bias_type = "none"
    if has_bias:
        assert bias.dtype in [q.dtype, torch.float]
        assert bias.is_cuda
        assert bias.dim() == 4
        # Kernel requires the last bias dim to be contiguous.
        if bias.stride(-1) != 1:
            bias = bias.contiguous()
        if bias.shape[2:] == (1, seqlen_k):
            bias_type = "vector"
        elif bias.shape[2:] == (seqlen_q, seqlen_k):
            bias_type = "matrix"
        else:
            raise RuntimeError("Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)")
        bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
    bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
    # lse/tmp are padded to a multiple of 128 rows (the kernel's BLOCK_M).
    seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
    lse = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
    tmp = torch.empty((batch, nheads, seqlen_q_rounded), device=q.device, dtype=torch.float32)
    o = torch.empty_like(q)
    BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
    BLOCK = 128
    # More warps for larger head dims.
    num_warps = 4 if d <= 64 else 8
    # Grid: one program per (query-row block, batch*head) pair.
    grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
    # Strides are passed as (batch, head, seq) even though tensors are laid
    # out (batch, seq, head, d) — hence stride(0), stride(2), stride(1).
    _fwd_kernel[grid](
        q,
        k,
        v,
        bias,
        o,
        lse,
        tmp,
        softmax_scale,
        q.stride(0),
        q.stride(2),
        q.stride(1),
        k.stride(0),
        k.stride(2),
        k.stride(1),
        v.stride(0),
        v.stride(2),
        v.stride(1),
        *bias_strides,
        o.stride(0),
        o.stride(2),
        o.stride(1),
        nheads,
        seqlen_q,
        seqlen_k,
        seqlen_q_rounded,
        d,
        seqlen_q // 32,
        seqlen_k // 32,
        bias_type,
        causal,
        BLOCK_HEADDIM,
        BLOCK_M=BLOCK,
        BLOCK_N=BLOCK,
        num_warps=num_warps,
        num_stages=1
    )
    return (o, lse, softmax_scale)
7,787 | import math
import torch
import triton_pre_mlir as triton
import triton_pre_mlir.language as tl
def _bwd_preprocess_do_o_dot(
    Out,
    DO,
    Delta,
    stride_ob,
    stride_oh,
    stride_om,
    stride_dob,
    stride_doh,
    stride_dom,
    nheads,
    seqlen_q,
    seqlen_q_rounded,
    headdim,
    BLOCK_M: tl.constexpr,
    BLOCK_HEADDIM: tl.constexpr,
):
    """Backward-pass preamble: Delta[row] = sum_d(Out[row, d] * dOut[row, d]).

    One program per (query-row block, batch*head) pair; the row-wise dot
    product of the forward output with its gradient is stored into
    ``Delta`` (length seqlen_q_rounded per head) for use by the main
    backward kernel.
    """
    # Grid axis 0 tiles query rows; axis 1 enumerates (batch, head) pairs.
    start_m = tl.program_id(0)
    off_hb = tl.program_id(1)
    off_b = off_hb // nheads
    off_h = off_hb % nheads
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_d = tl.arange(0, BLOCK_HEADDIM)
    # Load the output and its gradient tile in fp32, masking ragged edges.
    o = tl.load(
        Out + off_b * stride_ob + off_h * stride_oh + offs_m[:, None] * stride_om + offs_d[None, :],
        mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
        other=0.0,
    ).to(tl.float32)
    do = tl.load(
        DO + off_b * stride_dob + off_h * stride_doh + offs_m[:, None] * stride_dom + offs_d[None, :],
        mask=(offs_m[:, None] < seqlen_q) & (offs_d[None, :] < headdim),
        other=0.0,
    ).to(tl.float32)
    # Row-wise dot product over the head dimension.
    delta = tl.sum(o * do, axis=1)
    tl.store(Delta + off_hb * seqlen_q_rounded + offs_m, delta)
def _bwd_kernel(
    Q,
    K,
    V,
    Bias,
    DO,
    DQ,
    DK,
    DV,
    LSE,
    D,
    softmax_scale,
    stride_qb,
    stride_qh,
    stride_qm,
    stride_kb,
    stride_kh,
    stride_kn,
    stride_vb,
    stride_vh,
    stride_vn,
    stride_bb,
    stride_bh,
    stride_bm,
    stride_dob,
    stride_doh,
    stride_dom,
    stride_dqb,
    stride_dqh,
    stride_dqm,
    stride_dkb,
    stride_dkh,
    stride_dkn,
    stride_dvb,
    stride_dvh,
    stride_dvn,
    nheads,
    seqlen_q,
    seqlen_k,
    seqlen_q_rounded,
    headdim,
    CACHE_KEY_SEQLEN_Q,
    CACHE_KEY_SEQLEN_K,
    BIAS_TYPE: tl.constexpr,
    IS_CAUSAL: tl.constexpr,
    BLOCK_HEADDIM: tl.constexpr,
    SEQUENCE_PARALLEL: tl.constexpr,
    EVEN_M: tl.constexpr,
    EVEN_N: tl.constexpr,
    EVEN_HEADDIM: tl.constexpr,
    BLOCK_M: tl.constexpr,
    BLOCK_N: tl.constexpr,
):
    """FlashAttention backward Triton kernel (dispatcher).

    Advances all pointers to the (batch, head) pair selected by grid axis 1,
    then delegates the actual gradient computation to
    ``_bwd_kernel_one_col_block`` (defined elsewhere in this module):
    either serially over all key-column blocks (SEQUENCE_PARALLEL=False,
    non-atomic dq accumulation) or with one program per key-column block
    on grid axis 0 (SEQUENCE_PARALLEL=True, dq accumulated via atomics).

    ``LSE`` holds the forward log-sum-exp and ``D`` the precomputed
    rowsum(o * do) from ``_bwd_preprocess_do_o_dot``.
    """
    # Grid axis 1 enumerates (batch, head) pairs.
    off_hb = tl.program_id(1)
    off_b = off_hb // nheads
    off_h = off_hb % nheads
    # Shift every base pointer to this (batch, head) slice.
    Q += off_b * stride_qb + off_h * stride_qh
    K += off_b * stride_kb + off_h * stride_kh
    V += off_b * stride_vb + off_h * stride_vh
    DO += off_b * stride_dob + off_h * stride_doh
    DQ += off_b * stride_dqb + off_h * stride_dqh
    DK += off_b * stride_dkb + off_h * stride_dkh
    DV += off_b * stride_dvb + off_h * stride_dvh
    if BIAS_TYPE != "none":
        Bias += off_b * stride_bb + off_h * stride_bh
    # D and LSE are flat (batch*head, seqlen_q_rounded) row buffers.
    D += off_hb * seqlen_q_rounded
    LSE += off_hb * seqlen_q_rounded
    if not SEQUENCE_PARALLEL:
        # Serial mode: one program walks every key-column block.
        num_block_n = tl.cdiv(seqlen_k, BLOCK_N)
        for start_n in range(0, num_block_n):
            _bwd_kernel_one_col_block(
                start_n,
                Q,
                K,
                V,
                Bias,
                DO,
                DQ,
                DK,
                DV,
                LSE,
                D,
                softmax_scale,
                stride_qm,
                stride_kn,
                stride_vn,
                stride_bm,
                stride_dom,
                stride_dqm,
                stride_dkn,
                stride_dvn,
                seqlen_q,
                seqlen_k,
                headdim,
                ATOMIC_ADD=False,
                BIAS_TYPE=BIAS_TYPE,
                IS_CAUSAL=IS_CAUSAL,
                BLOCK_HEADDIM=BLOCK_HEADDIM,
                EVEN_M=EVEN_M,
                EVEN_N=EVEN_N,
                EVEN_HEADDIM=EVEN_HEADDIM,
                BLOCK_M=BLOCK_M,
                BLOCK_N=BLOCK_N,
            )
    else:
        # Parallel mode: grid axis 0 selects the key-column block; dq
        # updates from different blocks must use atomic adds.
        start_n = tl.program_id(0)
        _bwd_kernel_one_col_block(
            start_n,
            Q,
            K,
            V,
            Bias,
            DO,
            DQ,
            DK,
            DV,
            LSE,
            D,
            softmax_scale,
            stride_qm,
            stride_kn,
            stride_vn,
            stride_bm,
            stride_dom,
            stride_dqm,
            stride_dkn,
            stride_dvn,
            seqlen_q,
            seqlen_k,
            headdim,
            ATOMIC_ADD=True,
            BIAS_TYPE=BIAS_TYPE,
            IS_CAUSAL=IS_CAUSAL,
            BLOCK_HEADDIM=BLOCK_HEADDIM,
            EVEN_M=EVEN_M,
            EVEN_N=EVEN_N,
            EVEN_HEADDIM=EVEN_HEADDIM,
            BLOCK_M=BLOCK_M,
            BLOCK_N=BLOCK_N,
        )
def _flash_attn_backward(do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None):
    """Launch the Triton FlashAttention backward pass.

    Computes gradients for q, k, v given the output gradient ``do``, the
    forward output ``o`` and the saved log-sum-exp ``lse``. ``dq``, ``dk``
    and ``dv`` are written in place (dq via an fp32 accumulator that is
    copied back at the end, since the parallel kernel accumulates with
    atomic adds).

    Args:
        do: gradient of the output, same shape as q.
        q, k, v, o: forward tensors, (batch, seqlen, nheads, headdim).
        lse: (batch, nheads, seqlen_q_rounded) log-sum-exp from the forward.
        dq, dk, dv: preallocated output gradients, written in place.
        bias: optional attention bias as in the forward pass.
        causal: apply a causal mask when True.
        softmax_scale: score scaling; defaults to 1/sqrt(headdim).
    """
    # Kernel requires a contiguous last dim on the output gradient.
    if do.stride(-1) != 1:
        do = do.contiguous()
    (batch, seqlen_q, nheads, d) = q.shape
    (_, seqlen_k, _, _) = k.shape
    assert d <= 128
    seqlen_q_rounded = math.ceil(seqlen_q / 128) * 128
    assert lse.shape == (batch, nheads, seqlen_q_rounded)
    assert q.stride(-1) == k.stride(-1) == v.stride(-1) == o.stride(-1) == 1
    assert dq.stride(-1) == dk.stride(-1) == dv.stride(-1) == 1
    # NOTE(review): an explicit softmax_scale of 0.0 is falsy and would be
    # replaced by the default — confirm that is intended.
    softmax_scale = softmax_scale or 1.0 / math.sqrt(d)
    # dq is accumulated in fp32 (atomic adds in the sequence-parallel path).
    dq_accum = torch.empty_like(q, dtype=torch.float32)
    delta = torch.empty_like(lse)
    BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)
    # Precompute delta = rowsum(o * do) for every query row.
    grid = lambda META: (triton.cdiv(seqlen_q, META["BLOCK_M"]), batch * nheads)
    _bwd_preprocess_do_o_dot[grid](
        o,
        do,
        delta,
        o.stride(0),
        o.stride(2),
        o.stride(1),
        do.stride(0),
        do.stride(2),
        do.stride(1),
        nheads,
        seqlen_q,
        seqlen_q_rounded,
        d,
        BLOCK_M=128,
        BLOCK_HEADDIM=BLOCK_HEADDIM,
    )
    has_bias = bias is not None
    bias_type = "none"
    if has_bias:
        assert bias.dtype in [q.dtype, torch.float]
        assert bias.is_cuda
        assert bias.dim() == 4
        assert bias.stride(-1) == 1
        if bias.shape[2:] == (1, seqlen_k):
            bias_type = "vector"
        elif bias.shape[2:] == (seqlen_q, seqlen_k):
            bias_type = "matrix"
        else:
            raise RuntimeError("Last 2 dimensions of bias must be (1, seqlen_k) or (seqlen_q, seqlen_k)")
        bias = bias.expand(batch, nheads, seqlen_q, seqlen_k)
    bias_strides = (bias.stride(0), bias.stride(1), bias.stride(2)) if has_bias else (0, 0, 0)
    # Grid axis 0: key-column blocks when SEQUENCE_PARALLEL, else a single
    # serial program; axis 1: (batch, head) pairs.
    grid = lambda META: (
        triton.cdiv(seqlen_k, META["BLOCK_N"]) if META["SEQUENCE_PARALLEL"] else 1,
        batch * nheads,
    )
    # NOTE(review): SEQUENCE_PARALLEL / EVEN_* / BLOCK_M / BLOCK_N are not
    # passed here — presumably supplied by @triton.autotune/@triton.heuristics
    # decorators on _bwd_kernel that were stripped from this dump; confirm.
    _bwd_kernel[grid](
        q,
        k,
        v,
        bias,
        do,
        dq_accum,
        dk,
        dv,
        lse,
        delta,
        softmax_scale,
        q.stride(0),
        q.stride(2),
        q.stride(1),
        k.stride(0),
        k.stride(2),
        k.stride(1),
        v.stride(0),
        v.stride(2),
        v.stride(1),
        *bias_strides,
        do.stride(0),
        do.stride(2),
        do.stride(1),
        dq_accum.stride(0),
        dq_accum.stride(2),
        dq_accum.stride(1),
        dk.stride(0),
        dk.stride(2),
        dk.stride(1),
        dv.stride(0),
        dv.stride(2),
        dv.stride(1),
        nheads,
        seqlen_q,
        seqlen_k,
        seqlen_q_rounded,
        d,
        seqlen_q // 32,
        seqlen_k // 32,
        bias_type,
        causal,
        BLOCK_HEADDIM
    )
    # Cast the fp32 accumulator back into the caller's dq tensor.
    dq.copy_(dq_accum)
7,788 | from contextlib import contextmanager
import torch
import torch.nn as nn
@contextmanager
def init_on_device(device: torch.device, include_buffers: bool = False):
    """Device initialization context manager.

    A context manager under which models are initialized with all parameters
    on the specified device. Implemented by temporarily patching
    ``nn.Module.register_parameter`` (and optionally ``register_buffer`` and
    the ``torch.empty/zeros/ones/full`` constructors) so every newly created
    parameter/buffer lands on ``device``; the patches are always restored on
    exit, even if the body raises.

    Args:
        device (`torch.device`): Device to initialize all parameters on.
        include_buffers (`bool`, *optional*, defaults to `False`): Whether or
            not to also put all buffers on the device while initializing.

    Example:
    ```python
    import torch.nn as nn

    with init_on_device(device=torch.device("cuda")):
        tst = nn.Linear(100, 100)  # on `cuda` device
    ```
    """
    # Note: the @contextmanager decorator is required — without it the
    # function returns a bare generator and `with init_on_device(...)` fails.
    old_register_parameter = nn.Module.register_parameter
    if include_buffers:
        old_register_buffer = nn.Module.register_buffer

    def register_empty_parameter(module, name, param):
        # Register normally, then rebuild the parameter on the target device,
        # preserving the parameter subclass and any extra instance state.
        old_register_parameter(module, name, param)
        if param is not None:
            param_cls = type(module._parameters[name])
            kwargs = module._parameters[name].__dict__
            module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)

    def register_empty_buffer(module, name, buffer):
        # Register normally, then move the buffer to the target device.
        old_register_buffer(module, name, buffer)
        if buffer is not None:
            module._buffers[name] = module._buffers[name].to(device)

    if include_buffers:
        # Also force the bare tensor constructors onto the target device so
        # buffers created outside register_buffer land there too.
        tensor_constructors_to_patch = {torch_function_name: getattr(torch, torch_function_name) for torch_function_name in ["empty", "zeros", "ones", "full"]}
    else:
        tensor_constructors_to_patch = {}

    def patch_tensor_constructor(fn):
        def wrapper(*args, **kwargs):
            kwargs["device"] = device
            return fn(*args, **kwargs)

        return wrapper

    try:
        nn.Module.register_parameter = register_empty_parameter
        if include_buffers:
            nn.Module.register_buffer = register_empty_buffer
        for torch_function_name in tensor_constructors_to_patch.keys():
            setattr(
                torch,
                torch_function_name,
                patch_tensor_constructor(getattr(torch, torch_function_name)),
            )
        yield
    finally:
        # Always undo the monkey-patches, even if the body raised.
        nn.Module.register_parameter = old_register_parameter
        if include_buffers:
            nn.Module.register_buffer = old_register_buffer
        for (
            torch_function_name,
            old_torch_function,
        ) in tensor_constructors_to_patch.items():
            setattr(torch, torch_function_name, old_torch_function)
The provided code snippet includes necessary dependencies for implementing the `init_empty_weights` function. Write a Python function `def init_empty_weights(include_buffers: bool = False)` to solve the following problem:
Meta initialization context manager. A context manager under which models are initialized with all parameters on the meta device, therefore creating an empty model. Useful when just initializing the model would blow the available RAM. Args: include_buffers (`bool`, *optional*, defaults to `False`): Whether or not to also put all buffers on the meta device while initializing. Example: ```python import torch.nn as nn # Initialize a model with 100 billion parameters in no time and without using any RAM. with init_empty_weights(): tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)]) ``` <Tip warning={true}> Any model created under this context manager has no weights. As such you can't do something like `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`]. </Tip>
Here is the function:
@contextmanager
def init_empty_weights(include_buffers: bool = False):
    """Meta initialization context manager.

    A context manager under which models are initialized with all parameters
    on the meta device, therefore creating an empty model. Useful when just
    initializing the model would blow the available RAM. Thin wrapper around
    ``init_on_device`` with ``torch.device("meta")``.

    Args:
        include_buffers (`bool`, *optional*, defaults to `False`): Whether or
            not to also put all buffers on the meta device while initializing.

    Example:
    ```python
    import torch.nn as nn

    # Initialize a model with 100 billion parameters in no time and without using any RAM.
    with init_empty_weights():
        tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
    ```

    <Tip warning={true}>

    Any model created under this context manager has no weights. As such you can't do something like
    `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].

    </Tip>
    """
    # Note: the @contextmanager decorator is required — without it the
    # function returns a bare generator and `with init_empty_weights()` fails.
    with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
        yield f
7,789 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def torch_default_param_init_fn_(module: nn.Module, verbose: int = 0, **kwargs):
    """Re-initialize ``module`` via its own ``reset_parameters``, if present.

    Modules without a ``reset_parameters`` attribute are left untouched.
    Extra keyword arguments are accepted and ignored for registry
    compatibility.
    """
    del kwargs
    if verbose > 1:
        warnings.warn(f"Initializing network using module's reset_parameters attribute")
    reset = getattr(module, "reset_parameters", None)
    if reset is not None:
        reset()
7,790 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def _normal_param_init_fn_(
    module: nn.Module,
    std: float,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Initialize ``module`` from N(0, std) via the generic initializer."""
    del kwargs
    if verbose > 1:
        warnings.warn(f"Using torch.nn.init.normal_ init fn mean=0.0, std={std}")
    generic_param_init_fn_(
        module=module,
        init_fn_=_normal_init_(std=std),  # normal(0, std) initializer
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
def baseline_param_init_fn_(
    module: nn.Module,
    init_std: float,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Baseline scheme: normal init with a user-supplied ``init_std``.

    Raises:
        ValueError: if ``init_std`` is None (the caller must configure it).
    """
    del kwargs
    if init_std is None:
        raise ValueError("You must set model.init_config['init_std'] to a float value to use the default initialization scheme.")
    _normal_param_init_fn_(
        module=module,
        std=init_std,
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
7,791 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def small_param_init_fn_(
    module: nn.Module,
    n_layers: int,
    d_model: int,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """SmallInit scheme: normal init with std = sqrt(2 / (5 * d_model))."""
    del kwargs
    _normal_param_init_fn_(
        module=module,
        std=math.sqrt(2 / (5 * d_model)),  # SmallInit variance
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
The provided code snippet includes necessary dependencies for implementing the `neox_param_init_fn_` function. Write a Python function `def neox_param_init_fn_( module: nn.Module, n_layers: int, d_model: int, emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, verbose: int = 0, **kwargs, )` to solve the following problem:
From section 2.3.1 of GPT-NeoX-20B: An Open-Source Autoregressive Language Model — Black et al. (2022) see https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151 and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py
Here is the function:
def neox_param_init_fn_(
    module: nn.Module,
    n_layers: int,
    d_model: int,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """GPT-NeoX-20B initialization (section 2.3.1, Black et al., 2022).

    see https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151
    and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py
    """
    del kwargs
    # Residual projections are additionally divided by n_layers / sqrt(10).
    residual_div = n_layers / math.sqrt(10)
    if verbose > 1:
        warnings.warn(f"setting init_div_is_residual to {residual_div}")
    small_param_init_fn_(
        module=module,
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=residual_div,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
7,792 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def generic_param_init_fn_(
    module: nn.Module,
    init_fn_,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Initialize the parameters of a single ``module`` using ``init_fn_``.

    Handled module types: ``nn.Linear`` (weights via ``init_fn_``, biases
    zeroed, fused layers via ``fused_init_helper_``), ``nn.Embedding``
    (optionally a separate normal or uniform init), registered norm classes
    (weight=1, bias=0), and ``nn.MultiheadAttention`` (per-projection init).
    Modules tagged ``_is_residual`` have their weights divided by
    ``div_is_residual`` derived from ``init_div_is_residual``.

    Args:
        module: the module to initialize (applied per-module, e.g. via
            ``model.apply``).
        init_fn_: in-place initializer taking a weight tensor.
        n_layers: transformer depth; used for the default residual scaling
            sqrt(2 * n_layers).
        d_model: model width; required for fused MultiheadAttention in_proj.
        init_div_is_residual: True (default scaling), False (disabled), or a
            numeric/numeric-string custom divisor.
        emb_init_std / emb_init_uniform_lim: optional embedding-specific
            normal std or uniform limits.
        verbose: warn about choices when > 1.

    Raises:
        ValueError: for a non-boolean, non-numeric ``init_div_is_residual``
            or an ``emb_init_uniform_lim`` with more than two entries.
        NotImplementedError: if ``module`` directly owns parameters of an
            unhandled type.
    """
    del kwargs
    if verbose > 1:
        warnings.warn(f"If model has bias parameters they are initialized to 0.")
    # No-op self-assignment; preserved as-is.
    init_div_is_residual = init_div_is_residual
    if init_div_is_residual is False:
        div_is_residual = 1.0
    elif init_div_is_residual is True:
        # Default residual scaling from GPT-2: 1/sqrt(2 * n_layers).
        div_is_residual = math.sqrt(2 * n_layers)
    elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
        div_is_residual = init_div_is_residual
    elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
        div_is_residual = float(init_div_is_residual)
    else:
        # Assignment is dead (the raise follows); preserved as-is.
        div_is_residual = 1.0
        raise ValueError(f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}")
    if init_div_is_residual is not False:
        if verbose > 1:
            warnings.warn(f"Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. " + f"Set `init_div_is_residual: false` in init config to disable this.")
    if isinstance(module, nn.Linear):
        # `_fused` marks fused linear layers needing per-slice init.
        if hasattr(module, "_fused"):
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
        if init_div_is_residual is not False and getattr(module, "_is_residual", False):
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        if emb_init_std is not None:
            # Embedding-specific normal init.
            std = emb_init_std
            if std == 0:
                warnings.warn(f"Embedding layer initialized to 0.")
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using normal distribution with mean=0 and std={std!r}.")
        elif emb_init_uniform_lim is not None:
            # Embedding-specific uniform init; scalar lim means [-lim, lim].
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if len(lim) > 2:
                    raise ValueError(f"Uniform init requires a min and a max limit. User input: {lim}.")
                if lim[0] == lim[1]:
                    warnings.warn(f"Embedding layer initialized to {lim[0]}.")
            else:
                if lim == 0:
                    warnings.warn(f"Embedding layer initialized to 0.")
                lim = [-lim, lim]
            (a, b) = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using uniform distribution in range {lim}.")
        else:
            # Fall back to the generic initializer for embeddings.
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
        if verbose > 1:
            warnings.warn(f"Norm weights are set to 1. If norm layer has a bias it is initialized to 0.")
        if hasattr(module, "weight") and module.weight is not None:
            torch.nn.init.ones_(module.weight)
        if hasattr(module, "bias") and module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        if module._qkv_same_embed_dim:
            assert module.in_proj_weight is not None
            assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)
            assert d_model is not None
            # Initialize the fused in_proj one d_model-sized (q/k/v) slice
            # at a time so each projection gets the intended fan-in.
            _d = d_model
            splits = (0, _d, 2 * _d, 3 * _d)
            for s, e in zip(splits[:-1], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)
            assert module.in_proj_weight is None
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        if module.in_proj_bias is not None:
            torch.nn.init.zeros_(module.in_proj_bias)
        if module.bias_k is not None:
            torch.nn.init.zeros_(module.bias_k)
        if module.bias_v is not None:
            torch.nn.init.zeros_(module.bias_v)
        init_fn_(module.out_proj.weight)
        # out_proj is the residual projection of attention.
        if init_div_is_residual is not False and getattr(module.out_proj, "_is_residual", False):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if module.out_proj.bias is not None:
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        # Only reject modules that directly own parameters; containers with
        # no direct parameters pass through silently.
        for _ in module.parameters(recurse=False):
            raise NotImplementedError(f"{module.__class__.__name__} parameters are not initialized by param_init_fn.")
def kaiming_uniform_param_init_fn_(
    module: nn.Module,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    init_gain: float = 0,
    fan_mode: str = "fan_in",
    init_nonlinearity: str = "leaky_relu",
    verbose: int = 0,
    **kwargs,
):
    """Kaiming-uniform initialization routed through the generic initializer."""
    del kwargs
    if verbose > 1:
        warnings.warn(f"Using nn.init.kaiming_uniform_ init fn with parameters: a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}")
    generic_param_init_fn_(
        module=module,
        init_fn_=partial(
            nn.init.kaiming_uniform_,
            a=init_gain,
            mode=fan_mode,
            nonlinearity=init_nonlinearity,
        ),
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
7,793 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def generic_param_init_fn_(
    module: nn.Module,
    init_fn_,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Initialize the parameters of a single ``module`` using ``init_fn_``.

    Handled module types: ``nn.Linear`` (weights via ``init_fn_``, biases
    zeroed, fused layers via ``fused_init_helper_``), ``nn.Embedding``
    (optionally a separate normal or uniform init), registered norm classes
    (weight=1, bias=0), and ``nn.MultiheadAttention`` (per-projection init).
    Modules tagged ``_is_residual`` have their weights divided by
    ``div_is_residual`` derived from ``init_div_is_residual``.

    Raises:
        ValueError: for a non-boolean, non-numeric ``init_div_is_residual``
            or an ``emb_init_uniform_lim`` with more than two entries.
        NotImplementedError: if ``module`` directly owns parameters of an
            unhandled type.
    """
    del kwargs
    if verbose > 1:
        warnings.warn(f"If model has bias parameters they are initialized to 0.")
    # No-op self-assignment; preserved as-is.
    init_div_is_residual = init_div_is_residual
    if init_div_is_residual is False:
        div_is_residual = 1.0
    elif init_div_is_residual is True:
        # Default residual scaling from GPT-2: 1/sqrt(2 * n_layers).
        div_is_residual = math.sqrt(2 * n_layers)
    elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
        div_is_residual = init_div_is_residual
    elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
        div_is_residual = float(init_div_is_residual)
    else:
        # Assignment is dead (the raise follows); preserved as-is.
        div_is_residual = 1.0
        raise ValueError(f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}")
    if init_div_is_residual is not False:
        if verbose > 1:
            warnings.warn(f"Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. " + f"Set `init_div_is_residual: false` in init config to disable this.")
    if isinstance(module, nn.Linear):
        # `_fused` marks fused linear layers needing per-slice init.
        if hasattr(module, "_fused"):
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
        if init_div_is_residual is not False and getattr(module, "_is_residual", False):
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        if emb_init_std is not None:
            # Embedding-specific normal init.
            std = emb_init_std
            if std == 0:
                warnings.warn(f"Embedding layer initialized to 0.")
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using normal distribution with mean=0 and std={std!r}.")
        elif emb_init_uniform_lim is not None:
            # Embedding-specific uniform init; scalar lim means [-lim, lim].
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if len(lim) > 2:
                    raise ValueError(f"Uniform init requires a min and a max limit. User input: {lim}.")
                if lim[0] == lim[1]:
                    warnings.warn(f"Embedding layer initialized to {lim[0]}.")
            else:
                if lim == 0:
                    warnings.warn(f"Embedding layer initialized to 0.")
                lim = [-lim, lim]
            (a, b) = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using uniform distribution in range {lim}.")
        else:
            # Fall back to the generic initializer for embeddings.
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
        if verbose > 1:
            warnings.warn(f"Norm weights are set to 1. If norm layer has a bias it is initialized to 0.")
        if hasattr(module, "weight") and module.weight is not None:
            torch.nn.init.ones_(module.weight)
        if hasattr(module, "bias") and module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        if module._qkv_same_embed_dim:
            assert module.in_proj_weight is not None
            assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)
            assert d_model is not None
            # Initialize the fused in_proj one d_model-sized (q/k/v) slice
            # at a time so each projection gets the intended fan-in.
            _d = d_model
            splits = (0, _d, 2 * _d, 3 * _d)
            for s, e in zip(splits[:-1], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)
            assert module.in_proj_weight is None
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        if module.in_proj_bias is not None:
            torch.nn.init.zeros_(module.in_proj_bias)
        if module.bias_k is not None:
            torch.nn.init.zeros_(module.bias_k)
        if module.bias_v is not None:
            torch.nn.init.zeros_(module.bias_v)
        init_fn_(module.out_proj.weight)
        # out_proj is the residual projection of attention.
        if init_div_is_residual is not False and getattr(module.out_proj, "_is_residual", False):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if module.out_proj.bias is not None:
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        # Only reject modules that directly own parameters; containers with
        # no direct parameters pass through silently.
        for _ in module.parameters(recurse=False):
            raise NotImplementedError(f"{module.__class__.__name__} parameters are not initialized by param_init_fn.")
def kaiming_normal_param_init_fn_(
module: nn.Module,
n_layers: int,
d_model: Optional[int] = None,
init_div_is_residual: Union[int, float, str, bool] = True,
emb_init_std: Optional[float] = None,
emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
init_gain: float = 0,
fan_mode: str = "fan_in",
init_nonlinearity: str = "leaky_relu",
verbose: int = 0,
**kwargs,
):
del kwargs
if verbose > 1:
warnings.warn(f"Using nn.init.kaiming_normal_ init fn with parameters: " + f"a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}")
kaiming_normal_ = partial(
torch.nn.init.kaiming_normal_,
a=init_gain,
mode=fan_mode,
nonlinearity=init_nonlinearity,
)
generic_param_init_fn_(
module=module,
init_fn_=kaiming_normal_,
d_model=d_model,
n_layers=n_layers,
init_div_is_residual=init_div_is_residual,
emb_init_std=emb_init_std,
emb_init_uniform_lim=emb_init_uniform_lim,
verbose=verbose,
) | null |
7,794 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def generic_param_init_fn_(
module: nn.Module,
init_fn_,
n_layers: int,
d_model: Optional[int] = None,
init_div_is_residual: Union[int, float, str, bool] = True,
emb_init_std: Optional[float] = None,
emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
verbose: int = 0,
**kwargs,
):
del kwargs
if verbose > 1:
warnings.warn(f"If model has bias parameters they are initialized to 0.")
init_div_is_residual = init_div_is_residual
if init_div_is_residual is False:
div_is_residual = 1.0
elif init_div_is_residual is True:
div_is_residual = math.sqrt(2 * n_layers)
elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
div_is_residual = init_div_is_residual
elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
div_is_residual = float(init_div_is_residual)
else:
div_is_residual = 1.0
raise ValueError(f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}")
if init_div_is_residual is not False:
if verbose > 1:
warnings.warn(f"Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. " + f"Set `init_div_is_residual: false` in init config to disable this.")
if isinstance(module, nn.Linear):
if hasattr(module, "_fused"):
fused_init_helper_(module, init_fn_)
else:
init_fn_(module.weight)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
if init_div_is_residual is not False and getattr(module, "_is_residual", False):
with torch.no_grad():
module.weight.div_(div_is_residual)
elif isinstance(module, nn.Embedding):
if emb_init_std is not None:
std = emb_init_std
if std == 0:
warnings.warn(f"Embedding layer initialized to 0.")
emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
if verbose > 1:
warnings.warn(f"Embedding layer initialized using normal distribution with mean=0 and std={std!r}.")
elif emb_init_uniform_lim is not None:
lim = emb_init_uniform_lim
if isinstance(lim, Sequence):
if len(lim) > 2:
raise ValueError(f"Uniform init requires a min and a max limit. User input: {lim}.")
if lim[0] == lim[1]:
warnings.warn(f"Embedding layer initialized to {lim[0]}.")
else:
if lim == 0:
warnings.warn(f"Embedding layer initialized to 0.")
lim = [-lim, lim]
(a, b) = lim
emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
if verbose > 1:
warnings.warn(f"Embedding layer initialized using uniform distribution in range {lim}.")
else:
emb_init_fn_ = init_fn_
emb_init_fn_(module.weight)
elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
if verbose > 1:
warnings.warn(f"Norm weights are set to 1. If norm layer has a bias it is initialized to 0.")
if hasattr(module, "weight") and module.weight is not None:
torch.nn.init.ones_(module.weight)
if hasattr(module, "bias") and module.bias is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.MultiheadAttention):
if module._qkv_same_embed_dim:
assert module.in_proj_weight is not None
assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)
assert d_model is not None
_d = d_model
splits = (0, _d, 2 * _d, 3 * _d)
for s, e in zip(splits[:-1], splits[1:]):
init_fn_(module.in_proj_weight[s:e])
else:
assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)
assert module.in_proj_weight is None
init_fn_(module.q_proj_weight)
init_fn_(module.k_proj_weight)
init_fn_(module.v_proj_weight)
if module.in_proj_bias is not None:
torch.nn.init.zeros_(module.in_proj_bias)
if module.bias_k is not None:
torch.nn.init.zeros_(module.bias_k)
if module.bias_v is not None:
torch.nn.init.zeros_(module.bias_v)
init_fn_(module.out_proj.weight)
if init_div_is_residual is not False and getattr(module.out_proj, "_is_residual", False):
with torch.no_grad():
module.out_proj.weight.div_(div_is_residual)
if module.out_proj.bias is not None:
torch.nn.init.zeros_(module.out_proj.bias)
else:
for _ in module.parameters(recurse=False):
raise NotImplementedError(f"{module.__class__.__name__} parameters are not initialized by param_init_fn.")
def xavier_uniform_param_init_fn_(
module: nn.Module,
n_layers: int,
d_model: Optional[int] = None,
init_div_is_residual: Union[int, float, str, bool] = True,
emb_init_std: Optional[float] = None,
emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
init_gain: float = 0,
verbose: int = 0,
**kwargs,
):
del kwargs
xavier_uniform_ = partial(torch.nn.init.xavier_uniform_, gain=init_gain)
if verbose > 1:
warnings.warn(f"Using torch.nn.init.xavier_uniform_ init fn with parameters: " + f"gain={init_gain}")
generic_param_init_fn_(
module=module,
init_fn_=xavier_uniform_,
d_model=d_model,
n_layers=n_layers,
init_div_is_residual=init_div_is_residual,
emb_init_std=emb_init_std,
emb_init_uniform_lim=emb_init_uniform_lim,
verbose=verbose,
) | null |
7,795 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
from .norm import NORM_CLASS_REGISTRY
def generic_param_init_fn_(
module: nn.Module,
init_fn_,
n_layers: int,
d_model: Optional[int] = None,
init_div_is_residual: Union[int, float, str, bool] = True,
emb_init_std: Optional[float] = None,
emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
verbose: int = 0,
**kwargs,
):
del kwargs
if verbose > 1:
warnings.warn(f"If model has bias parameters they are initialized to 0.")
init_div_is_residual = init_div_is_residual
if init_div_is_residual is False:
div_is_residual = 1.0
elif init_div_is_residual is True:
div_is_residual = math.sqrt(2 * n_layers)
elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
div_is_residual = init_div_is_residual
elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
div_is_residual = float(init_div_is_residual)
else:
div_is_residual = 1.0
raise ValueError(f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}")
if init_div_is_residual is not False:
if verbose > 1:
warnings.warn(f"Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. " + f"Set `init_div_is_residual: false` in init config to disable this.")
if isinstance(module, nn.Linear):
if hasattr(module, "_fused"):
fused_init_helper_(module, init_fn_)
else:
init_fn_(module.weight)
if module.bias is not None:
torch.nn.init.zeros_(module.bias)
if init_div_is_residual is not False and getattr(module, "_is_residual", False):
with torch.no_grad():
module.weight.div_(div_is_residual)
elif isinstance(module, nn.Embedding):
if emb_init_std is not None:
std = emb_init_std
if std == 0:
warnings.warn(f"Embedding layer initialized to 0.")
emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
if verbose > 1:
warnings.warn(f"Embedding layer initialized using normal distribution with mean=0 and std={std!r}.")
elif emb_init_uniform_lim is not None:
lim = emb_init_uniform_lim
if isinstance(lim, Sequence):
if len(lim) > 2:
raise ValueError(f"Uniform init requires a min and a max limit. User input: {lim}.")
if lim[0] == lim[1]:
warnings.warn(f"Embedding layer initialized to {lim[0]}.")
else:
if lim == 0:
warnings.warn(f"Embedding layer initialized to 0.")
lim = [-lim, lim]
(a, b) = lim
emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
if verbose > 1:
warnings.warn(f"Embedding layer initialized using uniform distribution in range {lim}.")
else:
emb_init_fn_ = init_fn_
emb_init_fn_(module.weight)
elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))):
if verbose > 1:
warnings.warn(f"Norm weights are set to 1. If norm layer has a bias it is initialized to 0.")
if hasattr(module, "weight") and module.weight is not None:
torch.nn.init.ones_(module.weight)
if hasattr(module, "bias") and module.bias is not None:
torch.nn.init.zeros_(module.bias)
elif isinstance(module, nn.MultiheadAttention):
if module._qkv_same_embed_dim:
assert module.in_proj_weight is not None
assert module.q_proj_weight is None and module.k_proj_weight is None and (module.v_proj_weight is None)
assert d_model is not None
_d = d_model
splits = (0, _d, 2 * _d, 3 * _d)
for s, e in zip(splits[:-1], splits[1:]):
init_fn_(module.in_proj_weight[s:e])
else:
assert module.q_proj_weight is not None and module.k_proj_weight is not None and (module.v_proj_weight is not None)
assert module.in_proj_weight is None
init_fn_(module.q_proj_weight)
init_fn_(module.k_proj_weight)
init_fn_(module.v_proj_weight)
if module.in_proj_bias is not None:
torch.nn.init.zeros_(module.in_proj_bias)
if module.bias_k is not None:
torch.nn.init.zeros_(module.bias_k)
if module.bias_v is not None:
torch.nn.init.zeros_(module.bias_v)
init_fn_(module.out_proj.weight)
if init_div_is_residual is not False and getattr(module.out_proj, "_is_residual", False):
with torch.no_grad():
module.out_proj.weight.div_(div_is_residual)
if module.out_proj.bias is not None:
torch.nn.init.zeros_(module.out_proj.bias)
else:
for _ in module.parameters(recurse=False):
raise NotImplementedError(f"{module.__class__.__name__} parameters are not initialized by param_init_fn.")
def xavier_normal_param_init_fn_(
module: nn.Module,
n_layers: int,
d_model: Optional[int] = None,
init_div_is_residual: Union[int, float, str, bool] = True,
emb_init_std: Optional[float] = None,
emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
init_gain: float = 0,
verbose: int = 0,
**kwargs,
):
xavier_normal_ = partial(torch.nn.init.xavier_normal_, gain=init_gain)
if verbose > 1:
warnings.warn(f"Using torch.nn.init.xavier_normal_ init fn with parameters: " + f"gain={init_gain}")
generic_param_init_fn_(
module=module,
init_fn_=xavier_normal_,
d_model=d_model,
n_layers=n_layers,
init_div_is_residual=init_div_is_residual,
emb_init_std=emb_init_std,
emb_init_uniform_lim=emb_init_uniform_lim,
verbose=verbose,
) | null |
7,796 | import math
import warnings
from types import MethodType
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from transformers.models.bloom.modeling_bloom import (
BaseModelOutputWithPastAndCrossAttentions,
BloomForCausalLM,
BloomModel,
CausalLMOutputWithCrossAttentions,
CrossEntropyLoss,
)
from transformers.models.bloom.modeling_bloom import _expand_mask as _expand_mask_bloom
from transformers.models.bloom.modeling_bloom import (
_make_causal_mask as _make_causal_mask_bloom,
)
from transformers.models.bloom.modeling_bloom import logging
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
from transformers.models.gptj.modeling_gptj import GPTJForCausalLM
from transformers.models.opt.modeling_opt import OPTForCausalLM
from transformers.models.opt.modeling_opt import _expand_mask as _expand_mask_opt
from transformers.models.opt.modeling_opt import (
_make_causal_mask as _make_causal_mask_opt,
)
_SUPPORTED_GPT_MODELS = (
GPT2LMHeadModel,
GPTJForCausalLM,
GPTNeoForCausalLM,
GPTNeoXForCausalLM,
)
def _convert_gpt_causal_lm_to_prefix_lm(model: CAUSAL_GPT_TYPES) -> CAUSAL_GPT_TYPES:
"""Converts a GPT-style Causal LM to a Prefix LM.
Supported HuggingFace model classes:
- `GPT2LMHeadModel`
- `GPTNeoForCausalLM`
- `GPTNeoXForCausalLM`
- `GPTJForCausalLM`
See `convert_hf_causal_lm_to_prefix_lm` for more details.
"""
if hasattr(model, "_prefix_lm_converted"):
return model
assert isinstance(model, _SUPPORTED_GPT_MODELS)
assert model.config.add_cross_attention == False, "Only supports GPT-style decoder-only models"
def _get_attn_modules(model: CAUSAL_GPT_TYPES) -> List[torch.nn.Module]:
"""Helper that gets a list of the model's attention modules.
Each module has a `bias` buffer used for causal masking. The Prefix LM
conversion adds logic to dynamically manipulate these biases to support
Prefix LM attention masking.
"""
attn_modules = []
if isinstance(model, GPTNeoXForCausalLM):
blocks = model.gpt_neox.layers
else:
blocks = model.transformer.h
for block in blocks:
if isinstance(model, GPTNeoForCausalLM):
if block.attn.attention_type != "global":
continue
attn_module = block.attn.attention
elif isinstance(model, GPTNeoXForCausalLM):
attn_module = block.attention
else:
attn_module = block.attn
attn_modules.append(attn_module)
return attn_modules
setattr(model, "_original_forward", getattr(model, "forward"))
setattr(model, "_original_generate", getattr(model, "generate"))
def forward(
self: CAUSAL_GPT_TYPES,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
attention_mask: Optional[torch.FloatTensor] = None,
bidirectional_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
"""Wraps original forward to enable PrefixLM attention."""
def call_og_forward():
if isinstance(self, GPTNeoXForCausalLM):
return self._original_forward(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
else:
return self._original_forward(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if bidirectional_mask is None:
return call_og_forward()
assert isinstance(bidirectional_mask, torch.Tensor)
attn_modules = _get_attn_modules(model)
(b, s) = bidirectional_mask.shape
max_length = attn_modules[0].bias.shape[-1]
if s > max_length:
raise ValueError(f"bidirectional_mask sequence length (={s}) exceeds the " + f"max length allowed by the model ({max_length}).")
assert s <= max_length
if s < max_length:
pad = torch.zeros(
(int(b), int(max_length - s)),
dtype=bidirectional_mask.dtype,
device=bidirectional_mask.device,
)
bidirectional_mask = torch.cat([bidirectional_mask, pad], dim=1)
bidirectional = bidirectional_mask.unsqueeze(1).unsqueeze(1)
for attn_module in attn_modules:
attn_module.bias.data = torch.logical_or(attn_module.bias.data, bidirectional)
output = call_og_forward()
for attn_module in attn_modules:
attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
return output
def generate(self: CAUSAL_GPT_TYPES, *args: tuple, **kwargs: Dict[str, Any]):
"""Wraps original generate to enable PrefixLM attention."""
attn_modules = _get_attn_modules(model)
for attn_module in attn_modules:
attn_module.bias.data[:] = 1
output = self._original_generate(*args, **kwargs)
for attn_module in attn_modules:
attn_module.bias.data = torch.tril(attn_module.bias.data[0, 0])[None, None]
return output
setattr(model, "forward", MethodType(forward, model))
setattr(model, "generate", MethodType(generate, model))
setattr(model, "_prefix_lm_converted", True)
return model
def _convert_bloom_causal_lm_to_prefix_lm(model: BloomForCausalLM) -> BloomForCausalLM:
"""Converts a BLOOM Causal LM to a Prefix LM.
Supported HuggingFace model classes:
- `BloomForCausalLM`
See `convert_hf_causal_lm_to_prefix_lm` for more details.
"""
if hasattr(model, "_prefix_lm_converted"):
return model
assert isinstance(model, BloomForCausalLM)
assert model.config.add_cross_attention == False, "Only supports BLOOM decoder-only models"
def _prepare_attn_mask(
self: BloomModel,
attention_mask: torch.Tensor,
bidirectional_mask: Optional[torch.Tensor],
input_shape: Tuple[int, int],
past_key_values_length: int,
) -> torch.BoolTensor:
combined_attention_mask = None
device = attention_mask.device
(_, src_length) = input_shape
if src_length > 1:
combined_attention_mask = _make_causal_mask_bloom(
input_shape,
device=device,
past_key_values_length=past_key_values_length,
)
if bidirectional_mask is not None:
assert attention_mask.shape == bidirectional_mask.shape
expanded_bidirectional_mask = _expand_mask_bloom(bidirectional_mask, tgt_length=src_length)
combined_attention_mask = torch.logical_and(combined_attention_mask, expanded_bidirectional_mask)
expanded_attn_mask = _expand_mask_bloom(attention_mask, tgt_length=src_length)
combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask
return combined_attention_mask
def _build_alibi_tensor(
self: BloomModel,
batch_size: int,
query_length: int,
key_length: int,
dtype: torch.dtype,
device: torch.device,
) -> torch.Tensor:
num_heads = self.config.n_head
closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
base = torch.tensor(
2 ** (-(2 ** (-(math.log2(closest_power_of_2) - 3)))),
device=device,
dtype=torch.float32,
)
powers = torch.arange(1, 1 + closest_power_of_2, device=device, dtype=torch.int32)
slopes = torch.pow(base, powers)
if closest_power_of_2 != num_heads:
extra_base = torch.tensor(
2 ** (-(2 ** (-(math.log2(2 * closest_power_of_2) - 3)))),
device=device,
dtype=torch.float32,
)
num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=device, dtype=torch.int32)
slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
qa = torch.arange(query_length, device=device, dtype=torch.int32).view(-1, 1)
ka = torch.arange(key_length, device=device, dtype=torch.int32).view(1, -1)
diffs = qa - ka + key_length - query_length
diffs = -diffs.abs()
alibi = slopes.view(1, num_heads, 1, 1) * diffs.view(1, 1, query_length, key_length)
alibi = alibi.expand(batch_size, -1, -1, -1).reshape(-1, query_length, key_length)
return alibi.to(dtype)
KeyValueT = Tuple[torch.Tensor, torch.Tensor]
def forward(
self: BloomModel,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[KeyValueT, ...]] = None,
attention_mask: Optional[torch.Tensor] = None,
bidirectional_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**deprecated_arguments,
) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
if deprecated_arguments.pop("position_ids", False) is not False:
warnings.warn(
"`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. " + "You can safely ignore passing `position_ids`.",
FutureWarning,
)
if len(deprecated_arguments) > 0:
raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
(batch_size, seq_length) = input_ids.shape
elif inputs_embeds is not None:
(batch_size, seq_length, _) = inputs_embeds.shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if past_key_values is None:
past_key_values = tuple([None] * len(self.h))
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
hidden_states = self.word_embeddings_layernorm(inputs_embeds)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
seq_length_with_past = seq_length
past_key_values_length = 0
if past_key_values[0] is not None:
tmp = past_key_values[0][0]
past_key_values_length = tmp.shape[2]
seq_length_with_past = seq_length_with_past + past_key_values_length
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
else:
attention_mask = attention_mask.to(hidden_states.device)
alibi = self._build_alibi_tensor(
batch_size=batch_size,
query_length=seq_length,
key_length=seq_length_with_past,
dtype=hidden_states.dtype,
device=hidden_states.device,
)
causal_mask = self._prepare_attn_mask(
attention_mask,
bidirectional_mask,
input_shape=(batch_size, seq_length),
past_key_values_length=past_key_values_length,
)
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if output_hidden_states:
hst = (hidden_states,)
all_hidden_states = all_hidden_states + hst
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...")
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(
*inputs,
use_cache=use_cache,
output_attentions=output_attentions,
)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
alibi,
causal_mask,
head_mask[i],
)
else:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=causal_mask,
head_mask=head_mask[i],
use_cache=use_cache,
output_attentions=output_attentions,
alibi=alibi,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
oa = (outputs[2 if use_cache else 1],)
all_self_attentions = all_self_attentions + oa
hidden_states = self.ln_f(hidden_states)
if output_hidden_states:
hst = (hidden_states,)
all_hidden_states = all_hidden_states + hst
if not return_dict:
return tuple(
(
v
for v in [
hidden_states,
presents,
all_hidden_states,
all_self_attentions,
]
if v is not None
)
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
setattr(
model.transformer,
"_prepare_attn_mask",
MethodType(_prepare_attn_mask, model.transformer),
)
setattr(
model.transformer,
"_build_alibi_tensor",
MethodType(_build_alibi_tensor, model.transformer),
)
setattr(model.transformer, "forward", MethodType(forward, model.transformer))
KeyValueT = Tuple[torch.Tensor, torch.Tensor]
def forward(
self: BloomForCausalLM,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[KeyValueT, ...]] = None,
attention_mask: Optional[torch.Tensor] = None,
bidirectional_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**deprecated_arguments,
) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
"""Replacement forward method for BloomCausalLM."""
if deprecated_arguments.pop("position_ids", False) is not False:
warnings.warn(
"`position_ids` have no functionality in BLOOM and will be removed " + "in v5.0.0. You can safely ignore passing `position_ids`.",
FutureWarning,
)
if len(deprecated_arguments) > 0:
raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
bidirectional_mask=bidirectional_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
(batch_size, seq_length, vocab_size) = shift_logits.shape
loss_fct = CrossEntropyLoss()
loss = loss_fct(
shift_logits.view(batch_size * seq_length, vocab_size),
shift_labels.view(batch_size * seq_length),
)
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
def prepare_inputs_for_generation(
self: BloomForCausalLM,
input_ids: torch.LongTensor,
past: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> dict:
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
bidirectional_mask = None
if past[0][0].shape[0] == input_ids.shape[0]:
past = self._convert_to_bloom_cache(past)
else:
bidirectional_mask = torch.ones_like(input_ids)
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": True,
"attention_mask": attention_mask,
"bidirectional_mask": bidirectional_mask,
}
setattr(model, "forward", MethodType(forward, model))
setattr(
model,
"prepare_inputs_for_generation",
MethodType(prepare_inputs_for_generation, model),
)
setattr(model, "_prefix_lm_converted", True)
return model
def _convert_opt_causal_lm_to_prefix_lm(model: OPTForCausalLM) -> OPTForCausalLM:
"""Converts an OPT Causal LM to a Prefix LM.
Supported HuggingFace model classes:
- `OPTForCausalLM`
See `convert_hf_causal_lm_to_prefix_lm` for more details.
"""
if hasattr(model, "_prefix_lm_converted"):
return model
assert isinstance(model, OPTForCausalLM)
assert model.config.add_cross_attention == False, "Only supports OPT decoder-only models"
setattr(model, "_original_forward", getattr(model, "forward"))
setattr(model, "_original_generate", getattr(model, "generate"))
model.model.decoder.bidirectional_mask = None
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
combined_attention_mask = None
if input_shape[-1] > 1:
if self.bidirectional_mask == "g":
(bsz, src_length) = input_shape
combined_attention_mask = torch.zeros(
(bsz, 1, src_length, src_length + past_key_values_length),
dtype=inputs_embeds.dtype,
device=inputs_embeds.device,
)
else:
combined_attention_mask = _make_causal_mask_opt(
input_shape,
inputs_embeds.dtype,
past_key_values_length=past_key_values_length,
).to(inputs_embeds.device)
if self.bidirectional_mask is not None:
assert attention_mask.shape == self.bidirectional_mask.shape
expanded_bidirectional_mask = _expand_mask_opt(
self.bidirectional_mask,
inputs_embeds.dtype,
tgt_len=input_shape[-1],
).to(inputs_embeds.device)
combined_attention_mask = torch.maximum(expanded_bidirectional_mask, combined_attention_mask)
if attention_mask is not None:
expanded_attn_mask = _expand_mask_opt(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device)
combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
return combined_attention_mask
setattr(
model.model.decoder,
"_prepare_decoder_attention_mask",
MethodType(_prepare_decoder_attention_mask, model.model.decoder),
)
def forward(
self: OPTForCausalLM,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
bidirectional_mask: Optional[torch.ByteTensor] = None,
head_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[List[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
def call_og_forward():
return self._original_forward(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
labels=labels,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if bidirectional_mask is None:
return call_og_forward()
self.model.decoder.bidirectional_mask = bidirectional_mask
try:
outputs = call_og_forward()
except:
self.model.decoder.bidirectional_mask = None
raise
self.model.decoder.bidirectional_mask = None
return outputs
def generate(self: OPTForCausalLM, *args: tuple, **kwargs: Dict[str, Any]):
"""Wraps original generate to enable PrefixLM-style attention."""
self.model.decoder.bidirectional_mask = "g"
try:
output = self._original_generate(*args, **kwargs)
except:
self.model.decoder.bidirectional_mask = None
raise
self.model.decoder.bidirectional_mask = None
return output
setattr(model, "forward", MethodType(forward, model))
setattr(model, "generate", MethodType(generate, model))
setattr(model, "_prefix_lm_converted", True)
return model
_SUPPORTED_HF_MODELS = _SUPPORTED_GPT_MODELS + (BloomForCausalLM, OPTForCausalLM)
CAUSAL_LM_TYPES = Union[
GPT2LMHeadModel,
GPTJForCausalLM,
GPTNeoForCausalLM,
GPTNeoXForCausalLM,
BloomForCausalLM,
OPTForCausalLM,
]
The provided code snippet includes necessary dependencies for implementing the `convert_hf_causal_lm_to_prefix_lm` function. Write a Python function `def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES` to solve the following problem:
Converts a HuggingFace Causal LM to a Prefix LM. Supported HuggingFace model classes: - `GPT2LMHeadModel` - `GPTNeoForCausalLM` - `GPTNeoXForCausalLM` - `GPTJForCausalLM` - `BloomForCausalLM` - `OPTForCausalLM` Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the `generate` method and/or select underlying methods depending on the model class. These changes preserve the model API, but add a new input to `forward`: "bidirectional_mask". Notes on training: To actually train the converted model as a Prefix LM, training batches will need to indicate the prefix/target structure by including `bidirectional_mask` as part of the batch inputs. **This is not a standard input and requires custom layers either within or after your dataloader.** In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels` such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`. That is, the prefix portion of the sequence should not generate any loss. Loss should only be generated by the target portion of the sequence. Notes on `GPTNeoForCausalLM`: To simplify the implementation, "global" and "local" attention layers are handled differently. For "global" layers, we handle conversion as described above. For "local" layers, which use a causal attention mask within a restricted local window, we do not alter the masking. Notes on `forward` method conversion: After conversion, the `forward` method will handle a new input, `bidirectional_mask`, which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions belonging to the prefix (prefix tokens can attend to one another bidirectionally), and 0 indicates token positions belonging to the target. The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset the causal masks before returning the result. 
Notes on `generate` method conversion: After conversion, the `generate` method will have the same signature but will internally convert all causal masks to be purely bidirectional, call the original `generate` method, and (where appropriate) reset the causal masks before returning the result. This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token "prompt" passed to `generate` (which is treated as the prefix) and then sequentially generates each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and previously-generated tokens (also as expected in a Prefix LM). To preserve the API, the original methods are renamed to `_original_forward` and `_original_generate`, and replaced with new `forward` and `generate` methods that wrap them, respectively. Although implementation details vary by model class.
Here is the function:
def convert_hf_causal_lm_to_prefix_lm(model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES:
    """Convert a supported HuggingFace Causal LM into a Prefix LM.

    Supported classes: ``GPT2LMHeadModel``, ``GPTNeoForCausalLM``,
    ``GPTNeoXForCausalLM``, ``GPTJForCausalLM``, ``BloomForCausalLM``,
    ``OPTForCausalLM``.

    The conversion patches ``forward`` (and, per model class, ``generate``
    and/or selected internals) so the model accepts one extra input,
    ``bidirectional_mask``: a [batch_size, seq_length] byte tensor with 1 on
    prefix positions (prefix tokens may attend to one another
    bidirectionally) and 0 on target positions. The original methods are
    preserved as ``_original_forward`` / ``_original_generate`` and wrapped,
    so the public API is unchanged; implementation details vary by class.

    Training note: batches must supply ``bidirectional_mask`` (a custom,
    non-standard input), and ``labels`` should be set to -100 on prefix
    positions so the prefix generates no loss.

    Generation note: the wrapped ``generate`` makes causal masks fully
    bidirectional while the prompt (treated as the prefix) is encoded, and
    resets them where appropriate; HF's sequential generation plus caching
    then yields correct Prefix-LM attention for generated tokens.

    GPTNeo note: only "global" attention layers are converted; "local"
    windowed-attention layers keep their causal masking unchanged.
    """
    if isinstance(model, _SUPPORTED_GPT_MODELS):
        return _convert_gpt_causal_lm_to_prefix_lm(model)
    if isinstance(model, BloomForCausalLM):
        return _convert_bloom_causal_lm_to_prefix_lm(model)
    if isinstance(model, OPTForCausalLM):
        return _convert_opt_causal_lm_to_prefix_lm(model)
    raise TypeError(f"Cannot convert model to Prefix LM. Model does not belong to set of supported HF models:\n{_SUPPORTED_HF_MODELS}")
The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset the causal masks before returning the result. Notes on `generate` method conversion: After conversion, the `generate` method will have the same signature but will internally convert all causal masks to be purely bidirectional, call the original `generate` method, and (where appropriate) reset the causal masks before returning the result. This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token "prompt" passed to `generate` (which is treated as the prefix) and then sequentially generates each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and previously-generated tokens (also as expected in a Prefix LM). To preserve the API, the original methods are renamed to `_original_forward` and `_original_generate`, and replaced with new `forward` and `generate` methods that wrap them, respectively. Although implementation details vary by model class. |
7,797 | import math
import warnings
from types import MethodType
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from transformers.models.bloom.modeling_bloom import (
BaseModelOutputWithPastAndCrossAttentions,
BloomForCausalLM,
BloomModel,
CausalLMOutputWithCrossAttentions,
CrossEntropyLoss,
)
from transformers.models.bloom.modeling_bloom import _expand_mask as _expand_mask_bloom
from transformers.models.bloom.modeling_bloom import (
_make_causal_mask as _make_causal_mask_bloom,
)
from transformers.models.bloom.modeling_bloom import logging
from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel
from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM
from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
from transformers.models.gptj.modeling_gptj import GPTJForCausalLM
from transformers.models.opt.modeling_opt import OPTForCausalLM
from transformers.models.opt.modeling_opt import _expand_mask as _expand_mask_opt
from transformers.models.opt.modeling_opt import (
_make_causal_mask as _make_causal_mask_opt,
)
The provided code snippet includes necessary dependencies for implementing the `add_bidirectional_mask_if_missing` function. Write a Python function `def add_bidirectional_mask_if_missing(batch: Dict[str, Any])` to solve the following problem:
Attempts to add bidirectional_mask to batch if missing. Raises: KeyError if bidirectional_mask is missing and can't be inferred
Here is the function:
def add_bidirectional_mask_if_missing(batch: Dict[str, Any]):
    """Ensure ``batch`` carries a ``bidirectional_mask``, deriving one if absent.

    For ICL-task batches the mask starts as a copy of ``attention_mask`` with
    the continuation (target) positions zeroed. Otherwise, positions that are
    both attended (``attention_mask == 1``) and loss-masked
    (``labels == -100``) are treated as prefix positions.

    Raises:
        KeyError: if ``bidirectional_mask`` is missing and cannot be inferred.
    """
    if "bidirectional_mask" in batch:
        return
    if batch.get("mode", None) == "icl_task":
        mask = batch["attention_mask"].clone()
        batch["bidirectional_mask"] = mask
        # Continuation tokens are the generation target: not bidirectional.
        for row, continuation_indices in enumerate(batch["continuation_indices"]):
            mask[row, continuation_indices] = 0
        return
    if "labels" in batch and "attention_mask" in batch:
        attended = torch.eq(batch["attention_mask"], 1)
        loss_masked = torch.eq(batch["labels"], -100)
        batch["bidirectional_mask"] = torch.logical_and(attended, loss_masked).type_as(batch["attention_mask"])
        return
    raise KeyError("No bidirectional_mask in batch and not sure how to construct one.")
7,798 | from typing import Union
from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
NUM_SENTINEL_TOKENS: int = 100
The provided code snippet includes necessary dependencies for implementing the `adapt_tokenizer_for_denoising` function. Write a Python function `def adapt_tokenizer_for_denoising(tokenizer: Tokenizer)` to solve the following problem:
Adds sentinel tokens and padding token (if missing). Expands the tokenizer vocabulary to include sentinel tokens used in mixture-of-denoiser tasks as well as a padding token. All added tokens are added as special tokens. No tokens are added if sentinel tokens and padding token already exist.
Here is the function:
def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
    """Extend *tokenizer* with T5-style sentinel tokens and a pad token.

    Registers ``<extra_id_0>`` … ``<extra_id_{NUM_SENTINEL_TOKENS - 1}>`` as
    special tokens, adds a ``<pad>`` token when none exists, and stashes the
    sentinel token ids on ``tokenizer.sentinel_token_ids``. Tokens that
    already exist are not duplicated.
    """
    sentinel_tokens = [f"<extra_id_{idx}>" for idx in range(NUM_SENTINEL_TOKENS)]
    tokenizer.add_tokens(sentinel_tokens, special_tokens=True)
    if tokenizer.pad_token is None:
        tokenizer.add_tokens("<pad>", special_tokens=True)
        tokenizer.pad_token = "<pad>"
        assert tokenizer.pad_token_id is not None
    # Tokenize all sentinels in one pass so their ids are cached on the tokenizer.
    joined_sentinels = "".join(sentinel_tokens)
    tokenizer.sentinel_token_ids = tokenizer(joined_sentinels, add_special_tokens=False).input_ids
7,799 | import argparse
import os
import torch
from modeling_otter import OtterForConditionalGeneration
class OtterForConditionalGeneration(OtterPreTrainedModel):
def __init__(
self,
config: OtterConfig,
):
def get_input_embeddings(self) -> nn.Module:
def set_input_embeddings(self, new_embeddings):
def get_output_embeddings(self) -> nn.Module:
def set_output_embeddings(self, new_embeddings):
def get_image_encoder(self) -> nn.Module:
def get_lang_encoder(self) -> nn.Module:
def init_weights(self):
def forward(
self,
vision_x: torch.Tensor,
lang_x: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cached_vision_x: bool = False,
clear_conditioned_layers: bool = True,
past_key_values: Optional[List[torch.FloatTensor]] = None,
use_cache: bool = False,
**kwargs,
) -> CausalLMOutputWithPast:
def _encode_vision_x(self, vision_x: torch.Tensor):
def generate(
self,
vision_x: torch.Tensor,
lang_x: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**generate_kwargs,
):
def dump_hf_model(pretrained_model_path: str, old_ckpt_path: str, new_folder_path: str) -> None:
    """Load a trained Otter checkpoint into a pretrained base model and re-save it in HF format.

    Args:
        pretrained_model_path: HF hub id or local path of the base Otter/Flamingo model.
        old_ckpt_path: path to the training checkpoint; an optional
            ``model_state_dict`` wrapper key is unwrapped.
        new_folder_path: destination directory passed to ``save_pretrained``.
    """
    old_ckpt = torch.load(old_ckpt_path, map_location="cpu")
    if old_ckpt.get("model_state_dict", None) is not None:
        old_ckpt = old_ckpt["model_state_dict"]
    new_ckpt = old_ckpt
    # BUG FIX: this function previously read the CLI global ``args`` instead of
    # its own ``pretrained_model_path`` parameter, breaking any non-CLI caller.
    model = OtterForConditionalGeneration.from_pretrained(
        pretrained_model_path,
        device_map="auto",
    )
    if "flamingo" in pretrained_model_path:
        # Flamingo checkpoints lack the instruction-following answer token.
        model.text_tokenizer.add_special_tokens({"additional_special_tokens": ["<answer>"]})
    if "LlamaForCausalLM" in model.lang_encoder.__class__.__name__:
        model.lang_encoder.resize_token_embeddings(len(model.text_tokenizer))
    _ = model.load_state_dict(new_ckpt, strict=False)
    print(f"Saving HF model to {new_folder_path}")
    model.save_pretrained(new_folder_path)
7,800 | import random
import sys
from typing import List, Optional
import torch
import torch.distributed as dist
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module
from einops import rearrange, repeat
from peft import LoraConfig, TaskType, get_peft_model
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.models.auto import AutoTokenizer
from ..falcon.modelling_RW import RWForCausalLM
from ..mpt.modeling_mpt import MPTForCausalLM
from ..mpt_redpajama.mosaic_gpt import MosaicGPT
from .configuration_otter import OtterConfig
import torch.distributed as dist
__KNOWN_DECODER_LAYERS_ATTR_NAMES = {
"opt": "model.decoder.layers",
"gptneo": "transformer.h",
"gptj": "transformer.h",
"gpt-j": "transformer.h",
"pythia": "gpt_neox.layers",
"llama": "model.layers",
"RWForCausalLM": "transformer.h",
"MPTForCausalLM": "transformer.blocks",
"MosaicGPT": "transformer.blocks",
}
def _infer_decoder_layers_attr_name(model: nn.Module):
for k in __KNOWN_DECODER_LAYERS_ATTR_NAMES:
if k.lower() in model.__class__.__name__.lower():
return __KNOWN_DECODER_LAYERS_ATTR_NAMES[k]
raise ValueError(f"We require the attribute name for the nn.ModuleList in the decoder storing the transformer block layers. Please supply this string manually.") | null |
7,801 | import random
import sys
from typing import List, Optional
import torch
import torch.distributed as dist
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module
from einops import rearrange, repeat
from peft import LoraConfig, TaskType, get_peft_model
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.models.auto import AutoTokenizer
from ..falcon.modelling_RW import RWForCausalLM
from ..mpt.modeling_mpt import MPTForCausalLM
from ..mpt_redpajama.mosaic_gpt import MosaicGPT
from .configuration_otter import OtterConfig
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `extend_instance` function. Write a Python function `def extend_instance(obj, mixin)` to solve the following problem:
Apply mixins to a class instance after creation
Here is the function:
def extend_instance(obj, mixin):
    """Rebind *obj*'s class to a dynamic subclass of ``(mixin, original class)``.

    The mixin comes first in the MRO so its overrides (e.g. ``forward``) take
    precedence; the synthesized class keeps the original class name.
    """
    original_cls = type(obj)
    obj.__class__ = type(original_cls.__name__, (mixin, original_cls), {})
7,802 | import random
import sys
from typing import List, Optional
import torch
import torch.distributed as dist
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module
from einops import rearrange, repeat
from peft import LoraConfig, TaskType, get_peft_model
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.models.auto import AutoTokenizer
from ..falcon.modelling_RW import RWForCausalLM
from ..mpt.modeling_mpt import MPTForCausalLM
from ..mpt_redpajama.mosaic_gpt import MosaicGPT
from .configuration_otter import OtterConfig
import torch.distributed as dist
def getattr_recursive(obj, att):
    """Resolve a dotted attribute path against *obj*.

    ``getattr_recursive(obj, "a.b.c")`` is equivalent to ``obj.a.b.c``;
    the empty path returns *obj* itself.
    """
    current = obj
    remaining = att
    while remaining != "":
        head, dot, tail = remaining.partition(".")
        if not dot:
            # Last path component: plain attribute lookup.
            return getattr(current, remaining)
        current = getattr(current, head)
        remaining = tail
    return current
The provided code snippet includes necessary dependencies for implementing the `setattr_recursive` function. Write a Python function `def setattr_recursive(obj, att, val)` to solve the following problem:
Set nested attribute of obj Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
Here is the function:
def setattr_recursive(obj, att, val):
    """
    Set nested attribute of obj
    Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
    """
    # Walk to the parent object of the final path component, then set the leaf.
    if "." in att:
        obj = getattr_recursive(obj, ".".join(att.split(".")[:-1]))
    setattr(obj, att.split(".")[-1], val)
7,803 | import random
import sys
from typing import List, Optional
import torch
import torch.distributed as dist
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module
from einops import rearrange, repeat
from peft import LoraConfig, TaskType, get_peft_model
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.models.auto import AutoTokenizer
from ..falcon.modelling_RW import RWForCausalLM
from ..mpt.modeling_mpt import MPTForCausalLM
from ..mpt_redpajama.mosaic_gpt import MosaicGPT
from .configuration_otter import OtterConfig
import torch.distributed as dist
def exists(val):
    # einops/flamingo-style helper: None is the only value treated as "absent".
    return val is not None
7,804 | import re
import argparse
import os
import torch
import torch.nn as nn
from transformers import CLIPVisionModel, LlamaForCausalLM, LlamaTokenizer
from otter_ai.models.otter.modeling_otter import (
OtterPreTrainedModel,
OtterLMMixin,
extend_instance,
_infer_decoder_layers_attr_name,
OtterPerceiverResampler,
)
from otter_ai.models.otter.configuration_otter import OtterConfig
class OtterModel(OtterPreTrainedModel):
def __init__(
self,
config: OtterConfig,
):
def get_input_embeddings(self) -> nn.Module:
def set_input_embeddings(self, new_embeddings):
def get_output_embeddings(self) -> nn.Module:
def set_output_embeddings(self, new_embeddings):
def rename_flamingo_checkpoint(old_ckpt: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
class OtterConfig(PretrainedConfig):
def __init__(self, vision_config=None, text_config=None, cross_attn_every_n_layers: int = 4, use_media_placement_augmentation: bool = True, **kwargs):
def to_dict(self):
def dump_hf_model(old_ckpt_path: str, new_folder_path: str) -> None:
    """Convert a raw (Flamingo-layout) Otter checkpoint into an HF folder.

    Loads the state dict from ``old_ckpt_path`` (unwrapping an optional
    ``model`` key), renames it to the HF layout, loads it non-strictly into a
    freshly built ``OtterModel``, registers the extra special tokens, and
    writes the result to ``new_folder_path`` via ``save_pretrained``.
    """
    os.makedirs(new_folder_path, exist_ok=True)
    raw_state = torch.load(old_ckpt_path, map_location="cpu")
    if raw_state.get("model", None) is not None:
        raw_state = raw_state["model"]
    model = OtterModel(OtterConfig.from_json_file("otter/config.json"))
    model.load_state_dict(rename_flamingo_checkpoint(raw_state), strict=False)
    tokenizer = model.text_tokenizer
    tokenizer.add_special_tokens({"additional_special_tokens": ["<|endofchunk|>", "<image>", "<answer>"]})
    model.lang_encoder.resize_token_embeddings(len(tokenizer))
    print(f"Saving HF model to {new_folder_path}")
    model.save_pretrained(new_folder_path)
7,805 | import argparse
import os
import torch
from modeling_otter import OtterForConditionalGeneration
class OtterForConditionalGeneration(OtterPreTrainedModel):
    """Otter conditional-generation model.

    Combines a ``CLIPVisionModel`` vision encoder, an ``OtterPerceiverResampler``,
    and a language decoder extended with cross-attention via ``OtterLMMixin``
    (see ``init_otter``). Optionally wraps the language encoder with LoRA when
    ``config.lora_config`` is present.

    NOTE(review): ``master_print`` and ``MODEL_CLASSES`` are not imported in
    this chunk — presumably provided elsewhere in the module; confirm.
    """

    config_class = OtterConfig

    def __init__(
        self,
        config: OtterConfig,
    ):
        super().__init__(config)
        ### TODO: give "LlamaForCausalLM" as the name of text_config.architectures of Llama_based flamingo
        # Pick tokenizer + decoder class from the configured architecture.
        if "llama" not in config.text_config._name_or_path:
            if config.text_config.architectures[0] == "MPTForCausalLM":
                text_tokenizer = AutoTokenizer.from_pretrained("mosaicml/mpt-7b-instruct")
                lang_encoder = MPTForCausalLM(config=config.text_config)
            elif config.text_config.architectures[0] == "MosaicGPT":
                text_tokenizer = AutoTokenizer.from_pretrained("mosaicml/mosaic-llama-redpajama-final-candidate")
                lang_encoder = MosaicGPT(config=config.text_config)
            elif config.text_config.architectures[0] == "RWForCausalLM":
                text_tokenizer = AutoTokenizer.from_pretrained("PATH-TO-YOUR-FALCON")
                lang_encoder = RWForCausalLM(config=config.text_config)
            elif config.text_config.architectures[0] == "LlamaForCausalLM":
                text_tokenizer = AutoTokenizer.from_pretrained(config.text_config._name_or_path)
                lang_encoder = LlamaForCausalLM(config=config.text_config)
            else:
                # NOTE(review): debugger breakpoint left in for unrecognized
                # architectures instead of raising — consider a ValueError.
                import pdb

                pdb.set_trace()
        else:
            text_tokenizer = AutoTokenizer.from_pretrained(config.text_config._name_or_path)
            lang_encoder = LlamaForCausalLM(config=config.text_config)
        vision_encoder = CLIPVisionModel(config=config.vision_config)
        text_tokenizer.add_special_tokens({"additional_special_tokens": ["<|endofchunk|>", "<image>", "<answer>"]})
        if text_tokenizer.pad_token is None:
            text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})
        self.text_tokenizer = text_tokenizer
        # Cache the ids of the chunk-terminator and media placeholder tokens.
        self.eoc_token_id = text_tokenizer.encode("<|endofchunk|>")[-1]
        self.media_token_id = text_tokenizer.encode("<image>")[-1]
        extend_instance(lang_encoder, OtterLMMixin)
        decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder)
        lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name)
        # if lang_encoder.__class__.__name__ == "LlamaForCausalLM":
        #     lang_encoder.resize_token_embeddings(len(text_tokenizer))
        self.lang_encoder = lang_encoder
        self.cross_attn_every_n_layers = config.cross_attn_every_n_layers
        # use_media_placement_augmentation is strictly false for Otter model
        self.use_media_placement_augmentation = False  # config.use_media_placement_augmentation
        # max_num_frames distinguishes the image model (None/1) from the video model.
        self.max_num_frames = config.max_num_frames if hasattr(config, "max_num_frames") else None
        # Informative master_print statement
        if self.max_num_frames is None or self.max_num_frames == 1:
            master_print(f"The current model version is configured for Otter-Image with max_num_frames set to {self.max_num_frames}.")
        else:
            master_print(f"The current model version is configured for Otter-Video with a maximum of {self.max_num_frames} frames.")
        vision_encoder.output_tokens = True
        self.vision_encoder = vision_encoder
        # NOTE(review): vis_dim hard-coded to CLIP ViT-L hidden size — TODO confirm
        # it should not come from config.vision_config.
        self.vis_dim = 1024
        self.perceiver = OtterPerceiverResampler(dim=self.vis_dim, max_num_frames=self.max_num_frames)
        self.lang_encoder.init_otter(
            media_token_id=self.media_token_id,
            vis_hidden_size=self.vis_dim,
            cross_attn_every_n_layers=self.cross_attn_every_n_layers,
            use_media_placement_augmentation=self.use_media_placement_augmentation,
        )
        if "lora_config" in config.__dict__:
            original_architecture_name = self.lang_encoder.__class__.__name__
            master_print(f"Using LoRA with config:{config.lora_config}")
            standard_modules = ["q_proj", "v_proj"]
            lang_encoder_short_name = MODEL_CLASSES[config.text_config.architectures[0]]
            model_to_lora_modules = {
                "llama": standard_modules,
                "opt": standard_modules,
                "gptj": standard_modules,
                "gpt_neox": ["query_key_value"],
                "mpt": ["Wqkv"],
            }
            lora_config = LoraConfig(
                r=config.lora_config["r"],
                lora_alpha=config.lora_config["lora_alpha"],
                lora_dropout=config.lora_config["lora_dropout"],
                task_type=TaskType.CAUSAL_LM,
                target_modules=model_to_lora_modules[lang_encoder_short_name],
            )
            self.lang_encoder = get_peft_model(self.lang_encoder, lora_config)
            # NOTE(review): PEFT exposes ``print_trainable_parameters``; this name
            # looks like an over-eager rename to ``master_print_*`` — confirm.
            self.lang_encoder.master_print_trainable_parameters()
            self.lang_encoder.__class__.__name__ = f"{original_architecture_name}LoRA"
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        """Return the language encoder's input embedding module."""
        return self.lang_encoder.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        """Replace the language encoder's input embedding module."""
        self.lang_encoder.set_input_embeddings(new_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        """Return the language encoder's output embedding (LM head)."""
        return self.lang_encoder.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        """Replace the language encoder's output embedding (LM head)."""
        self.lang_encoder.set_output_embeddings(new_embeddings)

    def get_image_encoder(self) -> nn.Module:
        """Return the vision encoder module."""
        return self.vision_encoder

    def get_lang_encoder(self) -> nn.Module:
        """Return the language encoder module."""
        return self.lang_encoder

    def init_weights(self):
        """Configure which parameters are trainable according to ``self.config``.

        Default: freeze everything, then selectively unfreeze the vision/lang
        encoders, LoRA weights, cross-attention layers, the perceiver, and the
        LM embeddings.

        NOTE(review): the vision/lang unfreeze blocks below are repeated three
        times verbatim — the repeats are redundant no-ops after the first;
        likely a copy-paste artifact worth cleaning up.
        """
        # Freeze all parameters in self.model if train_vision_encoder is False or train_lang_encoder is False
        if not ("train_full_model" in self.config.__dict__ and self.config.train_full_model is True):
            for param in self.parameters():
                param.requires_grad = False
        # Freeze all parameters in vision encoder
        if "train_vision_encoder" in self.config.__dict__ and self.config.train_vision_encoder is True:
            master_print("Unfreeze vision encoder.")
            for param in self.vision_encoder.parameters():
                param.requires_grad = True
        # Freeze all parameters in lang encoders except gated_cross_attn_layers
        if "train_lang_encoder" in self.config.__dict__ and self.config.train_lang_encoder is True:
            master_print("Unfreeze language decoder.")
            for name, param in self.lang_encoder.named_parameters():
                param.requires_grad = True
        # Freeze all parameters in vision encoder
        if "train_vision_encoder" in self.config.__dict__ and self.config.train_vision_encoder is True:
            for param in self.vision_encoder.parameters():
                param.requires_grad = True
        # Freeze all parameters in lang encoders except gated_cross_attn_layers
        if "train_lang_encoder" in self.config.__dict__ and self.config.train_lang_encoder is True:
            for name, param in self.lang_encoder.named_parameters():
                param.requires_grad = True
        # Freeze all parameters in vision encoder
        if "train_vision_encoder" in self.config.__dict__ and self.config.train_vision_encoder is True:
            for param in self.vision_encoder.parameters():
                param.requires_grad = True
        # Freeze all parameters in lang encoders except gated_cross_attn_layers
        if "train_lang_encoder" in self.config.__dict__ and self.config.train_lang_encoder is True:
            for name, param in self.lang_encoder.named_parameters():
                param.requires_grad = True
        if "lora_config" in self.config.__dict__:
            # Use another logic to unfreeze gated_cross_attn_layers and perceivers
            master_print(f"LoRA trainable param: {(sum(param.numel() for name, param in self.lang_encoder.named_parameters() if 'lora' in name)) / 1e6:.3f} M")
            for name, param in self.lang_encoder.named_parameters():
                if "lora" in name:
                    param.requires_grad = True
        # Freeze all parameters in lang encoders except gated_cross_attn_layers
        for name, param in self.lang_encoder.named_parameters():
            if "gated_cross_attn_layer" in name:
                param.requires_grad = True
        for name, param in self.named_parameters():
            if "perceiver" in name:
                param.requires_grad = True
        # Unfreeze LM input and output embeddings
        self.lang_encoder.get_input_embeddings().requires_grad_(True)
        ## MPTForCausalLM is tied word embedding
        if "LlamaForCausalLM" in self.lang_encoder.__class__.__name__:
            self.lang_encoder.lm_head.requires_grad_(True)
        total_params = 0
        for name, param in self.named_parameters():
            if param.requires_grad:
                total_params += param.numel()
                master_print(f"Parameter: {name}, Size: {param.numel() / 1e6:.6f} M")
        master_print(f"Total Trainable param: {total_params / 1e9:.6f} B")

    def forward(
        self,
        vision_x: torch.Tensor,
        lang_x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cached_vision_x: bool = False,
        clear_conditioned_layers: bool = True,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: bool = False,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        """
        Forward pass of Otter.
        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W) with F=1
            lang_x (torch.Tensor): Language input ids
                shape (B, T_txt)
            attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
            labels (torch.Tensor, optional): Labels. Defaults to None.
            clear_conditioned_layers: if True, clear the conditioned layers
                once the forward pass is completed. Set this to false if the
                same set of images will be reused in another subsequent
                forward pass.
            past_key_values: pre-computed values to pass to language model.
                See past_key_values documentation in Hugging Face
                CausalLM models.
            use_cache: whether to use cached key values. See use_cache
                documentation in Hugging Face CausalLM models.
        """
        assert (vision_x is not None) or use_cached_vision_x, "Must provide either vision_x or use_cached_vision_x to True."
        if use_cached_vision_x:
            # Case: use cached; vision_x should be cached and other
            # vision-related inputs should not be provided.
            assert vision_x is None, "Expect vision_x to be None when use_cached_vision_x is True."
            assert self.lang_encoder.is_conditioned()
        else:
            # Case: do not use caching (i.e. this is a standard forward pass);
            self._encode_vision_x(vision_x=vision_x)
        output = self.lang_encoder(
            input_ids=lang_x,
            attention_mask=attention_mask,
            labels=labels,
            past_key_values=past_key_values,
            use_cache=use_cache,
            **kwargs,
        )
        if clear_conditioned_layers:
            self.lang_encoder.clear_conditioned_layers()
        return output

    def _encode_vision_x(self, vision_x: torch.Tensor):
        """
        Compute media tokens from vision input by passing it through vision encoder and conditioning language model.
        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W)
                Images in the same chunk are collated along T_img, and frames are collated along F
                Currently only F=1 is supported (single-frame videos)
        rearrange code based on https://github.com/dhansmair/flamingo-mini
        """
        assert vision_x.ndim == 6, "vision_x should be of shape (b, T_img, F, C, H, W)"
        b, T, F = vision_x.shape[:3]
        vision_x = rearrange(vision_x, "b T F c h w -> (b T F) c h w")
        # Drop the CLS token ([:, 1:, :]) and keep patch tokens only.
        vision_x = self.vision_encoder(vision_x)[0][:, 1:, :]
        vision_x = rearrange(vision_x, "(b T F) v d -> b T F v d", b=b, T=T, F=F)
        vision_x = self.perceiver(vision_x)  # reshapes to (b, T, n, d)
        # Condition every decoder layer on the resampled visual tokens.
        for layer in self.lang_encoder._get_decoder_layers():
            layer.condition_vis_x(vision_x)

    def generate(
        self,
        vision_x: torch.Tensor,
        lang_x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        **generate_kwargs,
    ):
        """
        Generate text conditioned on vision and language inputs.
        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W)
                images in the same chunk are collated along T_img, and frames are collated along F
                currently only F=1 is supported (single-frame videos)
            lang_x (torch.Tensor): Language input
                shape (B, T_txt)
            max_length (int, optional): Maximum length of the output. Defaults to None.
            attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
        Returns:
            torch.Tensor: lang_x with generated tokens appended to it
        """
        if hasattr(self, "_hf_hook"):
            # add a hook to make sure that the output of lang_encoder is mapped to the same device as the lang_x
            hook = AlignDevicesHook(
                execution_device=lang_x.device,
                io_same_device=True,
                place_submodules=False,
            )
            add_hook_to_module(self.lang_encoder, hook)
        num_beams = generate_kwargs.get("num_beams", 1)
        if num_beams > 1:
            # Beam search replicates each batch row num_beams times; vision
            # features must be replicated to match.
            vision_x = vision_x.repeat_interleave(num_beams, dim=0)
        self._encode_vision_x(vision_x=vision_x)
        output = self.lang_encoder.generate(
            input_ids=lang_x,
            attention_mask=attention_mask,
            eos_token_id=self.eoc_token_id,
            **generate_kwargs,
        )
        self.lang_encoder.clear_conditioned_layers()
        return output
def dump_hf_model(pretrained_model_path: str, old_ckpt_path: str, new_folder_path: str) -> None:
    """Load a trained Otter checkpoint into a pretrained base model and re-save it in HF format.

    Args:
        pretrained_model_path: HF hub id or local path of the base Otter/Flamingo model.
        old_ckpt_path: path to the training checkpoint; an optional
            ``model_state_dict`` wrapper key is unwrapped.
        new_folder_path: destination directory passed to ``save_pretrained``.
    """
    old_ckpt = torch.load(old_ckpt_path, map_location="cpu")
    if old_ckpt.get("model_state_dict", None) is not None:
        old_ckpt = old_ckpt["model_state_dict"]
    new_ckpt = old_ckpt
    # BUG FIX: this function previously read the CLI global ``args`` instead of
    # its own ``pretrained_model_path`` parameter, breaking any non-CLI caller.
    model = OtterForConditionalGeneration.from_pretrained(
        pretrained_model_path,
        device_map="auto",
    )
    if "flamingo" in pretrained_model_path:
        # Flamingo checkpoints lack the instruction-following answer token.
        model.text_tokenizer.add_special_tokens({"additional_special_tokens": ["<answer>"]})
    if "LlamaForCausalLM" in model.lang_encoder.__class__.__name__:
        model.lang_encoder.resize_token_embeddings(len(model.text_tokenizer))
    _ = model.load_state_dict(new_ckpt, strict=False)
    print(f"Saving HF model to {new_folder_path}")
    model.save_pretrained(new_folder_path)
7,806 | import re
import argparse
import os
import torch
import torch.nn as nn
from transformers import CLIPVisionModel, LlamaForCausalLM, LlamaTokenizer
from otter_ai.models.otter.modeling_otter import (
OtterPreTrainedModel,
OtterLMMixin,
extend_instance,
_infer_decoder_layers_attr_name,
OtterPerceiverResampler,
)
from otter_ai.models.otter.configuration_otter import OtterConfig
class OtterModel(OtterPreTrainedModel):
def __init__(
self,
config: OtterConfig,
):
def get_input_embeddings(self) -> nn.Module:
def set_input_embeddings(self, new_embeddings):
def get_output_embeddings(self) -> nn.Module:
def set_output_embeddings(self, new_embeddings):
def rename_flamingo_checkpoint(old_ckpt: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
class OtterConfig(PretrainedConfig):
def __init__(self, vision_config=None, text_config=None, cross_attn_every_n_layers: int = 4, use_media_placement_augmentation: bool = True, **kwargs):
def to_dict(self):
def dump_hf_model(old_ckpt_path: str, new_folder_path: str) -> None:
    """Convert a raw Flamingo training checkpoint into an Otter HF model folder.

    Loads the checkpoint, renames its keys to the Otter layout, extends the
    tokenizer with the Otter special tokens, and saves the result with
    ``save_pretrained``.
    """
    os.makedirs(new_folder_path, exist_ok=True)
    checkpoint = torch.load(old_ckpt_path, map_location="cpu")
    # Trainer checkpoints wrap the weights under a "model" key; unwrap if present.
    if checkpoint.get("model", None) is not None:
        checkpoint = checkpoint["model"]
    config = OtterConfig.from_json_file("otter/config.json")
    model = OtterModel(config)
    renamed = rename_flamingo_checkpoint(checkpoint)
    model.load_state_dict(renamed, strict=False)
    # Register the Otter special tokens and grow the embedding table to match.
    tokenizer = model.text_tokenizer
    tokenizer.add_special_tokens({"additional_special_tokens": ["<|endofchunk|>", "<image>", "<answer>"]})
    model.lang_encoder.resize_token_embeddings(len(tokenizer))
    print(f"Saving HF model to {new_folder_path}")
    model.save_pretrained(new_folder_path)
7,807 | import math
import warnings
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch.nn import functional as F
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from .configuration_RW import RWConfig
from einops import rearrange
def rotate_half(x):
    """Rotate the two halves of the last dimension: (a, b) -> (-b, a).

    Used by rotary position embeddings.
    """
    half = x.shape[-1] // 2
    first = x[..., :half]
    second = x[..., half:]
    # dim=-1 triggers a bug in torch < 1.8.0, so use the equivalent positive index
    return torch.cat((-second, first), dim=first.ndim - 1)
7,808 | import math
import warnings
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch.nn import functional as F
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from .configuration_RW import RWConfig
from einops import rearrange
def _make_causal_mask(input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int) -> torch.BoolTensor:
batch_size, target_length = input_ids_shape
mask = torch.empty(
(target_length, target_length + past_key_values_length),
dtype=torch.bool,
device=device,
)
# ONNX doesn't support `torch.Tensor.triu` properly, thus we use this workaround
seq_ids = torch.arange(target_length, device=device)
mask[:, past_key_values_length:] = seq_ids[:, None] < seq_ids[None, :]
if past_key_values_length > 0:
mask[:, :past_key_values_length] = False
expanded_mask = mask[None, None, :, :].expand(batch_size, 1, target_length, target_length + past_key_values_length)
return expanded_mask | null |
7,809 | import math
import warnings
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch.nn import functional as F
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from .configuration_RW import RWConfig
from einops import rearrange
def _expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor:
batch_size, src_length = mask.shape
tgt_length = tgt_length if tgt_length is not None else src_length
expanded_mask = ~(mask[:, None, None, :].to(torch.bool))
return expanded_mask.expand(batch_size, 1, tgt_length, src_length) | null |
7,810 | import math
import warnings
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch.nn import functional as F
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from .configuration_RW import RWConfig
from einops import rearrange
def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
    """Build the ALiBi position bias, shaped (batch * num_heads, 1, seq_length).

    Each head gets a geometric slope multiplied by the token's position within
    its (unpadded) sequence; padded slots contribute 0.
    """
    batch_size, seq_length = attention_mask.shape
    device = attention_mask.device
    nearest_pow2 = 2 ** math.floor(math.log2(num_heads))
    # Slope schedule from the ALiBi paper, computed in fp32 for stability.
    base = torch.tensor(
        2 ** (-(2 ** -(math.log2(nearest_pow2) - 3))),
        device=device,
        dtype=torch.float32,
    )
    exponents = torch.arange(1, 1 + nearest_pow2, device=device, dtype=torch.int32)
    slopes = torch.pow(base, exponents)
    if nearest_pow2 != num_heads:
        # Head counts that are not powers of two get extra interleaved slopes.
        extra_base = torch.tensor(
            2 ** (-(2 ** -(math.log2(2 * nearest_pow2) - 3))),
            device=device,
            dtype=torch.float32,
        )
        num_extra = min(nearest_pow2, num_heads - nearest_pow2)
        extra_exponents = torch.arange(
            1,
            1 + 2 * num_extra,
            2,
            device=device,
            dtype=torch.int32,
        )
        slopes = torch.cat([slopes, torch.pow(extra_base, extra_exponents)], dim=0)
    # Note: alibi will be added to the attention bias applied to the query/key
    # product, so it must broadcast over the query dimension — hence shape
    # (batch, 1, key_length) below, analogous to T5's relative position bias:
    # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
    # Per-token position within its unpadded sequence; padded slots stay 0.
    positions = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
    alibi = slopes[..., None].bfloat16() * positions
    return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
7,811 | import math
import warnings
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch.nn import functional as F
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from .configuration_RW import RWConfig
from einops import rearrange
def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:
    """Apply dropout to ``x`` and add the residual connection.

    Dropout is a no-op when ``training`` is False.
    """
    return residual + F.dropout(x, p=prob, training=training)
7,812 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from torch import nn
from .low_precision_layernorm import LPLayerNorm
def scaled_multihead_dot_product_attention(
    query,
    key,
    value,
    n_heads,
    softmax_scale=None,
    attn_bias=None,
    key_padding_mask=None,
    is_causal=False,
    dropout_p=0.0,
    training=False,
    needs_weights=False,
):
    """Reference (pure torch) multi-head scaled dot-product attention.

    Args:
        query, key, value: (batch, seq, n_heads * head_dim) projections.
        n_heads: number of heads the hidden dimension is split into.
        softmax_scale: score scaling factor; defaults to 1/sqrt(head_dim).
        attn_bias: optional additive bias broadcastable to (b, h, s_q, s_k).
        key_padding_mask: (batch, s_k) bool mask; False marks padded keys.
        is_causal: mask out attention to future positions.
        dropout_p: dropout probability on the attention weights (in place).
        training: whether dropout is active.
        needs_weights: if True, also return the softmaxed attention weights.

    Returns:
        Tuple of (output of shape (batch, s_q, n_heads * head_dim),
        attention weights or None).
    """
    # Split heads: q/v become (b, h, s, d); k becomes (b, h, d, s) so that
    # q.matmul(k) yields the score matrix without an extra transpose.
    q = rearrange(query, "b s (h d) -> b h s d", h=n_heads)
    k = rearrange(key, "b s (h d) -> b h d s", h=n_heads)  # includes key.t()
    v = rearrange(value, "b s (h d) -> b h s d", h=n_heads)
    # Most negative finite value of the dtype; used instead of -inf for masking.
    min_val = torch.finfo(q.dtype).min
    b, _, s_q, d = q.shape
    s_k = k.size(-1)
    if softmax_scale is None:
        softmax_scale = 1 / math.sqrt(d)
    attn_weight = q.matmul(k) * softmax_scale
    if attn_bias is not None:
        # The bias may broadcast (size 1) over either score dimension, but
        # otherwise must match the score matrix exactly.
        if (attn_bias.size(-1) != 1 and attn_bias.size(-1) != s_k) or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q):
            raise RuntimeError(f"attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.")
        attn_weight = attn_weight + attn_bias
    if key_padding_mask is not None:
        if attn_bias is not None:
            warnings.warn(
                "Propogating key_padding_mask to the attention module "
                + "and applying it within the attention module can cause "
                + "unneccessary computation/memory usage. Consider integrating "
                + "into attn_bias once and passing that to each attention "
                + "module instead."
            )
        # Padded key positions get the minimum score so softmax gives them ~0 weight.
        attn_weight = attn_weight.masked_fill(~key_padding_mask.view((b, 1, 1, s_k)), min_val)
    if is_causal:
        s = max(s_q, s_k)
        causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16)
        causal_mask = causal_mask.tril()
        causal_mask = causal_mask.to(torch.bool)
        causal_mask = ~causal_mask
        # Bottom-right alignment so cached decoding (s_q < s_k) masks correctly.
        causal_mask = causal_mask[-s_q:, -s_k:]
        attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val)
    attn_weight = torch.softmax(attn_weight, dim=-1)
    if dropout_p:
        attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p, training=training, inplace=True)
    out = attn_weight.matmul(v)
    # Merge heads back into a single hidden dimension.
    out = rearrange(out, "b h s d -> b s (h d)")
    if needs_weights:
        return out, attn_weight
    return out, None
7,813 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from torch import nn
from .low_precision_layernorm import LPLayerNorm
def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool):
def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
def flash_attn_fn(
    query,
    key,
    value,
    n_heads,
    softmax_scale=None,
    attn_bias=None,
    key_padding_mask=None,
    is_causal=False,
    dropout_p=0.0,
    training=False,
    needs_weights=False,
):
    """Multi-head attention using the flash_attn CUDA kernels.

    Same interface as ``scaled_multihead_dot_product_attention``, but runs on
    unpadded ("packed") sequences. Requires fp16/bf16 CUDA inputs, does not
    support ``attn_bias``, and always returns ``(output, None)``.
    """
    try:
        from flash_attn import bert_padding, flash_attn_interface
    except:
        raise RuntimeError("Please install flash_attn==0.2.8")
    check_valid_inputs(query, key, value)
    if attn_bias is not None:
        raise NotImplementedError(f"attn_bias not implemented for flash attn.")
    batch_size, seqlen = query.shape[:2]
    if key_padding_mask is None:
        # No mask supplied: treat every key position as valid.
        key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
    # Queries may be a suffix of the keys (cached decoding); align from the right.
    query_padding_mask = key_padding_mask[:, -query.size(1) :]
    # Pack each tensor into (total_tokens, heads, head_dim), dropping padding.
    query_unpad, indices_q, cu_seqlens_q, max_seqlen_q = bert_padding.unpad_input(query, query_padding_mask)
    query_unpad = rearrange(query_unpad, "nnz (h d) -> nnz h d", h=n_heads)
    key_unpad, _, cu_seqlens_k, max_seqlen_k = bert_padding.unpad_input(key, key_padding_mask)
    key_unpad = rearrange(key_unpad, "nnz (h d) -> nnz h d", h=n_heads)
    value_unpad, _, _, _ = bert_padding.unpad_input(value, key_padding_mask)
    value_unpad = rearrange(value_unpad, "nnz (h d) -> nnz h d", h=n_heads)
    dropout_p = dropout_p if training else 0.0  # dropout only while training
    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
    output_unpad = flash_attn_interface.flash_attn_unpadded_func(
        query_unpad,
        key_unpad,
        value_unpad,
        cu_seqlens_q,
        cu_seqlens_k,
        max_seqlen_q,
        max_seqlen_k,
        dropout_p,
        softmax_scale=softmax_scale,
        causal=reset_is_causal,
        return_attn_probs=needs_weights,
    )
    # Scatter the packed outputs back to the padded (batch, seq, h*d) layout.
    output = bert_padding.pad_input(rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices_q, batch_size, seqlen)
    return output, None
7,814 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from torch import nn
from .low_precision_layernorm import LPLayerNorm
def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool):
if original_is_causal and num_query_tokens != num_key_tokens:
if num_query_tokens != 1:
raise NotImplementedError("MosaicGPT does not support query and key with different number of tokens, unless number of query tokens is 1.")
else:
return False
return original_is_causal
def check_valid_inputs(*tensors, valid_dtypes=(torch.float16, torch.bfloat16)):
    """Validate that every tensor is a CUDA tensor with a supported dtype.

    Args:
        *tensors: tensors to validate.
        valid_dtypes: accepted dtypes; defaults to the half-precision dtypes
            required by the flash-attention kernels. (Changed from a mutable
            list default to an immutable tuple.)

    Raises:
        TypeError: if any tensor has an unsupported dtype or is not on CUDA.
    """
    for tensor in tensors:
        if tensor.dtype not in valid_dtypes:
            raise TypeError(f"{tensor.dtype=} must be in {valid_dtypes=}.")
        if not tensor.is_cuda:
            raise TypeError(f"Inputs must be cuda tensors ({tensor.is_cuda=}).")
def triton_flash_attn_fn(
    query,
    key,
    value,
    n_heads,
    softmax_scale=None,
    attn_bias=None,
    key_padding_mask=None,
    is_causal=False,
    dropout_p=0.0,
    training=False,
    needs_weights=False,
):
    """Multi-head attention using the Triton flash-attention kernel.

    Same interface as ``scaled_multihead_dot_product_attention``. Dropout and
    returning attention weights are unsupported; a ``key_padding_mask`` is
    folded into ``attn_bias``. Always returns ``(output, None)``.
    """
    try:
        from flash_attn import flash_attn_triton  # type: ignore
    except:
        raise RuntimeError("Please install flash_attn==0.2.8 and triton==2.0.0.dev20221202.")
    check_valid_inputs(query, key, value)
    if dropout_p:
        raise NotImplementedError(f"Dropout not implemented for attn_impl: triton.")
    if needs_weights:
        raise NotImplementedError(f"attn_impl: triton cannot return attn weights.")
    if key_padding_mask is not None:
        warnings.warn(
            "Propagating key_padding_mask to the attention module "
            + "and applying it within the attention module can cause "
            + "unnecessary computation/memory usage. Consider integrating "
            + "into attn_bias once and passing that to each attention "
            + "module instead."
        )
        b_size, s_k = key_padding_mask.shape[:2]
        if attn_bias is None:
            attn_bias = query.new_zeros(b_size, 1, 1, s_k)
        # Fold the padding mask into the additive bias as minimum-value scores.
        attn_bias = attn_bias.masked_fill(~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min)
    # The Triton kernel expects (batch, seq, heads, head_dim) layout.
    query = rearrange(query, "b s (h d) -> b s h d", h=n_heads)
    key = rearrange(key, "b s (h d) -> b s h d", h=n_heads)
    value = rearrange(value, "b s (h d) -> b s h d", h=n_heads)
    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
    attn_output = flash_attn_triton.flash_attn_func(query, key, value, attn_bias, reset_is_causal, softmax_scale)
    # Merge heads back into a single hidden dimension.
    output = attn_output.view(*attn_output.shape[:2], -1)
    return output, None
7,815 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from torch import nn
from .low_precision_layernorm import LPLayerNorm
def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):
    """Return the shape of the attention-bias buffer required by this config,
    or None when no bias buffer is needed."""
    if attn_impl == "flash":
        # flash attention builds its mask internally; no bias buffer needed.
        return None
    if attn_impl not in ("torch", "triton"):
        raise ValueError(f"{attn_impl=} is an invalid setting.")
    # A full (seq, seq) mask is needed whenever masking cannot be purely causal.
    needs_full_mask = prefix_lm or use_sequence_id or not causal
    if alibi:
        # per-head alibi slopes; broadcast over queries unless a full mask is needed.
        return (1, n_heads, seq_len, seq_len) if needs_full_mask else (1, n_heads, 1, seq_len)
    if prefix_lm or use_sequence_id:
        return (1, 1, seq_len, seq_len)
    return None
7,816 | import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from torch import nn
from .low_precision_layernorm import LPLayerNorm
def alibi_bias(n_heads, seq_len, full=False, alibi_bias_max=8, device=None, dtype=None):
def attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):
    """Populate (or pass through) the attention-bias buffer for the chosen impl.

    For "flash" no bias tensor is used, so None is returned. For "torch" and
    "triton", the alibi position bias is added in place when enabled; otherwise
    the buffer is returned unchanged.
    """
    if attn_impl == "flash":
        return None
    if attn_impl not in ("torch", "triton"):
        raise ValueError(f"{attn_impl=} is an invalid setting.")
    if alibi:
        # in place add alibi to attn bias, matching the buffer's device/dtype
        attn_bias = attn_bias.add(
            alibi_bias(
                n_heads,
                seq_len,
                full=not causal,
                alibi_bias_max=alibi_bias_max,
                device=attn_bias.device,
                dtype=attn_bias.dtype,
            )
        )
    return attn_bias
7,817 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
def torch_default_param_init_fn_(
    module: nn.Module,
    verbose: int = 0,
    **kwargs,
):
    """Initialize a module by deferring to its own ``reset_parameters``, if any.

    Modules without a ``reset_parameters`` attribute are left untouched.
    """
    del kwargs  # unused, just to capture any extra args from the config
    if verbose > 1:
        warnings.warn(f"Initializing network using module's reset_parameters attribute")
    if hasattr(module, "reset_parameters"):
        module.reset_parameters()  # type: ignore
7,818 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
def _normal_param_init_fn_(
    module: nn.Module,
    std: float,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Initialize *module* from a zero-mean normal with the given ``std``,
    delegating the per-layer-type handling to ``generic_param_init_fn_``."""
    del kwargs  # unused, just to capture any extra args from the config
    normal_ = _normal_init_(std=std)
    if verbose > 1:
        warnings.warn(f"Using torch.nn.init.normal_ init fn mean=0.0, std={std}")
    generic_param_init_fn_(
        module=module,
        init_fn_=normal_,
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
def baseline_param_init_fn_(
    module: nn.Module,
    init_std: float,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Default init scheme: normal(0, init_std) via ``_normal_param_init_fn_``.

    ``init_std`` must be set explicitly in the model config.
    """
    del kwargs  # unused, just to capture any extra args from the config
    # Guard: the baseline scheme has no sensible default std.
    if init_std is None:
        raise ValueError("You must set model.init_std to a float value to use the default initialization scheme.")
    _normal_param_init_fn_(
        module=module,
        std=init_std,
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
7,819 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
def small_param_init_fn_(
    module: nn.Module,
    n_layers: int,
    d_model: int,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Small-init scheme from "Transformers without Tears" (Nguyen & Salazar, 2019).

    Uses std = sqrt(2 / (5 * d_model)), which is very close to kaiming normal.
    """
    del kwargs  # unused, just to capture any extra args from the config
    small_std = math.sqrt(2 / (5 * d_model))
    _normal_param_init_fn_(
        module=module,
        std=small_std,
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
The provided code snippet includes necessary dependencies for implementing the `neox_param_init_fn_` function. Write a Python function `def neox_param_init_fn_( module: nn.Module, n_layers: int, d_model: int, emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, verbose: int = 0, **kwargs, )` to solve the following problem:
From section 2.3.1 of GPT-NeoX-20B: An Open-Source Autoregressive Language Model — Black et al. (2022) see https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151 and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py
Here is the function:
def neox_param_init_fn_(
    module: nn.Module,
    n_layers: int,
    d_model: int,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """GPT-NeoX-20B initialization (section 2.3.1 of Black et al., 2022).

    Applies the small-init scheme, dividing residual weights by
    n_layers / sqrt(10) (the ratio of the small std to the "wang" std).
    See https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151
    and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py
    """
    del kwargs  # unused, just to capture any extra args from the config
    residual_div = n_layers / math.sqrt(10)  # small std / wang std
    if verbose > 1:
        warnings.warn(f"setting init_div_is_residual to {residual_div}")
    small_param_init_fn_(
        module=module,
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=residual_div,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
7,820 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
def generic_param_init_fn_(
    module: nn.Module,
    init_fn_,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Apply ``init_fn_`` to a single module, with per-layer-type handling.

    Intended to be used via ``model.apply`` so each child is visited once.
    Dispatch by module type:

    - ``nn.Linear``: weight via ``init_fn_`` (``fused_init_helper_`` if the
      layer is marked ``_fused``); bias zeroed; weights tagged
      ``_is_residual`` are divided by ``div_is_residual``.
    - ``nn.Embedding``: normal init with ``emb_init_std``, uniform init in
      ``emb_init_uniform_lim``, or ``init_fn_`` as fallback.
    - ``nn.LayerNorm``: weight = 1, bias = 0.
    - ``nn.MultiheadAttention``: q/k/v (or the fused in-proj split in thirds
      using ``d_model``) and the output projection via ``init_fn_``.
    - Any other module owning parameters raises ``NotImplementedError``.

    Args:
        module: the module to initialize.
        init_fn_: in-place initializer taking a weight tensor.
        n_layers: network depth, used for the default residual divisor.
        d_model: hidden size; required to split a fused in-proj weight.
        init_div_is_residual: True (divisor sqrt(2 * n_layers)), False to
            disable, or an explicit numeric (or numeric-string) divisor.
        emb_init_std: optional std override for embedding layers.
        emb_init_uniform_lim: optional uniform range for embedding layers.
        verbose: >1 emits warnings describing what is initialized.
    """
    del kwargs  # unused, just to capture any extra args from the config
    if verbose > 1:
        warnings.warn(f"If model has bias parameters they are initialized to 0.")
    # enable user to divide _is_residual weights by
    # a value which defaults to math.sqrt(2 * cfg.n_layers)
    init_div_is_residual = init_div_is_residual
    if init_div_is_residual is False:
        # not used, for pyright
        div_is_residual = 1.0
    elif init_div_is_residual is True:
        div_is_residual = math.sqrt(2 * n_layers)
    elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
        div_is_residual = init_div_is_residual
    elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
        # do not trust YAML parsing to always convert numbers to numbers
        div_is_residual = float(init_div_is_residual)
    else:
        # not used, for pyright
        div_is_residual = 1.0
        raise ValueError(f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}")
    if init_div_is_residual is not False:
        if verbose > 1:
            warnings.warn(f"Initializing _is_residual layers then dividing them by {div_is_residual}." + f"set `init_div_is_residual: false` in model config to disable this.")
    if isinstance(module, nn.Linear):
        # Linear
        if hasattr(module, "_fused"):
            # fused layers (e.g. packed qkv) need per-slice fan-in handling
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
        if init_div_is_residual is not False and getattr(module, "_is_residual", False):
            # shrink residual-branch projections for depth-scaled init
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        # Embedding
        if emb_init_std is not None:
            std = emb_init_std
            if std == 0:
                warnings.warn(f"Embedding layer initialized to 0.")
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using normal distribution with mean=0 and {std=}.")
        elif emb_init_uniform_lim is not None:
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if len(lim) > 2:
                    raise ValueError(f"Uniform init requires a min and a max limit. User input: {lim}.")
                if lim[0] == lim[1]:
                    warnings.warn(f"Embedding layer initialized to {lim[0]}.")
            else:
                # a scalar limit means a symmetric range [-lim, lim]
                if lim == 0:
                    warnings.warn(f"Embedding layer initialized to 0.")
                lim = [-lim, lim]
            a, b = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using uniform distribution in range {lim}.")
        else:
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, nn.LayerNorm):
        # LayerNorm
        if verbose > 1:
            warnings.warn(f"LayerNorm gamma weights are set to 1. If the layer has a bias it is initialized to 0.")
        torch.nn.init.ones_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        # torch's MultiheadAttention
        if module._qkv_same_embed_dim:
            assert module.in_proj_weight is not None
            assert module.q_proj_weight is None and module.k_proj_weight is None and module.v_proj_weight is None
            assert d_model is not None
            # in_proj_weight is actually 3 layers and should be split up for width based init
            _d = d_model
            splits = (0, _d, 2 * _d, 3 * _d)
            for s, e in zip(splits[:-1], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert module.q_proj_weight is not None and module.k_proj_weight is not None and module.v_proj_weight is not None
            assert module.in_proj_weight is None
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        # bias
        if module.in_proj_bias is not None:
            torch.nn.init.zeros_(module.in_proj_bias)
        if module.bias_k is not None:
            torch.nn.init.zeros_(module.bias_k)
        if module.bias_v is not None:
            torch.nn.init.zeros_(module.bias_v)
        # out proj
        init_fn_(module.out_proj.weight)
        if init_div_is_residual is not False and getattr(module.out_proj, "_is_residual", False):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if module.out_proj.bias is not None:
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        for _ in module.parameters(recurse=False):
            # raise error if uninitialized module has any parameters
            raise NotImplementedError(f"{module.__class__.__name__} parameters are not initialized by param_init_fn.")
def kaiming_uniform_param_init_fn_(
    module: nn.Module,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    init_gain: float = 0,
    fan_mode: str = "fan_in",
    init_nonlinearity: str = "leaky_relu",
    verbose: int = 0,
    **kwargs,
):
    """Kaiming-uniform initialization applied through ``generic_param_init_fn_``."""
    del kwargs  # unused, just to capture any extra args from the config
    if verbose > 1:
        warnings.warn(f"Using nn.init.kaiming_uniform_ init fn with parameters: " + f"a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}")
    # Pre-bind the kaiming parameters so the generic dispatcher sees a
    # one-argument initializer.
    init_fn_ = partial(
        nn.init.kaiming_uniform_,
        a=init_gain,
        mode=fan_mode,
        nonlinearity=init_nonlinearity,
    )
    generic_param_init_fn_(
        module=module,
        init_fn_=init_fn_,
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
7,821 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
def generic_param_init_fn_(
    module: nn.Module,
    init_fn_,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Apply ``init_fn_`` to a single module, with per-layer-type handling.

    Intended to be used via ``model.apply`` so each child is visited once.
    Dispatch by module type:

    - ``nn.Linear``: weight via ``init_fn_`` (``fused_init_helper_`` if the
      layer is marked ``_fused``); bias zeroed; weights tagged
      ``_is_residual`` are divided by ``div_is_residual``.
    - ``nn.Embedding``: normal init with ``emb_init_std``, uniform init in
      ``emb_init_uniform_lim``, or ``init_fn_`` as fallback.
    - ``nn.LayerNorm``: weight = 1, bias = 0.
    - ``nn.MultiheadAttention``: q/k/v (or the fused in-proj split in thirds
      using ``d_model``) and the output projection via ``init_fn_``.
    - Any other module owning parameters raises ``NotImplementedError``.

    Args:
        module: the module to initialize.
        init_fn_: in-place initializer taking a weight tensor.
        n_layers: network depth, used for the default residual divisor.
        d_model: hidden size; required to split a fused in-proj weight.
        init_div_is_residual: True (divisor sqrt(2 * n_layers)), False to
            disable, or an explicit numeric (or numeric-string) divisor.
        emb_init_std: optional std override for embedding layers.
        emb_init_uniform_lim: optional uniform range for embedding layers.
        verbose: >1 emits warnings describing what is initialized.
    """
    del kwargs  # unused, just to capture any extra args from the config
    if verbose > 1:
        warnings.warn(f"If model has bias parameters they are initialized to 0.")
    # enable user to divide _is_residual weights by
    # a value which defaults to math.sqrt(2 * cfg.n_layers)
    init_div_is_residual = init_div_is_residual
    if init_div_is_residual is False:
        # not used, for pyright
        div_is_residual = 1.0
    elif init_div_is_residual is True:
        div_is_residual = math.sqrt(2 * n_layers)
    elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
        div_is_residual = init_div_is_residual
    elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric():
        # do not trust YAML parsing to always convert numbers to numbers
        div_is_residual = float(init_div_is_residual)
    else:
        # not used, for pyright
        div_is_residual = 1.0
        raise ValueError(f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}")
    if init_div_is_residual is not False:
        if verbose > 1:
            warnings.warn(f"Initializing _is_residual layers then dividing them by {div_is_residual}." + f"set `init_div_is_residual: false` in model config to disable this.")
    if isinstance(module, nn.Linear):
        # Linear
        if hasattr(module, "_fused"):
            # fused layers (e.g. packed qkv) need per-slice fan-in handling
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
        if init_div_is_residual is not False and getattr(module, "_is_residual", False):
            # shrink residual-branch projections for depth-scaled init
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        # Embedding
        if emb_init_std is not None:
            std = emb_init_std
            if std == 0:
                warnings.warn(f"Embedding layer initialized to 0.")
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using normal distribution with mean=0 and {std=}.")
        elif emb_init_uniform_lim is not None:
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if len(lim) > 2:
                    raise ValueError(f"Uniform init requires a min and a max limit. User input: {lim}.")
                if lim[0] == lim[1]:
                    warnings.warn(f"Embedding layer initialized to {lim[0]}.")
            else:
                # a scalar limit means a symmetric range [-lim, lim]
                if lim == 0:
                    warnings.warn(f"Embedding layer initialized to 0.")
                lim = [-lim, lim]
            a, b = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using uniform distribution in range {lim}.")
        else:
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, nn.LayerNorm):
        # LayerNorm
        if verbose > 1:
            warnings.warn(f"LayerNorm gamma weights are set to 1. If the layer has a bias it is initialized to 0.")
        torch.nn.init.ones_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        # torch's MultiheadAttention
        if module._qkv_same_embed_dim:
            assert module.in_proj_weight is not None
            assert module.q_proj_weight is None and module.k_proj_weight is None and module.v_proj_weight is None
            assert d_model is not None
            # in_proj_weight is actually 3 layers and should be split up for width based init
            _d = d_model
            splits = (0, _d, 2 * _d, 3 * _d)
            for s, e in zip(splits[:-1], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert module.q_proj_weight is not None and module.k_proj_weight is not None and module.v_proj_weight is not None
            assert module.in_proj_weight is None
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        # bias
        if module.in_proj_bias is not None:
            torch.nn.init.zeros_(module.in_proj_bias)
        if module.bias_k is not None:
            torch.nn.init.zeros_(module.bias_k)
        if module.bias_v is not None:
            torch.nn.init.zeros_(module.bias_v)
        # out proj
        init_fn_(module.out_proj.weight)
        if init_div_is_residual is not False and getattr(module.out_proj, "_is_residual", False):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if module.out_proj.bias is not None:
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        for _ in module.parameters(recurse=False):
            # raise error if uninitialized module has any parameters
            raise NotImplementedError(f"{module.__class__.__name__} parameters are not initialized by param_init_fn.")
def kaiming_normal_param_init_fn_(
    module: nn.Module,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    init_gain: float = 0,
    fan_mode: str = "fan_in",
    init_nonlinearity: str = "leaky_relu",
    verbose: int = 0,
    **kwargs,
):
    """Kaiming-normal initialization applied through ``generic_param_init_fn_``."""
    del kwargs  # unused, just to capture any extra args from the config
    if verbose > 1:
        warnings.warn(f"Using nn.init.kaiming_normal_ init fn with parameters: " + f"a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}")
    # Pre-bind the kaiming parameters so the generic dispatcher sees a
    # one-argument initializer.
    init_fn_ = partial(
        torch.nn.init.kaiming_normal_,
        a=init_gain,
        mode=fan_mode,
        nonlinearity=init_nonlinearity,
    )
    generic_param_init_fn_(
        module=module,
        init_fn_=init_fn_,
        n_layers=n_layers,
        d_model=d_model,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
7,822 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
def generic_param_init_fn_(
    module: nn.Module,
    init_fn_,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Initialize the parameters owned directly by ``module`` in place.

    Dispatches on module type: ``nn.Linear`` weights (and fused linears) get
    ``init_fn_``; ``nn.Embedding`` gets ``init_fn_`` unless an explicit
    normal-std or uniform-limit override is supplied; ``nn.LayerNorm`` weights
    are set to 1 and biases to 0; ``nn.MultiheadAttention`` projections are
    initialized per sub-weight. Any other module that owns parameters raises.

    Args:
        module: module whose own (non-recursive) parameters are initialized.
        init_fn_: in-place initializer applied to weight tensors.
        n_layers: transformer depth; used for the default residual rescale.
        d_model: model width; required to split MHA's fused ``in_proj_weight``.
        init_div_is_residual: if truthy, weights flagged ``_is_residual`` are
            divided by ``sqrt(2 * n_layers)`` (True) or by the given number
            (numeric value or numeric string).
        emb_init_std: optional normal-init std override for embeddings.
        emb_init_uniform_lim: optional uniform-init limit(s) for embeddings;
            a scalar ``lim`` means the symmetric range ``[-lim, lim]``.
        verbose: if > 1, emit informational warnings.
        **kwargs: ignored; tolerates extra config keys.

    Raises:
        ValueError: for a non-numeric ``init_div_is_residual`` value or a
            malformed ``emb_init_uniform_lim``.
        NotImplementedError: if ``module`` owns parameters but no init rule
            matches its type.
    """
    del kwargs  # unused, just to capture any extra args from the config
    if verbose > 1:
        warnings.warn(f"If model has bias parameters they are initialized to 0.")
    # enable user to divide _is_residual weights by
    # a value which defaults to math.sqrt(2 * cfg.n_layers)
    if init_div_is_residual is False:
        # not used, for pyright
        div_is_residual = 1.0
    elif init_div_is_residual is True:
        div_is_residual = math.sqrt(2 * n_layers)
    elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
        div_is_residual = init_div_is_residual
    elif isinstance(init_div_is_residual, str):
        # do not trust YAML parsing to always convert numbers to numbers.
        # BUGFIX: float() also accepts decimal strings such as "2.5", which the
        # previous str.isnumeric() check wrongly rejected.
        try:
            div_is_residual = float(init_div_is_residual)
        except ValueError:
            raise ValueError(f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}")
    else:
        # not used, for pyright
        div_is_residual = 1.0
        raise ValueError(f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}")
    if init_div_is_residual is not False:
        if verbose > 1:
            warnings.warn(f"Initializing _is_residual layers then dividing them by {div_is_residual}." + f"set `init_div_is_residual: false` in model config to disable this.")
    if isinstance(module, nn.Linear):
        # Linear (a "_fused" linear packs several logical layers in one weight)
        if hasattr(module, "_fused"):
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
        if init_div_is_residual is not False and getattr(module, "_is_residual", False):
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        # Embedding: explicit std / uniform-limit overrides win over init_fn_
        if emb_init_std is not None:
            std = emb_init_std
            if std == 0:
                warnings.warn(f"Embedding layer initialized to 0.")
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using normal distribution with mean=0 and {std=}.")
        elif emb_init_uniform_lim is not None:
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if len(lim) > 2:
                    raise ValueError(f"Uniform init requires a min and a max limit. User input: {lim}.")
                if lim[0] == lim[1]:
                    warnings.warn(f"Embedding layer initialized to {lim[0]}.")
            else:
                # scalar limit means the symmetric range [-lim, lim]
                if lim == 0:
                    warnings.warn(f"Embedding layer initialized to 0.")
                lim = [-lim, lim]
            a, b = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using uniform distribution in range {lim}.")
        else:
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, nn.LayerNorm):
        # LayerNorm
        if verbose > 1:
            warnings.warn(f"LayerNorm gamma weights are set to 1. If the layer has a bias it is initialized to 0.")
        torch.nn.init.ones_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        # torch's MultiheadAttention
        if module._qkv_same_embed_dim:
            assert module.in_proj_weight is not None
            assert module.q_proj_weight is None and module.k_proj_weight is None and module.v_proj_weight is None
            assert d_model is not None
            # in_proj_weight is actually 3 layers and should be split up for width based init
            _d = d_model
            splits = (0, _d, 2 * _d, 3 * _d)
            for s, e in zip(splits[:-1], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert module.q_proj_weight is not None and module.k_proj_weight is not None and module.v_proj_weight is not None
            assert module.in_proj_weight is None
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        # bias
        if module.in_proj_bias is not None:
            torch.nn.init.zeros_(module.in_proj_bias)
        if module.bias_k is not None:
            torch.nn.init.zeros_(module.bias_k)
        if module.bias_v is not None:
            torch.nn.init.zeros_(module.bias_v)
        # out proj
        init_fn_(module.out_proj.weight)
        if init_div_is_residual is not False and getattr(module.out_proj, "_is_residual", False):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if module.out_proj.bias is not None:
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        for _ in module.parameters(recurse=False):
            # raise error if uninitialized module has any parameters
            raise NotImplementedError(f"{module.__class__.__name__} parameters are not initialized by param_init_fn.")
def xavier_uniform_param_init_fn_(
    module: nn.Module,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    init_gain: float = 0,
    verbose: int = 0,
    **kwargs,
):
    """Initialize ``module`` with Xavier-uniform weights.

    Thin wrapper around ``generic_param_init_fn_`` that fixes the weight init
    to ``torch.nn.init.xavier_uniform_`` with the given ``init_gain``; all
    other arguments are forwarded unchanged.
    """
    # Tolerate (and ignore) any extra keys coming from the config.
    del kwargs
    weight_init_fn = partial(torch.nn.init.xavier_uniform_, gain=init_gain)
    if verbose > 1:
        warnings.warn(f"Using torch.nn.init.xavier_uniform_ init fn with parameters: gain={init_gain}")
    generic_param_init_fn_(
        module=module,
        init_fn_=weight_init_fn,
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
7,823 | import math
import warnings
from collections.abc import Sequence
from functools import partial
from typing import Optional, Tuple, Union
import torch
from torch import nn
def generic_param_init_fn_(
    module: nn.Module,
    init_fn_,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    verbose: int = 0,
    **kwargs,
):
    """Initialize the parameters owned directly by ``module`` in place.

    Dispatches on module type: ``nn.Linear`` weights (and fused linears) get
    ``init_fn_``; ``nn.Embedding`` gets ``init_fn_`` unless an explicit
    normal-std or uniform-limit override is supplied; ``nn.LayerNorm`` weights
    are set to 1 and biases to 0; ``nn.MultiheadAttention`` projections are
    initialized per sub-weight. Any other module that owns parameters raises.

    Args:
        module: module whose own (non-recursive) parameters are initialized.
        init_fn_: in-place initializer applied to weight tensors.
        n_layers: transformer depth; used for the default residual rescale.
        d_model: model width; required to split MHA's fused ``in_proj_weight``.
        init_div_is_residual: if truthy, weights flagged ``_is_residual`` are
            divided by ``sqrt(2 * n_layers)`` (True) or by the given number
            (numeric value or numeric string).
        emb_init_std: optional normal-init std override for embeddings.
        emb_init_uniform_lim: optional uniform-init limit(s) for embeddings;
            a scalar ``lim`` means the symmetric range ``[-lim, lim]``.
        verbose: if > 1, emit informational warnings.
        **kwargs: ignored; tolerates extra config keys.

    Raises:
        ValueError: for a non-numeric ``init_div_is_residual`` value or a
            malformed ``emb_init_uniform_lim``.
        NotImplementedError: if ``module`` owns parameters but no init rule
            matches its type.
    """
    del kwargs  # unused, just to capture any extra args from the config
    if verbose > 1:
        warnings.warn(f"If model has bias parameters they are initialized to 0.")
    # enable user to divide _is_residual weights by
    # a value which defaults to math.sqrt(2 * cfg.n_layers)
    if init_div_is_residual is False:
        # not used, for pyright
        div_is_residual = 1.0
    elif init_div_is_residual is True:
        div_is_residual = math.sqrt(2 * n_layers)
    elif isinstance(init_div_is_residual, float) or isinstance(init_div_is_residual, int):
        div_is_residual = init_div_is_residual
    elif isinstance(init_div_is_residual, str):
        # do not trust YAML parsing to always convert numbers to numbers.
        # BUGFIX: float() also accepts decimal strings such as "2.5", which the
        # previous str.isnumeric() check wrongly rejected.
        try:
            div_is_residual = float(init_div_is_residual)
        except ValueError:
            raise ValueError(f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}")
    else:
        # not used, for pyright
        div_is_residual = 1.0
        raise ValueError(f"Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}")
    if init_div_is_residual is not False:
        if verbose > 1:
            warnings.warn(f"Initializing _is_residual layers then dividing them by {div_is_residual}." + f"set `init_div_is_residual: false` in model config to disable this.")
    if isinstance(module, nn.Linear):
        # Linear (a "_fused" linear packs several logical layers in one weight)
        if hasattr(module, "_fused"):
            fused_init_helper_(module, init_fn_)
        else:
            init_fn_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
        if init_div_is_residual is not False and getattr(module, "_is_residual", False):
            with torch.no_grad():
                module.weight.div_(div_is_residual)
    elif isinstance(module, nn.Embedding):
        # Embedding: explicit std / uniform-limit overrides win over init_fn_
        if emb_init_std is not None:
            std = emb_init_std
            if std == 0:
                warnings.warn(f"Embedding layer initialized to 0.")
            emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using normal distribution with mean=0 and {std=}.")
        elif emb_init_uniform_lim is not None:
            lim = emb_init_uniform_lim
            if isinstance(lim, Sequence):
                if len(lim) > 2:
                    raise ValueError(f"Uniform init requires a min and a max limit. User input: {lim}.")
                if lim[0] == lim[1]:
                    warnings.warn(f"Embedding layer initialized to {lim[0]}.")
            else:
                # scalar limit means the symmetric range [-lim, lim]
                if lim == 0:
                    warnings.warn(f"Embedding layer initialized to 0.")
                lim = [-lim, lim]
            a, b = lim
            emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b)
            if verbose > 1:
                warnings.warn(f"Embedding layer initialized using uniform distribution in range {lim}.")
        else:
            emb_init_fn_ = init_fn_
        emb_init_fn_(module.weight)
    elif isinstance(module, nn.LayerNorm):
        # LayerNorm
        if verbose > 1:
            warnings.warn(f"LayerNorm gamma weights are set to 1. If the layer has a bias it is initialized to 0.")
        torch.nn.init.ones_(module.weight)
        if module.bias is not None:
            torch.nn.init.zeros_(module.bias)
    elif isinstance(module, nn.MultiheadAttention):
        # torch's MultiheadAttention
        if module._qkv_same_embed_dim:
            assert module.in_proj_weight is not None
            assert module.q_proj_weight is None and module.k_proj_weight is None and module.v_proj_weight is None
            assert d_model is not None
            # in_proj_weight is actually 3 layers and should be split up for width based init
            _d = d_model
            splits = (0, _d, 2 * _d, 3 * _d)
            for s, e in zip(splits[:-1], splits[1:]):
                init_fn_(module.in_proj_weight[s:e])
        else:
            assert module.q_proj_weight is not None and module.k_proj_weight is not None and module.v_proj_weight is not None
            assert module.in_proj_weight is None
            init_fn_(module.q_proj_weight)
            init_fn_(module.k_proj_weight)
            init_fn_(module.v_proj_weight)
        # bias
        if module.in_proj_bias is not None:
            torch.nn.init.zeros_(module.in_proj_bias)
        if module.bias_k is not None:
            torch.nn.init.zeros_(module.bias_k)
        if module.bias_v is not None:
            torch.nn.init.zeros_(module.bias_v)
        # out proj
        init_fn_(module.out_proj.weight)
        if init_div_is_residual is not False and getattr(module.out_proj, "_is_residual", False):
            with torch.no_grad():
                module.out_proj.weight.div_(div_is_residual)
        if module.out_proj.bias is not None:
            torch.nn.init.zeros_(module.out_proj.bias)
    else:
        for _ in module.parameters(recurse=False):
            # raise error if uninitialized module has any parameters
            raise NotImplementedError(f"{module.__class__.__name__} parameters are not initialized by param_init_fn.")
def xavier_normal_param_init_fn_(
    module: nn.Module,
    n_layers: int,
    d_model: Optional[int] = None,
    init_div_is_residual: Union[int, float, str, bool] = True,
    emb_init_std: Optional[float] = None,
    emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None,
    init_gain: float = 0,
    verbose: int = 0,
    **kwargs,
):
    """Initialize ``module`` with Xavier-normal weights.

    Thin wrapper around ``generic_param_init_fn_`` that fixes the weight init
    to ``torch.nn.init.xavier_normal_`` with the given ``init_gain``; all
    other arguments are forwarded unchanged.
    """
    # CONSISTENCY FIX: the sibling *_param_init_fn_ helpers all discard extra
    # config kwargs explicitly; this one previously omitted the `del kwargs`.
    del kwargs  # unused, just to capture any extra args from the config
    xavier_normal_ = partial(torch.nn.init.xavier_normal_, gain=init_gain)
    if verbose > 1:
        warnings.warn(f"Using torch.nn.init.xavier_normal_ init fn with parameters: " + f"gain={init_gain}")
    generic_param_init_fn_(
        module=module,
        init_fn_=xavier_normal_,
        d_model=d_model,
        n_layers=n_layers,
        init_div_is_residual=init_div_is_residual,
        emb_init_std=emb_init_std,
        emb_init_uniform_lim=emb_init_uniform_lim,
        verbose=verbose,
    )
7,824 | import torch
import torch.nn.functional as F
def _cast_if_autocast_enabled(tensor):
    """Cast ``tensor`` to the active autocast dtype for its device.

    Returns the tensor unchanged when autocast is disabled. Only CPU and CUDA
    tensors are handled; any other device type raises NotImplementedError.
    """
    if not torch.is_autocast_enabled():
        return tensor
    device_type = tensor.device.type
    if device_type == "cuda":
        target_dtype = torch.get_autocast_gpu_dtype()
    elif device_type == "cpu":
        target_dtype = torch.get_autocast_cpu_dtype()
    else:
        raise NotImplementedError()
    return tensor.to(dtype=target_dtype)
7,825 | import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from flash_attn.ops.layer_norm import layer_norm as fused_layer_norm
from flash_attn.ops.fused_dense import fused_mlp_func
from flash_attn.layers.rotary import apply_rotary_emb as fused_apply_rotary_emb
from transformers.activations import ACT2FN
from flash_attn import flash_attn_func
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from transformers import PersimmonConfig
The provided code snippet includes necessary dependencies for implementing the `_make_causal_mask` function. Write a Python function `def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0)` to solve the following problem:
Make the additive causal mask used for unidirectional (autoregressive) self-attention, where each position may attend only to itself and earlier positions.
Here is the function:
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0):
    """
    Build the additive causal attention mask for autoregressive decoding.

    Returns a ``(bsz, 1, tgt_len, tgt_len + past_key_values_length)`` tensor
    holding ``0`` where attention is permitted (the current and earlier
    positions, plus all cached past positions) and ``torch.finfo(dtype).min``
    where attention is masked out.
    """
    bsz, tgt_len = input_ids_shape
    neg_inf = torch.finfo(dtype).min
    # Start fully masked, then open up the lower triangle (j <= i).
    causal = torch.full((tgt_len, tgt_len), neg_inf, device=device)
    positions = torch.arange(tgt_len, device=device)
    causal = causal.masked_fill(positions[None, :] <= positions[:, None], 0)
    causal = causal.to(dtype)
    if past_key_values_length > 0:
        # Cached (past) key positions are always visible to every query.
        past = torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device)
        causal = torch.cat([past, causal], dim=-1)
    return causal[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
7,826 | import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from flash_attn.ops.layer_norm import layer_norm as fused_layer_norm
from flash_attn.ops.fused_dense import fused_mlp_func
from flash_attn.layers.rotary import apply_rotary_emb as fused_apply_rotary_emb
from transformers.activations import ACT2FN
from flash_attn import flash_attn_func
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from transformers import PersimmonConfig
The provided code snippet includes necessary dependencies for implementing the `_expand_mask` function. Write a Python function `def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None)` to solve the following problem:
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
Here is the function:
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.

    Positions where ``mask`` is 1 become 0 (attend); positions where it is 0
    become ``torch.finfo(dtype).min`` (masked out).
    """
    bsz, src_len = mask.size()
    if tgt_len is None:
        tgt_len = src_len
    # Broadcast across a singleton head dim and the target length.
    expanded = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
    inverted = 1.0 - expanded
    # Nonzero entries of the inverted mask mark originally-masked positions.
    return inverted.masked_fill(inverted.to(torch.bool), torch.finfo(dtype).min)
7,827 | import math
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from flash_attn.ops.layer_norm import layer_norm as fused_layer_norm
from flash_attn.ops.fused_dense import fused_mlp_func
from flash_attn.layers.rotary import apply_rotary_emb as fused_apply_rotary_emb
from transformers.activations import ACT2FN
from flash_attn import flash_attn_func
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from transformers import PersimmonConfig
def rotate_half(x):
    """Rotate the two halves of the last dimension: (a, b) -> (-b, a)."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    """Apply rotary position embeddings to query and key tensors.

    ``cos``/``sin`` are per-position tables indexed by ``position_ids``; the
    gathered values are broadcast over the head dimension.
    """
    cos_gathered = cos[position_ids].unsqueeze(1)  # [seq_len, dim] -> [batch_size, 1, seq_len, head_dim]
    sin_gathered = sin[position_ids].unsqueeze(1)
    rotated_q = (q * cos_gathered) + (rotate_half(q) * sin_gathered)
    rotated_k = (k * cos_gathered) + (rotate_half(k) * sin_gathered)
    return rotated_q, rotated_k
7,828 | import re
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from transformers.processing_utils import ProcessorMixin
from transformers.utils import TensorType, is_torch_available, logging, requires_backends
from transformers.tokenization_utils_base import TruncationStrategy, PaddingStrategy
The provided code snippet includes necessary dependencies for implementing the `full_unpacked_stream_to_tensor` function. Write a Python function `def full_unpacked_stream_to_tensor( all_bi_tokens_to_place: List[int], full_unpacked_stream: List["torch.Tensor"], fill_value: int, batch_size: int, new_seq_len: int, offset: int, ) -> "torch.Tensor"` to solve the following problem:
Takes an unpacked stream of tokens (i.e. a list of tensors, one for each item in the batch) and does the required padding to create a single tensor for the batch of shape batch_size x new_seq_len.
Here is the function:
def full_unpacked_stream_to_tensor(
    all_bi_tokens_to_place: List[int],
    full_unpacked_stream: List["torch.Tensor"],
    fill_value: int,
    batch_size: int,
    new_seq_len: int,
    offset: int,
) -> "torch.Tensor":
    """Takes an unpacked stream of tokens (i.e. a list of tensors, one for each item in the batch) and does
    the required padding to create a single tensor for the batch of shape batch_size x new_seq_len.

    Args:
        all_bi_tokens_to_place: number of tokens to copy for each batch entry.
        full_unpacked_stream: one 1-D tensor per batch entry.
        fill_value: padding value for unused positions.
        batch_size: expected number of batch entries.
        new_seq_len: length of the packed sequence dimension.
        offset: index into each stream at which copying starts.

    Returns:
        A ``(batch_size, new_seq_len)`` tensor where each row holds its
        stream's ``[offset : offset + tokens_to_place]`` slice followed by
        ``fill_value`` padding.

    Raises:
        ValueError: if either input list does not have ``batch_size`` entries.
    """
    # Explicit checks instead of `assert`: asserts are stripped under `python -O`.
    if len(all_bi_tokens_to_place) != batch_size:
        raise ValueError(f"Expected {batch_size} token counts, got {len(all_bi_tokens_to_place)}.")
    if len(full_unpacked_stream) != batch_size:
        raise ValueError(f"Expected {batch_size} streams, got {len(full_unpacked_stream)}.")
    # Create padded tensors for the full batch.
    new_padded_tensor = torch.full(
        [batch_size, new_seq_len],
        fill_value=fill_value,
        dtype=full_unpacked_stream[0].dtype,
        device=full_unpacked_stream[0].device,
    )
    # Place each batch entry into the batch tensor.
    for bi in range(batch_size):
        tokens_to_place = all_bi_tokens_to_place[bi]
        new_padded_tensor[bi, :tokens_to_place] = full_unpacked_stream[bi][offset : tokens_to_place + offset]
    return new_padded_tensor
7,829 | import re
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from transformers.processing_utils import ProcessorMixin
from transformers.utils import TensorType, is_torch_available, logging, requires_backends
from transformers.tokenization_utils_base import TruncationStrategy, PaddingStrategy
The provided code snippet includes necessary dependencies for implementing the `construct_full_unpacked_stream` function. Write a Python function `def construct_full_unpacked_stream( num_real_text_tokens: Union[List[List[int]], "torch.Tensor"], input_stream: "torch.Tensor", image_tokens: List[List["torch.Tensor"]], batch_size: int, num_sub_sequences: int, ) -> List["torch.Tensor"]` to solve the following problem:
Takes an input_stream tensor of shape B x S x ?. For each subsequence, adds any required padding to account for images and then unpacks the subsequences to create a single sequence per item in the batch. Returns a list of tensors, one for each item in the batch.
Here is the function:
def construct_full_unpacked_stream(
    num_real_text_tokens: Union[List[List[int]], "torch.Tensor"],
    input_stream: "torch.Tensor",
    image_tokens: List[List["torch.Tensor"]],
    batch_size: int,
    num_sub_sequences: int,
) -> List["torch.Tensor"]:
    """Prepend each batch entry's image tokens to its text stream and trim padding.

    ``input_stream`` has shape B x S x ?; only the first subsequence (index 0
    along S) is used. Returns one variable-length tensor per batch item.
    """
    # TODO Remove this logic in a subsequent release since subsequences are not supported.
    unpacked_streams: List["torch.Tensor"] = []
    for batch_index in range(batch_size):
        image_adjustment = image_tokens[batch_index][0]
        # Image placeholder tokens go first, followed by the text stream.
        merged = torch.cat([image_adjustment, input_stream[batch_index, 0]], dim=0)
        # Keep only the real (non-padding) tokens of the merged sequence.
        real_token_count = image_adjustment.shape[0] + num_real_text_tokens[batch_index][0]
        unpacked_streams.append(merged[:real_token_count])
    return unpacked_streams
7,830 | import re
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from transformers.processing_utils import ProcessorMixin
from transformers.utils import TensorType, is_torch_available, logging, requires_backends
from transformers.tokenization_utils_base import TruncationStrategy, PaddingStrategy
# Module-level logger obtained through transformers' logging wrapper.
logger = logging.get_logger(__name__)
# Marker inserted before the answer span when tokenizing prompts.
# NOTE(review): presumably a dedicated vocab entry — it is looked up via
# tokenizer.vocab in _tokenize_prompts_with_image_and_batch; confirm.
BEGINNING_OF_ANSWER_STRING = "<0x04>"
def _transform_coordinates_and_tokenize(prompt: str, scale_factor: float, tokenizer) -> List[int]:
    """
    Tokenize ``prompt``, converting coordinate tags along the way:
    - map <box> <point> and </box> </point> to their respective token tags
    - extract the coordinates from each tag
    - transform the coordinates into the transformed image space
    - return the prompt tokens with the transformed coordinates and new tags

    Bounding boxes and points MUST be in the following format:
    <box>y1, x1, y2, x2</box> <point>x, y</point> — the spaces and punctuation
    above are NOT optional.
    """
    # Replace the textual point/box tags with token-tag markers, then split the
    # prompt into alternating plain-text / location segments.
    tagged_prompt = _replace_string_repr_with_token_tags(prompt)
    segments = _segment_prompt_into_text_token_conversions(tagged_prompt)
    token_ids: List[int] = []
    for segment in segments:
        # segment[0] is the text, segment[1] flags a location (box/point) span.
        if segment[1]:
            # Location segment: tokenize with the coordinate transform applied;
            # the helper surrounds the result with the open and close tag tokens.
            token_ids.extend(_transform_within_tags(segment[0], scale_factor, tokenizer))
        else:
            # Plain text: tokenize normally.
            token_ids.extend(tokenizer(segment[0], add_special_tokens=False).input_ids)
    return token_ids
The provided code snippet includes necessary dependencies for implementing the `_tokenize_prompts_with_image_and_batch` function. Write a Python function `def _tokenize_prompts_with_image_and_batch( tokenizer, prompts: List[List[str]], scale_factors: Optional[List[List["torch.Tensor"]]], max_tokens_to_generate: int, max_position_embeddings: int, add_BOS: bool, # Same issue with types as above add_beginning_of_answer_token: bool, ) -> Tuple["torch.Tensor", "torch.Tensor"]` to solve the following problem:
Given a set of prompts and number of tokens to generate: - tokenize prompts - set the sequence length to be the max of length of prompts plus the number of tokens we would like to generate - pad all the sequences to this length so we can convert them into a 3D tensor.
Here is the function:
def _tokenize_prompts_with_image_and_batch(
    tokenizer,
    prompts: List[List[str]],
    scale_factors: Optional[List[List["torch.Tensor"]]],
    max_tokens_to_generate: int,
    max_position_embeddings: int,
    add_BOS: bool,  # Same issue with types as above
    add_beginning_of_answer_token: bool,
) -> Tuple["torch.Tensor", "torch.Tensor"]:
    """
    Given a set of prompts and number of tokens to generate:
    - tokenize prompts
    - set the sequence length to be the max of length of prompts plus the number of tokens we would like to generate
    - pad all the sequences to this length so we can convert them into a 3D tensor.

    Returns:
        A ``(prompts_tokens, prompts_length)`` pair of int64 tensors: the
        padded token ids and the unpadded per-prompt lengths.

    Raises:
        ValueError: if any prompt exceeds the padded sample length.
    """
    # If not tool use, transform the coordinates while tokenizing
    if scale_factors is not None:
        transformed_prompt_tokens = []
        for prompt_seq, scale_factor_seq in zip(prompts, scale_factors):
            transformed_prompt_tokens.append([_transform_coordinates_and_tokenize(prompt, scale_factor.item(), tokenizer) for prompt, scale_factor in zip(prompt_seq, scale_factor_seq)])
    else:
        transformed_prompt_tokens = [[tokenizer.tokenize(prompt) for prompt in prompt_seq] for prompt_seq in prompts]
    prompts_tokens = transformed_prompt_tokens
    if add_BOS:
        bos_token = tokenizer.vocab["<s>"]
    else:
        bos_token = tokenizer.vocab["|ENDOFTEXT|"]
    prompts_tokens = [[[bos_token] + x for x in prompt_seq] for prompt_seq in prompts_tokens]
    if add_beginning_of_answer_token:
        boa = tokenizer.vocab[BEGINNING_OF_ANSWER_STRING]
        # Only add bbox open token to the last subsequence since that is what will be completed
        for token_seq in prompts_tokens:
            token_seq[-1].append(boa)
    # Now we have a list of list of tokens which each list has a different
    # size. We want to extend this list to:
    # - incorporate the tokens that need to be generated
    # - make all the sequences equal length.
    # Get the prompts length.
    prompts_length = [[len(x) for x in prompts_tokens_seq] for prompts_tokens_seq in prompts_tokens]
    # Get the max prompts length.
    max_prompt_len: int = np.max(prompts_length)
    # Number of tokens in the each sample of the batch.
    samples_length = min(max_prompt_len + max_tokens_to_generate, max_position_embeddings)
    if max_prompt_len + max_tokens_to_generate > max_position_embeddings:
        # BUGFIX: these two fragments were previously passed as two positional
        # arguments to logger.warning(); logging treats the second as a
        # %-format argument, so the record failed to format and the warning was
        # lost. Emit a single message string instead.
        logger.warning(
            f"Max subsequence prompt length of {max_prompt_len} + max tokens to generate {max_tokens_to_generate} "
            f"exceeds context length of {max_position_embeddings}. Will generate as many tokens as possible."
        )
    # Now update the list of list to be of the same size: samples_length.
    for prompt_tokens_seq, prompts_length_seq in zip(prompts_tokens, prompts_length):
        for prompt_tokens, prompt_length in zip(prompt_tokens_seq, prompts_length_seq):
            if len(prompt_tokens) > samples_length:
                raise ValueError("Length of subsequence prompt exceeds sequence length.")
            padding_size = samples_length - prompt_length
            prompt_tokens.extend([tokenizer.vocab["|ENDOFTEXT|"]] * padding_size)
    # Now we are in a structured format, we can convert to tensors.
    prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.int64)
    prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.int64)
    return prompts_tokens_tensor, prompts_length_tensor
7,831 | import re
import torch
The provided code snippet includes necessary dependencies for implementing the `rename_flamingo_checkpoint` function. Write a Python function `def rename_flamingo_checkpoint(old_ckpt: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]` to solve the following problem:
Rename some keys in the public flamingo checkpoint
Here is the function:
def rename_flamingo_checkpoint(old_ckpt: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
    """Rename some keys in the public flamingo checkpoint.

    Perceiver ``layers.N.0`` sub-keys are flattened to ``layers.N``,
    ``layers.N.1`` becomes ``layers.N.feed_forward``, gated cross-attention
    entries are dropped, and remaining ``lang_encoder`` keys (except
    ``ff_gate``) have ``ff`` renamed to ``feed_forward``.
    """
    inner_pattern = re.compile(r"perceiver\.layers\.[0-9]\.0")
    ff_pattern = re.compile(r"perceiver\.layers\.[0-9]\.1")
    renamed_ckpt = old_ckpt.copy()
    for key, tensor in old_ckpt.items():
        if inner_pattern.match(key):
            del renamed_ckpt[key]
            renamed_ckpt[re.sub(r"([0-9])\.0", r"\1", key)] = tensor
        elif ff_pattern.match(key):
            del renamed_ckpt[key]
            renamed_ckpt[re.sub(r"([0-9])\.1", r"\1.feed_forward", key)] = tensor
        elif key.startswith("lang_encoder.gated_cross_attn_layers."):
            # These layers are intentionally discarded.
            del renamed_ckpt[key]
        elif key.startswith("lang_encoder.") and "ff_gate" not in key:
            del renamed_ckpt[key]
            renamed_ckpt[key.replace("ff", "feed_forward")] = tensor
    return renamed_ckpt
7,832 | import random
from dataclasses import dataclass
from typing import Callable, Optional
import torch
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module
from einops import rearrange, repeat
from transformers import CLIPVisionModel, LlamaForCausalLM, LlamaTokenizer
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.models.auto import AutoModel, AutoModelForCausalLM, AutoTokenizer
from .configuration_flamingo import FlamingoConfig
from ..falcon.modelling_RW import RWForCausalLM
from ..mpt.modeling_mpt import MPTForCausalLM
from ..mpt_redpajama.mosaic_gpt import MosaicGPT
import torch.distributed as dist
# Maps a substring of the decoder's class name (lower-cased match) to the
# attribute path of its nn.ModuleList of transformer blocks.
__KNOWN_DECODER_LAYERS_ATTR_NAMES = {
    "opt": "model.decoder.layers",
    "gptneo": "transformer.h",
    "gptj": "transformer.h",
    "gpt-j": "transformer.h",
    "pythia": "gpt_neox.layers",
    "llama": "model.layers",
    "RWForCausalLM": "transformer.h",
    "MPTForCausalLM": "transformer.blocks",
    "MosaicGPT": "transformer.blocks",
}


def _infer_decoder_layers_attr_name(model: nn.Module):
    """Best-effort lookup of where ``model`` stores its transformer-block
    ModuleList, keyed on a substring of the model's class name."""
    model_name = model.__class__.__name__.lower()
    for keyword, layers_attr in __KNOWN_DECODER_LAYERS_ATTR_NAMES.items():
        if keyword.lower() in model_name:
            return layers_attr
    raise ValueError(f"We require the attribute name for the nn.ModuleList in the decoder storing the transformer block layers. Please supply this string manually.")
7,833 | import random
from dataclasses import dataclass
from typing import Callable, Optional
import torch
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module
from einops import rearrange, repeat
from transformers import CLIPVisionModel, LlamaForCausalLM, LlamaTokenizer
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.models.auto import AutoModel, AutoModelForCausalLM, AutoTokenizer
from .configuration_flamingo import FlamingoConfig
from ..falcon.modelling_RW import RWForCausalLM
from ..mpt.modeling_mpt import MPTForCausalLM
from ..mpt_redpajama.mosaic_gpt import MosaicGPT
import torch.distributed as dist
The provided code snippet includes necessary dependencies for implementing the `extend_instance` function. Write a Python function `def extend_instance(obj, mixin)` to solve the following problem:
Apply mixins to a class instance after creation
Here is the function:
def extend_instance(obj, mixin):
    """Apply mixins to a class instance after creation."""
    original_cls = obj.__class__
    # mixin needs to go first for our forward() logic to work
    obj.__class__ = type(original_cls.__name__, (mixin, original_cls), {})
7,834 | import random
from dataclasses import dataclass
from typing import Callable, Optional
import torch
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module
from einops import rearrange, repeat
from transformers import CLIPVisionModel, LlamaForCausalLM, LlamaTokenizer
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.models.auto import AutoModel, AutoModelForCausalLM, AutoTokenizer
from .configuration_flamingo import FlamingoConfig
from ..falcon.modelling_RW import RWForCausalLM
from ..mpt.modeling_mpt import MPTForCausalLM
from ..mpt_redpajama.mosaic_gpt import MosaicGPT
import torch.distributed as dist
def getattr_recursive(obj, att):
    """
    Return nested attribute of obj
    Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c
    """
    if att == "":
        return obj
    head, sep, rest = att.partition(".")
    if not sep:
        # No dot left: plain attribute lookup.
        return getattr(obj, att)
    return getattr_recursive(getattr(obj, head), rest)
The provided code snippet includes necessary dependencies for implementing the `setattr_recursive` function. Write a Python function `def setattr_recursive(obj, att, val)` to solve the following problem:
Set nested attribute of obj Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
Here is the function:
def setattr_recursive(obj, att, val):
    """
    Set nested attribute of obj.
    Example: setattr_recursive(obj, 'a.b.c', val) is equivalent to obj.a.b.c = val
    """
    # Split off the final attribute name; everything before it is the path to walk.
    *parents, leaf = att.split(".")
    target = obj
    for name in parents:
        target = getattr(target, name)
    setattr(target, leaf, val)
7,835 | import random
from dataclasses import dataclass
from typing import Callable, Optional
import torch
import torch.nn as nn
from accelerate.hooks import AlignDevicesHook, add_hook_to_module
from einops import rearrange, repeat
from transformers import CLIPVisionModel, LlamaForCausalLM, LlamaTokenizer
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.modeling_utils import PreTrainedModel
from transformers.models.auto import AutoModel, AutoModelForCausalLM, AutoTokenizer
from .configuration_flamingo import FlamingoConfig
from ..falcon.modelling_RW import RWForCausalLM
from ..mpt.modeling_mpt import MPTForCausalLM
from ..mpt_redpajama.mosaic_gpt import MosaicGPT
import torch.distributed as dist
def exists(val):
    """Return True when *val* is not None (any other value, even falsy, counts as present)."""
    return not (val is None)
7,836 | import re
import argparse
import os
import torch
import torch.nn as nn
from transformers import CLIPVisionModel, LlamaForCausalLM, LlamaTokenizer
import sys
from modeling_flamingo import FlamingoForConditionalGeneration
from configuration_flamingo import FlamingoConfig
class FlamingoForConditionalGeneration(FlamingoPreTrainedModel):
    """Flamingo vision-language model.

    Wires a CLIP vision encoder through a perceiver resampler whose outputs
    condition gated cross-attention layers injected (via FlamingoLMMixin)
    into a causal language decoder selected from the config.
    """

    config_class = FlamingoConfig

    def __init__(
        self,
        config: FlamingoConfig,
    ):
        """Build tokenizer, language decoder, vision encoder and perceiver from
        *config*, then graft the Flamingo cross-attention machinery onto the decoder."""
        super().__init__(config)
        # TODO: hardcode right because autoXXX is too slow
        # vision_encoder = AutoModel.from_config(config.vision_config).vision_model
        # lang_encoder = AutoModelForCausalLM.from_config(config.text_config)
        # text_tokenizer = AutoTokenizer.from_pretrained(config.text_config._name_or_path)
        ### TODO: give "LlamaForCausalLM" as the name of text_config.architectures of Llama_based flamingo
        # assert hasattr(config.text_config, "_name_or_path")
        # if "llama" not in config.text_config._name_or_path.lower():
        # The first architecture string in the text config selects the decoder family.
        if config.text_config.architectures[0] == "MPTForCausalLM":
            text_tokenizer = AutoTokenizer.from_pretrained("mosaicml/mpt-7b-instruct")
            lang_encoder = MPTForCausalLM(config=config.text_config)
        elif config.text_config.architectures[0] == "MosaicGPT":
            text_tokenizer = AutoTokenizer.from_pretrained("mosaicml/mosaic-llama-redpajama-final-candidate")
            lang_encoder = MosaicGPT(config=config.text_config)
        elif config.text_config.architectures[0] == "RWForCausalLM":
            text_tokenizer = AutoTokenizer.from_pretrained("PATH-TO-YOUR-FALCON")
            lang_encoder = RWForCausalLM(config=config.text_config)
        # TODO: what's the logic here?
        elif config.text_config.architectures[0] == "LlamaForCausalLM":
            text_tokenizer = AutoTokenizer.from_pretrained(config.text_config._name_or_path)
            lang_encoder = LlamaForCausalLM(config=config.text_config)
        else:
            # NOTE(review): unsupported architectures drop into the debugger
            # instead of raising a descriptive error.
            import pdb

            pdb.set_trace()
        # else:
        #     text_tokenizer = LlamaTokenizer.from_pretrained(config.text_config._name_or_path)
        #     lang_encoder = LlamaForCausalLM(config=config.text_config)
        vision_encoder = CLIPVisionModel(config=config.vision_config)
        # Special tokens marking image positions and chunk boundaries in the text stream.
        text_tokenizer.add_special_tokens({"additional_special_tokens": ["<|endofchunk|>", "<image>"]})
        if text_tokenizer.pad_token is None:
            text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})
        self.text_tokenizer = text_tokenizer
        # Token ids used as end-of-chunk EOS during generation and to locate media slots.
        self.eoc_token_id = text_tokenizer.encode("<|endofchunk|>")[-1]
        self.media_token_id = text_tokenizer.encode("<image>")[-1]
        # Rebase the decoder instance onto FlamingoLMMixin so cross-attention hooks apply.
        extend_instance(lang_encoder, FlamingoLMMixin)
        decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder)
        lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name)
        self.lang_encoder = lang_encoder
        # Insert a gated cross-attention layer every N decoder layers (default 4).
        self.cross_attn_every_n_layers = config.cross_attn_every_n_layers if hasattr(config, "cross_attn_every_n_layers") else 4
        self.use_media_placement_augmentation = config.use_media_placement_augmentation
        vision_encoder.output_tokens = True
        self.vision_encoder = vision_encoder
        # Hidden size of the vision features fed to the perceiver.
        # NOTE(review): hard-coded to 1024 — assumes CLIP-L-sized features; confirm against vision_config.
        self.vis_dim = 1024
        self.perceiver = FlamingoPerceiverResampler(dim=self.vis_dim)
        self.lang_encoder.init_flamingo(
            media_token_id=self.media_token_id,
            vis_hidden_size=self.vis_dim,
            cross_attn_every_n_layers=self.cross_attn_every_n_layers,
            use_media_placement_augmentation=self.use_media_placement_augmentation,
        )
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # All embedding accessors delegate to the wrapped language decoder.
        return self.lang_encoder.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.lang_encoder.set_input_embeddings(new_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        return self.lang_encoder.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.lang_encoder.set_output_embeddings(new_embeddings)

    def get_image_encoder(self) -> nn.Module:
        # Accessor for the CLIP vision tower.
        return self.vision_encoder

    def get_lang_encoder(self) -> nn.Module:
        # Accessor for the (mixin-extended) language decoder.
        return self.lang_encoder

    def init_weights(self):
        """Set requires_grad flags from config: freeze everything by default, then
        selectively unfreeze (vision encoder, decoder, LoRA params, gated cross-attn,
        perceiver, embeddings) and print a summary of trainable parameters."""
        # Freeze all parameters in self.model if train_vision_encoder is False or train_lang_encoder is False
        if not ("train_full_model" in self.config.__dict__ and self.config.train_full_model is True):
            for param in self.parameters():
                param.requires_grad = False
        # Freeze all parameters in vision encoder
        if "train_vision_encoder" in self.config.__dict__ and self.config.train_vision_encoder is True:
            master_print("Unfreeze vision encoder.")
            for param in self.vision_encoder.parameters():
                param.requires_grad = True
        # Freeze all parameters in lang encoders except gated_cross_attn_layers
        if "train_lang_encoder" in self.config.__dict__ and self.config.train_lang_encoder is True:
            master_print("Unfreeze language decoder.")
            for name, param in self.lang_encoder.named_parameters():
                param.requires_grad = True
        if "lora_config" in self.config.__dict__:
            # Use another logic to unfreeze gated_cross_attn_layers and perceivers
            master_print(f"LoRA trainable param: {(sum(param.numel() for name, param in self.lang_encoder.named_parameters() if 'lora' in name)) / 1e6:.3f} M")
            for name, param in self.lang_encoder.named_parameters():
                if "lora" in name:
                    param.requires_grad = True
        # Freeze all parameters in lang encoders except gated_cross_attn_layers
        for name, param in self.lang_encoder.named_parameters():
            if "gated_cross_attn_layer" in name:
                param.requires_grad = True
        for name, param in self.named_parameters():
            if "perceiver" in name:
                param.requires_grad = True
        # Unfreeze LM input and output embeddings
        self.lang_encoder.get_input_embeddings().requires_grad_(True)
        ## MPTForCausalLM is tied word embedding
        if "LlamaForCausalLM" in self.lang_encoder.__class__.__name__:
            self.lang_encoder.lm_head.requires_grad_(True)
        total_params = 0
        for name, param in self.named_parameters():
            if param.requires_grad:
                total_params += param.numel()
                master_print(f"{name}: {param.numel() / 1e6:.3f} M")
        master_print(f"Total Trainable param: {(sum(p.numel() for p in self.parameters() if p.requires_grad)) / 1e9:.3f} B")

    def forward(
        self,
        vision_x: torch.Tensor,
        lang_x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cached_vision_x: bool = False,
        clear_conditioned_layers: bool = True,
        past_key_values: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        """
        Forward pass of Flamingo.
        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W) with F=1
            lang_x (torch.Tensor): Language input ids
                shape (B, T_txt)
            attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
            labels (torch.Tensor, optional): Labels. Defaults to None.
            clear_conditioned_layers: if True, clear the conditioned layers
                once the foward pass is completed. Set this to false if the
                same set of images will be reused in another subsequent
                forward pass.
            past_key_values: pre-computed values to pass to language model.
                See past_key_values documentation in Hugging Face
                CausalLM models.
            use_cache: whether to use cached key values. See use_cache
                documentation in Hugging Face CausalLM models.
        """
        assert (vision_x is not None) or use_cached_vision_x, "Must provide either vision_x or use_cached_vision_x to True."
        if use_cached_vision_x:
            # Case: use cached; vision_x should be cached and other
            # vision-related inputs should not be provided.
            assert vision_x is None, "Expect vision_x to be None when use_cached_vision_x is True."
            assert self.lang_encoder.is_conditioned()
        else:
            # Case: do not use caching (i.e. this is a standard forward pass);
            self._encode_vision_x(vision_x=vision_x)
        output = self.lang_encoder(
            input_ids=lang_x,
            attention_mask=attention_mask,
            labels=labels,
            past_key_values=past_key_values,
            use_cache=use_cache,
            **kwargs,
        )
        if clear_conditioned_layers:
            self.lang_encoder.clear_conditioned_layers()
        return output

    def _encode_vision_x(self, vision_x: torch.Tensor):
        """
        Compute media tokens from vision input by passing it through vision encoder and conditioning language model.
        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W)
                Images in the same chunk are collated along T_img, and frames are collated along F
                Currently only F=1 is supported (single-frame videos)
        rearrange code based on https://github.com/dhansmair/flamingo-mini
        """
        assert vision_x.ndim == 6, "vision_x should be of shape (b, T_img, F, C, H, W)"
        b, T, F = vision_x.shape[:3]
        # assert F == 1, "Only single frame supported"
        vision_x = rearrange(vision_x, "b T F c h w -> (b T F) c h w")
        with torch.no_grad():
            # Drop the first (CLS-position) token; keep per-patch features.
            vision_x = self.vision_encoder(vision_x)[0][:, 1:, :]
        vision_x = rearrange(vision_x, "(b T F) v d -> b T F v d", b=b, T=T, F=F)
        vision_x = self.perceiver(vision_x)  # reshapes to (b, T, n, d)
        # Hand the resampled visual tokens to every decoder layer's cross-attention.
        for layer in self.lang_encoder._get_decoder_layers():
            layer.condition_vis_x(vision_x)

    def generate(
        self,
        vision_x: torch.Tensor,
        lang_x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        num_beams: int = 1,
        max_new_tokens: Optional[int] = None,
        temperature: float = 1.0,
        top_k: int = 0,
        top_p: float = 1.0,
        no_repeat_ngram_size: int = 0,
        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]] = None,
        length_penalty: float = 1.0,
        num_return_sequences: int = 1,
        do_sample: bool = False,
        early_stopping: bool = False,
        **kwargs,
    ):
        """
        Generate text conditioned on vision and language inputs.
        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W)
                images in the same chunk are collated along T_img, and frames are collated along F
                currently only F=1 is supported (single-frame videos)
            lang_x (torch.Tensor): Language input
                shape (B, T_txt)
            max_length (int, optional): Maximum length of the output. Defaults to None.
            attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
            num_beams (int, optional): Number of beams. Defaults to 1.
            max_new_tokens (int, optional): Maximum new tokens. Defaults to None.
            temperature (float, optional): Temperature. Defaults to 1.0.
            top_k (int, optional): Top k. Defaults to 0.
            top_p (float, optional): Top p. Defaults to 1.0.
            no_repeat_ngram_size (int, optional): No repeat ngram size. Defaults to 0.
            length_penalty (float, optional): Length penalty. Defaults to 1.0.
            num_return_sequences (int, optional): Number of return sequences. Defaults to 1.
            do_sample (bool, optional): Do sample. Defaults to False.
            early_stopping (bool, optional): Early stopping. Defaults to False.
        Returns:
            torch.Tensor: lang_x with generated tokens appended to it
        """
        if hasattr(self, "_hf_hook"):
            # add a hook to make sure that the output of lang_encoder is mapped to the same device as the lang_x
            hook = AlignDevicesHook(
                execution_device=lang_x.device,
                io_same_device=True,
                place_submodules=False,
            )
            add_hook_to_module(self.lang_encoder, hook)
        if num_beams > 1:
            # Beam search expands the batch dimension; replicate vision features to match.
            vision_x = vision_x.repeat_interleave(num_beams, dim=0)
        self._encode_vision_x(vision_x=vision_x)
        output = self.lang_encoder.generate(
            lang_x,
            attention_mask=attention_mask,
            eos_token_id=self.eoc_token_id,
            num_beams=num_beams,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            no_repeat_ngram_size=no_repeat_ngram_size,
            length_penalty=length_penalty,
            num_return_sequences=num_return_sequences,
            do_sample=do_sample,
            early_stopping=early_stopping,
            **kwargs,
        )
        self.lang_encoder.clear_conditioned_layers()
        return output
def dump_hf_model(pretrained_model_path: str, old_ckpt_path: str, new_folder_path: str) -> None:
    """Load a training checkpoint into a pretrained Flamingo model and save it in HF format.

    Args:
        pretrained_model_path: path/name of the base pretrained HF model to instantiate.
        old_ckpt_path: path to the raw training checkpoint (optionally wrapped in a
            {"model_state_dict": ...} dict).
        new_folder_path: destination folder for the converted HF model.
    """
    old_ckpt = torch.load(old_ckpt_path, map_location="cpu")
    # Some training loops wrap the weights in a "model_state_dict" key; unwrap if so.
    if old_ckpt.get("model_state_dict", None) is not None:
        old_ckpt = old_ckpt["model_state_dict"]
    new_ckpt = old_ckpt
    model = FlamingoForConditionalGeneration.from_pretrained(
        # Bugfix: was `args.pretrained_model_path` — `args` is an undefined global here;
        # use the function parameter instead.
        pretrained_model_path,
        device_map="auto",
    )
    # strict=False: the checkpoint may omit frozen submodules (e.g. the vision encoder).
    _ = model.load_state_dict(new_ckpt, strict=False)
    print(f"Saving HF model to {new_folder_path}")
    model.save_pretrained(new_folder_path)
7,837 | import re
import argparse
import os
import torch
import torch.nn as nn
from transformers import CLIPVisionModel, LlamaForCausalLM, LlamaTokenizer
import sys
from ..configuration_flamingo import FlamingoConfig
from ..modeling_flamingo import FlamingoForConditionalGeneration
class FlamingoForConditionalGeneration(FlamingoPreTrainedModel):
    """Flamingo vision-language model.

    Wires a CLIP vision encoder through a perceiver resampler whose outputs
    condition gated cross-attention layers injected (via FlamingoLMMixin)
    into a causal language decoder selected from the config.
    """

    config_class = FlamingoConfig

    def __init__(
        self,
        config: FlamingoConfig,
    ):
        """Build tokenizer, language decoder, vision encoder and perceiver from
        *config*, then graft the Flamingo cross-attention machinery onto the decoder."""
        super().__init__(config)
        # TODO: hardcode right because autoXXX is too slow
        # vision_encoder = AutoModel.from_config(config.vision_config).vision_model
        # lang_encoder = AutoModelForCausalLM.from_config(config.text_config)
        # text_tokenizer = AutoTokenizer.from_pretrained(config.text_config._name_or_path)
        ### TODO: give "LlamaForCausalLM" as the name of text_config.architectures of Llama_based flamingo
        # assert hasattr(config.text_config, "_name_or_path")
        # if "llama" not in config.text_config._name_or_path.lower():
        # The first architecture string in the text config selects the decoder family.
        if config.text_config.architectures[0] == "MPTForCausalLM":
            text_tokenizer = AutoTokenizer.from_pretrained("mosaicml/mpt-7b-instruct")
            lang_encoder = MPTForCausalLM(config=config.text_config)
        elif config.text_config.architectures[0] == "MosaicGPT":
            text_tokenizer = AutoTokenizer.from_pretrained("mosaicml/mosaic-llama-redpajama-final-candidate")
            lang_encoder = MosaicGPT(config=config.text_config)
        elif config.text_config.architectures[0] == "RWForCausalLM":
            text_tokenizer = AutoTokenizer.from_pretrained("PATH-TO-YOUR-FALCON")
            lang_encoder = RWForCausalLM(config=config.text_config)
        # TODO: what's the logic here?
        elif config.text_config.architectures[0] == "LlamaForCausalLM":
            text_tokenizer = AutoTokenizer.from_pretrained(config.text_config._name_or_path)
            lang_encoder = LlamaForCausalLM(config=config.text_config)
        else:
            # NOTE(review): unsupported architectures drop into the debugger
            # instead of raising a descriptive error.
            import pdb

            pdb.set_trace()
        # else:
        #     text_tokenizer = LlamaTokenizer.from_pretrained(config.text_config._name_or_path)
        #     lang_encoder = LlamaForCausalLM(config=config.text_config)
        vision_encoder = CLIPVisionModel(config=config.vision_config)
        # Special tokens marking image positions and chunk boundaries in the text stream.
        text_tokenizer.add_special_tokens({"additional_special_tokens": ["<|endofchunk|>", "<image>"]})
        if text_tokenizer.pad_token is None:
            text_tokenizer.add_special_tokens({"pad_token": "<PAD>"})
        self.text_tokenizer = text_tokenizer
        # Token ids used as end-of-chunk EOS during generation and to locate media slots.
        self.eoc_token_id = text_tokenizer.encode("<|endofchunk|>")[-1]
        self.media_token_id = text_tokenizer.encode("<image>")[-1]
        # Rebase the decoder instance onto FlamingoLMMixin so cross-attention hooks apply.
        extend_instance(lang_encoder, FlamingoLMMixin)
        decoder_layers_attr_name = _infer_decoder_layers_attr_name(lang_encoder)
        lang_encoder.set_decoder_layers_attr_name(decoder_layers_attr_name)
        self.lang_encoder = lang_encoder
        # Insert a gated cross-attention layer every N decoder layers (default 4).
        self.cross_attn_every_n_layers = config.cross_attn_every_n_layers if hasattr(config, "cross_attn_every_n_layers") else 4
        self.use_media_placement_augmentation = config.use_media_placement_augmentation
        vision_encoder.output_tokens = True
        self.vision_encoder = vision_encoder
        # Hidden size of the vision features fed to the perceiver.
        # NOTE(review): hard-coded to 1024 — assumes CLIP-L-sized features; confirm against vision_config.
        self.vis_dim = 1024
        self.perceiver = FlamingoPerceiverResampler(dim=self.vis_dim)
        self.lang_encoder.init_flamingo(
            media_token_id=self.media_token_id,
            vis_hidden_size=self.vis_dim,
            cross_attn_every_n_layers=self.cross_attn_every_n_layers,
            use_media_placement_augmentation=self.use_media_placement_augmentation,
        )
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # All embedding accessors delegate to the wrapped language decoder.
        return self.lang_encoder.get_input_embeddings()

    def set_input_embeddings(self, new_embeddings):
        self.lang_encoder.set_input_embeddings(new_embeddings)

    def get_output_embeddings(self) -> nn.Module:
        return self.lang_encoder.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        self.lang_encoder.set_output_embeddings(new_embeddings)

    def get_image_encoder(self) -> nn.Module:
        # Accessor for the CLIP vision tower.
        return self.vision_encoder

    def get_lang_encoder(self) -> nn.Module:
        # Accessor for the (mixin-extended) language decoder.
        return self.lang_encoder

    def init_weights(self):
        """Set requires_grad flags from config: freeze everything by default, then
        selectively unfreeze (vision encoder, decoder, LoRA params, gated cross-attn,
        perceiver, embeddings) and print a summary of trainable parameters."""
        # Freeze all parameters in self.model if train_vision_encoder is False or train_lang_encoder is False
        if not ("train_full_model" in self.config.__dict__ and self.config.train_full_model is True):
            for param in self.parameters():
                param.requires_grad = False
        # Freeze all parameters in vision encoder
        if "train_vision_encoder" in self.config.__dict__ and self.config.train_vision_encoder is True:
            master_print("Unfreeze vision encoder.")
            for param in self.vision_encoder.parameters():
                param.requires_grad = True
        # Freeze all parameters in lang encoders except gated_cross_attn_layers
        if "train_lang_encoder" in self.config.__dict__ and self.config.train_lang_encoder is True:
            master_print("Unfreeze language decoder.")
            for name, param in self.lang_encoder.named_parameters():
                param.requires_grad = True
        if "lora_config" in self.config.__dict__:
            # Use another logic to unfreeze gated_cross_attn_layers and perceivers
            master_print(f"LoRA trainable param: {(sum(param.numel() for name, param in self.lang_encoder.named_parameters() if 'lora' in name)) / 1e6:.3f} M")
            for name, param in self.lang_encoder.named_parameters():
                if "lora" in name:
                    param.requires_grad = True
        # Freeze all parameters in lang encoders except gated_cross_attn_layers
        for name, param in self.lang_encoder.named_parameters():
            if "gated_cross_attn_layer" in name:
                param.requires_grad = True
        for name, param in self.named_parameters():
            if "perceiver" in name:
                param.requires_grad = True
        # Unfreeze LM input and output embeddings
        self.lang_encoder.get_input_embeddings().requires_grad_(True)
        ## MPTForCausalLM is tied word embedding
        if "LlamaForCausalLM" in self.lang_encoder.__class__.__name__:
            self.lang_encoder.lm_head.requires_grad_(True)
        total_params = 0
        for name, param in self.named_parameters():
            if param.requires_grad:
                total_params += param.numel()
                master_print(f"{name}: {param.numel() / 1e6:.3f} M")
        master_print(f"Total Trainable param: {(sum(p.numel() for p in self.parameters() if p.requires_grad)) / 1e9:.3f} B")

    def forward(
        self,
        vision_x: torch.Tensor,
        lang_x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cached_vision_x: bool = False,
        clear_conditioned_layers: bool = True,
        past_key_values: Optional[torch.Tensor] = None,
        use_cache: bool = False,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        """
        Forward pass of Flamingo.
        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W) with F=1
            lang_x (torch.Tensor): Language input ids
                shape (B, T_txt)
            attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
            labels (torch.Tensor, optional): Labels. Defaults to None.
            clear_conditioned_layers: if True, clear the conditioned layers
                once the foward pass is completed. Set this to false if the
                same set of images will be reused in another subsequent
                forward pass.
            past_key_values: pre-computed values to pass to language model.
                See past_key_values documentation in Hugging Face
                CausalLM models.
            use_cache: whether to use cached key values. See use_cache
                documentation in Hugging Face CausalLM models.
        """
        assert (vision_x is not None) or use_cached_vision_x, "Must provide either vision_x or use_cached_vision_x to True."
        if use_cached_vision_x:
            # Case: use cached; vision_x should be cached and other
            # vision-related inputs should not be provided.
            assert vision_x is None, "Expect vision_x to be None when use_cached_vision_x is True."
            assert self.lang_encoder.is_conditioned()
        else:
            # Case: do not use caching (i.e. this is a standard forward pass);
            self._encode_vision_x(vision_x=vision_x)
        output = self.lang_encoder(
            input_ids=lang_x,
            attention_mask=attention_mask,
            labels=labels,
            past_key_values=past_key_values,
            use_cache=use_cache,
            **kwargs,
        )
        if clear_conditioned_layers:
            self.lang_encoder.clear_conditioned_layers()
        return output

    def _encode_vision_x(self, vision_x: torch.Tensor):
        """
        Compute media tokens from vision input by passing it through vision encoder and conditioning language model.
        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W)
                Images in the same chunk are collated along T_img, and frames are collated along F
                Currently only F=1 is supported (single-frame videos)
        rearrange code based on https://github.com/dhansmair/flamingo-mini
        """
        assert vision_x.ndim == 6, "vision_x should be of shape (b, T_img, F, C, H, W)"
        b, T, F = vision_x.shape[:3]
        # assert F == 1, "Only single frame supported"
        vision_x = rearrange(vision_x, "b T F c h w -> (b T F) c h w")
        with torch.no_grad():
            # Drop the first (CLS-position) token; keep per-patch features.
            vision_x = self.vision_encoder(vision_x)[0][:, 1:, :]
        vision_x = rearrange(vision_x, "(b T F) v d -> b T F v d", b=b, T=T, F=F)
        vision_x = self.perceiver(vision_x)  # reshapes to (b, T, n, d)
        # Hand the resampled visual tokens to every decoder layer's cross-attention.
        for layer in self.lang_encoder._get_decoder_layers():
            layer.condition_vis_x(vision_x)

    def generate(
        self,
        vision_x: torch.Tensor,
        lang_x: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        num_beams: int = 1,
        max_new_tokens: Optional[int] = None,
        temperature: float = 1.0,
        top_k: int = 0,
        top_p: float = 1.0,
        no_repeat_ngram_size: int = 0,
        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]] = None,
        length_penalty: float = 1.0,
        num_return_sequences: int = 1,
        do_sample: bool = False,
        early_stopping: bool = False,
        **kwargs,
    ):
        """
        Generate text conditioned on vision and language inputs.
        Args:
            vision_x (torch.Tensor): Vision input
                shape (B, T_img, F, C, H, W)
                images in the same chunk are collated along T_img, and frames are collated along F
                currently only F=1 is supported (single-frame videos)
            lang_x (torch.Tensor): Language input
                shape (B, T_txt)
            max_length (int, optional): Maximum length of the output. Defaults to None.
            attention_mask (torch.Tensor, optional): Attention mask. Defaults to None.
            num_beams (int, optional): Number of beams. Defaults to 1.
            max_new_tokens (int, optional): Maximum new tokens. Defaults to None.
            temperature (float, optional): Temperature. Defaults to 1.0.
            top_k (int, optional): Top k. Defaults to 0.
            top_p (float, optional): Top p. Defaults to 1.0.
            no_repeat_ngram_size (int, optional): No repeat ngram size. Defaults to 0.
            length_penalty (float, optional): Length penalty. Defaults to 1.0.
            num_return_sequences (int, optional): Number of return sequences. Defaults to 1.
            do_sample (bool, optional): Do sample. Defaults to False.
            early_stopping (bool, optional): Early stopping. Defaults to False.
        Returns:
            torch.Tensor: lang_x with generated tokens appended to it
        """
        if hasattr(self, "_hf_hook"):
            # add a hook to make sure that the output of lang_encoder is mapped to the same device as the lang_x
            hook = AlignDevicesHook(
                execution_device=lang_x.device,
                io_same_device=True,
                place_submodules=False,
            )
            add_hook_to_module(self.lang_encoder, hook)
        if num_beams > 1:
            # Beam search expands the batch dimension; replicate vision features to match.
            vision_x = vision_x.repeat_interleave(num_beams, dim=0)
        self._encode_vision_x(vision_x=vision_x)
        output = self.lang_encoder.generate(
            lang_x,
            attention_mask=attention_mask,
            eos_token_id=self.eoc_token_id,
            num_beams=num_beams,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            no_repeat_ngram_size=no_repeat_ngram_size,
            length_penalty=length_penalty,
            num_return_sequences=num_return_sequences,
            do_sample=do_sample,
            early_stopping=early_stopping,
            **kwargs,
        )
        self.lang_encoder.clear_conditioned_layers()
        return output
def dump_hf_model(pretrained_model_path: str, old_ckpt_path: str, new_folder_path: str) -> None:
    """Load a training checkpoint into a pretrained Flamingo model and save it in HF format.

    Args:
        pretrained_model_path: path/name of the base pretrained HF model to instantiate.
        old_ckpt_path: path to the raw training checkpoint (optionally wrapped in a
            {"model_state_dict": ...} dict).
        new_folder_path: destination folder for the converted HF model.
    """
    old_ckpt = torch.load(old_ckpt_path, map_location="cpu")
    # Some training loops wrap the weights in a "model_state_dict" key; unwrap if so.
    if old_ckpt.get("model_state_dict", None) is not None:
        old_ckpt = old_ckpt["model_state_dict"]
    new_ckpt = old_ckpt
    model = FlamingoForConditionalGeneration.from_pretrained(
        # Bugfix: was `args.pretrained_model_path` — `args` is an undefined global here;
        # use the function parameter instead.
        pretrained_model_path,
        device_map="auto",
    )
    # strict=False: the checkpoint may omit frozen submodules (e.g. the vision encoder).
    _ = model.load_state_dict(new_ckpt, strict=False)
    print(f"Saving HF model to {new_folder_path}")
    model.save_pretrained(new_folder_path)
7,838 | from abc import ABC, abstractmethod
from typing import List, Dict, Any, Tuple
import importlib
# Registry of dataset classes addressable via get_dataset_by_path, written as
# "<module>.<ClassName>" paths relative to the local `datasets` package.
AVAILABLE_DATASETS: List[str] = [
    "change.SpotTheDifference",
    "change.CocoGeneralDifference",
    "video.DenseCaptions",
    "video.TVCaptions",
    "video.VisualStoryTelling",
    "3d.SceneNavigation",
    "fpv.EGO4D",
    "2d.Llava",
]
class AbstractDataset(ABC):
    """Base class for image-bearing datasets.

    Subclasses are expected to implement _load_images() to populate
    self.images, a mapping of image id -> raw image bytes.
    """

    def __init__(self, name: str, short_name: str, image_path: str, num_threads: int):
        """
        Initialize an AbstractDataset object.
        Args:
            name (str): The name of the dataset.
            short_name (str): The short name of the dataset.
            image_path (str): The path to the images of the dataset.
            num_threads (int): The number of threads to use for processing the images.
        """
        self.name: str = name
        self.short_name: str = short_name
        # Mapping of image id -> raw image bytes, produced by the subclass loader.
        self.images: Dict[str, bytes] = self._load_images(image_path, num_threads)

    def _load_images(self, image_path: str, num_thread: int) -> dict[str, bytes]:
        """
        Load the images from the videos or albums.
        Args:
            image_path (str): The path storing the videos or albums.
            num_thread (int): The number of threads to use for loading the images.
        Returns:
            Dict[str, bytes]: A dictionary of images, where the keys are the IDs of the images.
        """
        # NOTE(review): appears intended to be abstract (abstractmethod is imported
        # but not applied); the base implementation returns None, which would make
        # self.images None for a subclass that forgets to override this.
        pass

    def __getitem__(self, key: str) -> Dict[str, Any]:
        """
        Get the item at the given key as a dictionary.
        Args:
            key (str): The key of the item to retrieve.
        Returns:
            Dict[str, Any]: The item at the given key.
        """
        return self.images[key]

    def __iter__(self) -> "AbstractDataset":
        """
        Return the iterator object for the dataset.
        Returns:
            AbstractDataset: The iterator object.
        """
        # Stores iteration state on the instance, so only one active iteration
        # per dataset object is supported at a time.
        self.keys = iter(self.images.keys())
        return self

    def __next__(self) -> Tuple[str, bytes]:
        """
        Return the next item in the iteration.
        Returns:
            Tuple[str, bytes]: The next item as a tuple of key and image.
        Raises:
            StopIteration: If there are no more items in the iteration.
        """
        try:
            key = next(self.keys)
            image = self.images[key]
            return key, image
        except StopIteration:
            raise StopIteration

    def __len__(self) -> int:
        """
        Return the length of the dataset.
        Returns:
            int: The length of the dataset.
        """
        # NOTE(review): `query_inputs` is never defined in this class — it is either
        # a subclass attribute or a bug (len(self.images) may be intended). Verify.
        return len(self.query_inputs)

    def __str__(self) -> str:
        """
        Return a string representation of the dataset.
        Returns:
            str: The string representation of the dataset.
        """
        return f"{self.name} dataset"
The provided code snippet includes necessary dependencies for implementing the `get_dataset_by_path` function. Write a Python function `def get_dataset_by_path(path: str, dataset_args: dict[str, str]) -> AbstractDataset` to solve the following problem:
Get an instance of a dataset class based on the given path. Args: path (str): The path to the dataset class in the format "<module>.<class>". dataset_args (Dict[str, str]): Additional arguments to pass to the dataset class constructor. Returns: AbstractDataset: An instance of the dataset class. Raises: AssertionError: If the given path is not an available dataset.
Here is the function:
def get_dataset_by_path(path: str, dataset_args: dict[str, str]) -> AbstractDataset:
    """
    Instantiate the dataset class identified by *path*.

    Args:
        path: Dataset locator of the form "<module>.<class>".
        dataset_args: Keyword arguments forwarded to the dataset constructor.

    Returns:
        AbstractDataset: The constructed dataset instance.

    Raises:
        AssertionError: If *path* is not listed in AVAILABLE_DATASETS.
    """
    assert path in AVAILABLE_DATASETS, f"{path} is not an available dataset."
    module_name, class_name = path.split(".")
    # Dataset modules live under the local `datasets` package.
    module = importlib.import_module("datasets." + module_name)
    dataset_cls = getattr(module, class_name)
    return dataset_cls(**dataset_args)
7,839 | from abc import ABC, abstractmethod
from typing import List, Dict, Any, Tuple
import importlib
# Registry of dataset classes addressable via get_dataset_by_path, written as
# "<module>.<ClassName>" paths relative to the local `datasets` package.
AVAILABLE_DATASETS: List[str] = [
    "change.SpotTheDifference",
    "change.CocoGeneralDifference",
    "video.DenseCaptions",
    "video.TVCaptions",
    "video.VisualStoryTelling",
    "3d.SceneNavigation",
    "fpv.EGO4D",
    "2d.Llava",
]
The provided code snippet includes necessary dependencies for implementing the `get_available_datasets` function. Write a Python function `def get_available_datasets() -> List[str]` to solve the following problem:
Get a list of available dataset paths. Returns: List[str]: A list of available dataset paths.
Here is the function:
def get_available_datasets() -> List[str]:
    """
    Expose the registry of dataset paths that can be instantiated.

    Returns:
        List[str]: The module-level AVAILABLE_DATASETS registry (not a copy).
    """
    return AVAILABLE_DATASETS
7,840 | import base64
import os
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
from typing import Generator, Tuple
import cv2
from PIL import Image
from tqdm import tqdm
def get_image_id(image_name: str, dataset_name: str) -> str:
    """
    Build the namespaced image identifier for a given image name.

    Args:
        image_name (str): The name of the image.
        dataset_name (str): The name of the dataset.

    Returns:
        str: The image identifier, "<dataset>_IMG_<name>".
    """
    # get_image_name (defined elsewhere in this module) strips the name down
    # to its bare identifier before namespacing.
    return "{}_IMG_{}".format(dataset_name, get_image_name(image_name))
def process_image(image: bytes, target_size=(224, 224)) -> bytes:
    """
    Processes the input image by resizing it, converting it to RGB mode, and save as byte string.
    Args:
        image (bytes): The input image to be processed.
        target_size: Desired (width, height); defaults to 224x224.
    Returns:
        bytes: The processed image as a byte string.
    """
    with Image.open(BytesIO(image)) as img:
        if img.size != target_size:
            # NOTE(review): Image.LANCZOS is a deprecated alias in newer Pillow
            # (Image.Resampling.LANCZOS) — confirm the pinned Pillow version.
            resized_img = img.resize(target_size, Image.LANCZOS)
            # Free the original decoded image early; `img` now refers to the resized
            # copy (the `with` still closes the originally-opened object on exit).
            img.close()
            img = resized_img
        if img.mode != "RGB":
            converted_img = img.convert("RGB")
            img.close()
            img = converted_img
        # image_to_bytes is defined elsewhere in this module; serializes the PIL image.
        processed_image = image_to_bytes(img)
    return processed_image
def get_b64_data(image: bytes) -> str:
    """
    Converts an image to a base64 encoded string.

    Args:
        image (bytes): the image to be converted.

    Returns:
        str: the base64 encoded string representation of the image.
    """
    encoded = base64.b64encode(image)
    return str(encoded, "utf-8")
The provided code snippet includes necessary dependencies for implementing the `get_json_data_generator` function. Write a Python function `def get_json_data_generator(images: dict[str, bytes], dataset_name: str, num_threads: int) -> Generator[Tuple[str, str], None, None]` to solve the following problem:
Converts a dictionary of images to a JSON-compatible dictionary with base64 encoded strings. This generator function will yield the processed image data one at a time, allowing you to write the results to a file without needing to store the entire dictionary in memory. Args: images (Dict[str, bytes]): A dictionary of images, where the keys are image identifiers and the values are byte strings. dataset_name (str): The name of the dataset. num_threads (int): The number of threads to use for processing the images. Returns: Dict[str, str]: A dictionary where the keys are formatted as "{dataset_name}_IMG_{key}" and the values are base64 encoded string representations of the processed images.
Here is the function:
def get_json_data_generator(images: dict[str, bytes], dataset_name: str, num_threads: int) -> Generator[Tuple[str, str], None, None]:
    """
    Lazily convert a dict of images into (key, base64 string) pairs.

    Processing runs on a thread pool; results are yielded one at a time so the
    caller can stream them to a file without holding everything in memory.

    Args:
        images (Dict[str, bytes]): image identifier -> raw image bytes.
        dataset_name (str): dataset name used to build the output keys.
        num_threads (int): worker threads used for image processing.

    Yields:
        Tuple[str, str]: ("{dataset_name}_IMG_{key}", base64-encoded image).
    """
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        process_bar = tqdm(total=len(images), desc="Processing images", unit="image")

        def process_image_wrapper(args):
            # Runs on a worker thread: build the output key, process and
            # encode the image, then tick the progress bar.
            key, img = args
            new_key = get_image_id(key, dataset_name)
            result = get_b64_data(process_image(img))
            process_bar.update()
            return new_key, result

        try:
            for result in executor.map(process_image_wrapper, images.items()):
                yield result
        finally:
            # Bug fix: close the bar even when the consumer abandons the
            # generator early (previously it was only closed after full
            # exhaustion, leaking the tqdm display).
            process_bar.close()
7,841 | import base64
import os
from concurrent.futures import ThreadPoolExecutor
from io import BytesIO
from typing import Generator, Tuple
import cv2
from PIL import Image
from tqdm import tqdm
def process_image(image: bytes, target_size=(224, 224)) -> bytes:
    """
    Resize the input image to ``target_size``, convert it to RGB mode, and
    return it re-encoded as a byte string.

    Args:
        image (bytes): The input image to be processed.
        target_size (tuple): Target (width, height); defaults to (224, 224).

    Returns:
        bytes: The processed image as a byte string.
    """
    with Image.open(BytesIO(image)) as img:
        if img.size != target_size:
            resized_img = img.resize(target_size, Image.LANCZOS)
            # Close the original handle early. Rebinding `img` does not change
            # which object the `with` closes on exit — it still closes the
            # (already-closed) original image.
            img.close()
            img = resized_img
        if img.mode != "RGB":
            converted_img = img.convert("RGB")
            img.close()
            img = converted_img
        # NOTE(review): `image_to_bytes` is defined elsewhere in this module;
        # presumably it serializes the PIL image back to encoded bytes — confirm.
        processed_image = image_to_bytes(img)
    return processed_image
The provided code snippet includes necessary dependencies for implementing the `frame_video` function. Write a Python function `def frame_video(video_file: str, fps: int = 1) -> list[bytes]` to solve the following problem:
Extracts frames from a video file at a specified frame rate and returns them as base64 encoded strings. Args: video_file (str): The path to the video file. fps (int): The frame rate at which frames should be extracted. Defaults to 1 frame per second. Returns: List[bytes]: A list of byte strings representing the extracted frames.
Here is the function:
def frame_video(video_file: str, fps: int = 1) -> list[bytes]:
    """
    Extract frames from a video at roughly ``fps`` frames per second.

    Each sampled frame is resized to 224x224 when needed, PNG-encoded, and run
    through ``process_image``.

    Args:
        video_file (str): The path to the video file.
        fps (int): Sampling rate in frames per second. Defaults to 1.

    Returns:
        List[bytes]: Byte strings of the extracted, processed frames.

    Raises:
        FileNotFoundError: If ``video_file`` does not exist.
    """
    if not os.path.exists(video_file):
        raise FileNotFoundError(f"Video file {video_file} does not exist.")
    cap = cv2.VideoCapture(video_file)
    video_fps = int(cap.get(cv2.CAP_PROP_FPS))
    # Keep every `step`-th frame. max(..., 1) guards against fps > video_fps
    # or an unreadable FPS of 0, which previously raised ZeroDivisionError.
    step = max(video_fps // fps, 1)
    frame_count = 0
    frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if frame_count % step == 0:
            # Resize before encoding so downstream processing is uniform.
            if frame.shape[0] != 224 or frame.shape[1] != 224:
                frame = cv2.resize(frame, (224, 224))
            success, buffer = cv2.imencode(".png", frame)
            if success:
                frames.append(process_image(buffer))
            else:
                # Bug fix: a failed encode used to be appended anyway.
                print(f"Failed to encode frame {frame_count} of video {video_file}.")
            del buffer
        frame_count += 1
        del frame
    cap.release()
    return frames
7,842 | import json
import requests
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
from image_utils import resize_image, create_folder
def download_single_image(image: dict[str]) -> tuple[str, bytes]:
    """
    Download a single image and resize it.

    Args:
        image: A dictionary containing image information (must have an "id").

    Returns:
        A tuple of (image id, resized image bytes), or (image id, None) when
        the download or resize fails.
    """
    url = get_url(image)
    image_id = image["id"]
    try:
        # A timeout keeps one dead URL from stalling a thread-pool worker
        # forever (previously there was none).
        pic = requests.get(url, timeout=30)
        return image_id, resize_image(pic.content)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; any download/decode failure yields None for the caller
        # to record as expired.
        return image_id, None
def create_folder(folder_name: str):
    """
    Creates a folder if it does not already exist.

    Args:
        folder_name (str): The name of the folder to create.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard (two threads could both pass the check).
    os.makedirs(folder_name, exist_ok=True)
The provided code snippet includes necessary dependencies for implementing the `download` function. Write a Python function `def download(images: list[dict[str]], num_threads: int)` to solve the following problem:
Download multiple images concurrently using thread pooling. Args: images: A list of dictionaries, each containing image information. num_threads: The number of threads to use for concurrent downloading. Returns: A dictionary mapping image IDs to their corresponding resized images as bytes.
Here is the function:
def download(images: list[dict[str]], num_threads: int):
    """
    Download multiple images concurrently using thread pooling.

    Args:
        images: A list of dictionaries, each containing image information.
        num_threads: The number of threads to use for concurrent downloading.

    Returns:
        A dictionary mapping image IDs to their corresponding resized images
        as bytes. IDs whose download failed are recorded in
        ``output/expired_images.json`` instead of the returned dict.
    """
    output = {}
    process_bar = tqdm(total=len(images), unit="image", desc="Downloading images")
    expired_images = []
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        # download_single_image returns (id, None) on failure, so failures are
        # collected separately instead of aborting the whole run.
        for id, image in executor.map(download_single_image, images):
            if image is not None:
                output[id] = image
            else:
                expired_images.append(id)
            process_bar.update(1)
    process_bar.close()
    create_folder("output")
    with open("output/expired_images.json", "w") as f:
        json.dump(expired_images, f, indent=4)
    return output
7,843 | import os
from glob import glob
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor
from image_utils import process_image
def process(cur_dir, img_root):
    """
    Process every image in one scene's "color" directory.

    Args:
        cur_dir (str): The name of the current scene directory.
        img_root (str): The root directory of the images.

    Returns:
        dict: mapping "{cur_dir}_color_{stem}" -> processed image bytes.
    """
    color_dir = os.path.join(img_root, cur_dir, "color")
    images = {}
    for entry in os.listdir(color_dir):
        full_path = os.path.join(img_root, cur_dir, "color", entry)
        with open(full_path, "rb") as fh:
            raw = fh.read()
        # Key drops the 4-character file extension (e.g. ".png").
        images[f"{cur_dir}_color_{entry[:-4]}"] = process_image(raw)
    return images
The provided code snippet includes necessary dependencies for implementing the `process_data` function. Write a Python function `def process_data(img_root: str, num_threads: int)` to solve the following problem:
Process images in parallel using multiple threads. Args: img_root (str): The root directory of the images. num_threads (int): The number of threads to use for parallel processing. Returns: dict: A dictionary containing processed images. The keys are unique identifiers for each image, and the values are the processed images.
Here is the function:
def process_data(img_root: str, num_threads: int):
    """
    Process images in parallel using multiple threads.

    Args:
        img_root (str): The root directory of the images.
        num_threads (int): The number of threads to use for parallel processing.

    Returns:
        dict: A dictionary containing processed images. The keys are unique
        identifiers for each image, and the values are the processed images.
    """
    # Only directories matching "scene*_00" directly under img_root are used.
    keys_dir = glob(os.path.join(img_root, "scene*_00"))
    keys = list(map(os.path.basename, keys_dir))
    all_images = {}
    # The bar ticks once per scene directory (one `process` call each),
    # despite the "image" unit label.
    process_bar = tqdm(total=len(keys), unit="image", desc="Loading images")
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        for images in executor.map(process, keys, [img_root] * len(keys)):
            all_images.update(images)
            process_bar.update()
    process_bar.close()
    return all_images
7,844 | from abc import ABC, abstractmethod
from typing import List, Dict, Any, Union
import importlib
import json
AVAILABLE_DATASETS: List[str] = [
"change.SpotTheDifference",
"change.CocoSpotTheDifference",
"video.DenseCaptions",
"video.TVCaptions",
"video.VisualStoryTelling",
"3d.SceneNavigation",
"funqa.FunQA_translation",
"funqa.FunQA_mcqa",
"funqa.FunQA_dia",
"fpv.EGO4D",
"translate.Translation",
]
class AbstractDataset(ABC):
    """Base class for GPT-query datasets.

    Loads a prompt file (system message plus in-context examples) and a list
    of query inputs, and iterates over per-example query dictionaries.
    """
    def __init__(self, name: str, prompt_path: str, query_inputs_path: str):
        """Constructor."""
        self.name: str = name
        # Normalized prompt: {"system_message": str, "in_context": [messages]}.
        self.prompt: Dict[str, Union[str, List[Dict[str, Union[str, List[Dict[str, str]]]]]]] = self._load_prompt(prompt_path)
        self.query_inputs: List[str] = self._load_query_inputs(query_inputs_path)
    def _load_prompt(self, path: str) -> Dict[str, Union[str, List[Dict[str, Union[str, List[Dict[str, str]]]]]]]:
        """Load the prompt JSON and flatten assistant QA pairs into strings."""
        with open(path, "r") as f:
            json_data: Dict[str, Any] = json.load(f)
        in_context: List[Dict[str, Union[str, List[Dict[str, str]]]]] = json_data["in_context"].copy()
        for n, conv in enumerate(json_data["in_context"]):
            role, content = conv["role"], conv["content"]
            # we need to convert the QA pair into a string
            if role == "assistant":
                content_string = ""
                if isinstance(content, str):
                    content_string = content
                else:
                    # Each QA pair dict becomes "prefix: text\n" lines.
                    for qa_pair in content:
                        for prefix, text in qa_pair.items():
                            content_string += prefix + ": " + text + "\n"
            elif role == "user":
                content_string = content
            else:
                raise ValueError("wrong role. Only user and assistant are allowed.")
            in_context[n] = {"role": role, "content": content_string}
        results: Dict[str, Union[str, List[Dict[str, Union[str, List[Dict[str, str]]]]]]] = {
            "system_message": json_data["system_message"],
            "in_context": in_context,
        }
        return results
    def _load_query_inputs(self, path: str) -> List[str]:
        """
        Load the query_inputs from the given path.

        Subclasses are expected to override this; the base implementation is a
        bare ``pass`` and therefore returns None.
        """
        pass
    def __getitem__(self, index: int) -> Dict[str, Any]:
        """
        Return the item at the given index as a dictionary.

        NOTE(review): reads ``self.data``, which this base class never sets —
        subclasses must define it, otherwise this raises AttributeError.
        """
        return self.data[index]
    def __iter__(self) -> "AbstractDataset":
        # Iteration state lives on the instance, so two concurrent iterations
        # over the same object would interfere with each other.
        self.index = 0
        return self
    def __next__(
        self,
    ) -> Dict[str, Union[str, List[Dict[str, Union[str, List[Dict[str, str]]]]]]]:
        # Yield one query dict per stored input, then signal exhaustion.
        if self.index < len(self.query_inputs):
            outputs = {
                "system_messages": self.prompt["system_message"],
                "in_context": self.prompt["in_context"],
                "query_input": self.query_inputs[self.index],
            }
            self.index += 1
            return outputs
        raise StopIteration
    def __len__(self) -> int:
        return len(self.query_inputs)
    def __str__(self) -> str:
        return f"{self.name} dataset"
def get_dataset_by_path(path: str, dataset_args: dict[str, str]) -> AbstractDataset:
    """
    Instantiate a dataset from a registry path of the form "module.ClassName".

    Args:
        path (str): one of AVAILABLE_DATASETS, e.g. "video.DenseCaptions".
        dataset_args (dict[str, str]): keyword arguments for the constructor.

    Returns:
        AbstractDataset: the instantiated dataset.
    """
    assert path in AVAILABLE_DATASETS, f"{path} is not an available dataset."
    module_path, dataset_name = path.split(".")
    # Dataset modules live under the local "datasets" package.
    module_path = "datasets." + module_path
    imported_module = importlib.import_module(module_path)
    dataset_class = getattr(imported_module, dataset_name)
    # The debug print that used to sit here (marked "TODO: remove later")
    # has been removed.
    return dataset_class(**dataset_args)
7,845 | from abc import ABC, abstractmethod
from typing import List, Dict, Any, Union
import importlib
import json
AVAILABLE_DATASETS: List[str] = [
"change.SpotTheDifference",
"change.CocoSpotTheDifference",
"video.DenseCaptions",
"video.TVCaptions",
"video.VisualStoryTelling",
"3d.SceneNavigation",
"funqa.FunQA_translation",
"funqa.FunQA_mcqa",
"funqa.FunQA_dia",
"fpv.EGO4D",
"translate.Translation",
]
def get_available_datasets() -> List[str]:
    """Return the registered dataset paths.

    A copy is returned so callers cannot accidentally mutate the module-level
    AVAILABLE_DATASETS registry.
    """
    return list(AVAILABLE_DATASETS)
7,846 | import os
import argparse
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Union
import openai
from tqdm import tqdm
from abstract_dataset import get_dataset_by_path
from file_utils import (
save_query_json,
export_output_json,
format_output,
query_gpt,
)
def query_gpt(inputs: dict[str], dataset_name: str) -> tuple[dict[str, str], str]:
    """
    Query the GPT API with the given inputs.
    Args:
        inputs (dict): {"system_messages", "in_context", "query_input"} as
            produced by the dataset iterator.
        dataset_name (str): dataset key; "3d.SceneNavigation" gets a special
            candidate-activity prompt built from ./candidates.txt.
    Returns:
        Response (dict[str, str]): the response from GPT API.
        Input ID (str): the id that specifies the input.
    """
    if dataset_name == "3d.SceneNavigation":
        # Sample 9 candidate activities from a local file to seed the prompt.
        with open("./candidates.txt") as f:
            candidates = f.readlines()
        cur_candidates = random.sample(candidates, 9)
        cur_candidates_string = "\n".join(cur_candidates)
    messages = [
        {
            "role": "system",
            "content": inputs["system_messages"],
        }
    ]
    # multi-round conversation in the in_context
    messages.extend(inputs["in_context"])
    if dataset_name == "3d.SceneNavigation":
        messages.append(
            {
                "role": "user",
                "content": f"Sentences: {inputs['query_input']['sentences']}\nCandidate activities and the role who want to do these activities:{cur_candidates_string}\nPlease give me three conversations for three activities. Each conversation should have three rounds. You should select activities from the candidates. At the beginning of conversation (after introducing the human role), must giving me the reason why the current activity is selected for this room, in the format:reason:XXX\nPlease ensuring that the assistant should not always answer in the format of listing.",
            },
        )
    else:
        messages.append(
            {
                "role": "user",
                "content": inputs["query_input"]["sentences"],
            },
        )
    # NOTE(review): `succuss` is a misspelling of "success" and effectively
    # means "retry needed": the loop re-issues the request after rate-limit
    # errors (after a 3 s sleep) and, for any other failure, stops and returns
    # an {"error_message": ...} dict in place of the API response.
    succuss = True
    while succuss:
        try:
            response = completion(
                engine=engine,  # defined by os.environ, default engine="chatgpt0301",
                messages=messages,
                temperature=0.7,
                max_tokens=3200,
                top_p=0.95,
                frequency_penalty=0,
                presence_penalty=0,
                stop=None,
            )
            succuss = False
        except Exception as e:
            print(f"Error: {e}")
            if "have exceeded call rate limit" in str(e):
                print("Sleeping for 3 seconds")
                succuss = True
                time.sleep(3)
            else:
                succuss = False
                response = {"error_message": str(e)}
    return response, inputs["query_input"]["id"]
def format_output(response: str, file_id: str, dataset_name: str) -> tuple[list[dict[str, str]], list[dict[str, str]]]:
    """
    Format the output of ChatGPT.
    Args:
        response (str): the output from ChatGPT.
        file_id (str): the id of the input.
        dataset_name (str): dataset key selecting the parsing strategy.
    Returns:
        valid_output (list[dict[str]]): valid items, each a dict with keys
            "id" and "results".
        invalid_output (list[dict[str]]): invalid items (currently never
            populated — `is_valid` is hard-coded True).
    """
    valid_output = []
    invalid_output = []
    if dataset_name == "3d.SceneNavigation":
        # Split into per-conversation chunks; each chunk is re-prefixed with
        # the "Conversation 1" marker that split() consumed.
        for pair_of_answer in response.strip().split("Conversation 1")[1:]:
            is_valid = True
            formatted = {"id": file_id, "results": f"Conversation 1{pair_of_answer}"}
            if is_valid:
                valid_output.append(formatted)
            else:
                invalid_output.append(formatted)
    # elif dataset_name == "video.DenseCaptions":
    else:
        # All other datasets keep the raw response as a single result.
        formatted = {"id": file_id, "results": response}
        valid_output.append(formatted)
    # else:
    #     for pair_of_answer in response.strip().split("\n\n"):
    #         is_valid, formatted = split_question_and_answer(pair_of_answer, file_id)
    #         if is_valid:
    #             valid_output.append(formatted)
    #         else:
    #             invalid_output.append(formatted)
    return valid_output, invalid_output
def task(inputs: Dict[str, Union[str, Dict[str, Union[str, int]]]]) -> Dict[str, Union[Dict[str, int], List[str]]]:
    """Query GPT for one example and format the result.

    Reads the module-level ``dataset_name`` global. Any failure is captured
    and returned as {"error_message": ...} instead of propagating, so one bad
    example cannot kill a thread-pool run.
    """
    global dataset_name
    try:
        gpt_output, file_id = query_gpt(inputs, dataset_name)
        # Token accounting comes from the API response's "usage" field.
        tokens = dict(gpt_output["usage"])
        valid_outputs, invalid_outputs = format_output(gpt_output["choices"][0]["message"]["content"], file_id, dataset_name)
        result = {
            "tokens": tokens,
            "valid_outputs": valid_outputs,
            "invalid_outputs": invalid_outputs,
        }
    except Exception as e:
        result = {"error_message": str(e)}
    return result
7,847 | import os
import argparse
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Union
import openai
from tqdm import tqdm
from abstract_dataset import get_dataset_by_path
from file_utils import (
save_query_json,
export_output_json,
format_output,
query_gpt,
)
def update_progress(_):
    """Future callback: advance the progress bar by one step."""
    # NOTE(review): relies on a global `progress_bar` created elsewhere; the
    # callback's argument (the finished future/result) is ignored.
    progress_bar.update(1)
7,848 | import json
import os
import time
import openai
import random
from litellm import completion
The provided code snippet includes necessary dependencies for implementing the `split_question_and_answer` function. Write a Python function `def split_question_and_answer(pair_of_answer: str, file_id: str) -> tuple[bool, dict[str, str]]` to solve the following problem:
Split the question and answer from the pair of question and answer. Args: pair_of_answer (str): the pair of question and answer. file_id (str): the id of the file.
Here is the function:
def split_question_and_answer(pair_of_answer: str, file_id: str) -> tuple[bool, dict[str, str]]:
    """
    Split a "Question: ...\\nAnswer: ..." pair into its parts.

    Args:
        pair_of_answer (str): the pair of question and answer.
        file_id (str): the id of the file.

    Returns:
        (True, {"id", "question", "answer"}) on success, otherwise
        (False, {"id", "response", "error_message"}).
    """
    try:
        question, answer = pair_of_answer.split("\n")
        # maxsplit=1 so a question/answer that itself contains ": " is no
        # longer rejected (previously the 2-value unpack raised ValueError and
        # the item was wrongly flagged invalid).
        question_prefix, question = question.split(": ", 1)
        answer_prefix, answer = answer.split(": ", 1)
        if question_prefix != "Question":
            raise ValueError("The prefix is not Question")
        if answer_prefix != "Answer":
            raise ValueError("The prefix is not Answer")
        return True, {"id": file_id, "question": question, "answer": answer}
    except Exception as e:
        return False, {
            "id": file_id,
            "response": pair_of_answer,
            "error_message": str(e),
        }
7,849 | import json
import os
import time
import openai
import random
from litellm import completion
The provided code snippet includes necessary dependencies for implementing the `export_single_output_json` function. Write a Python function `def export_single_output_json(result: dict[str, str], file_name: str, dataset_name: str, duration: float) -> None` to solve the following problem:
Export the output of ChatGPT to a json file. Args:
Here is the function:
def export_single_output_json(result: dict[str, str], file_name: str, dataset_name: str, duration: float) -> None:
    """
    Export one task's ChatGPT result to per-file JSON artifacts.

    Writes ``<file_name>_valid_output.json``, ``_invalid_output.json`` and
    ``_error_messages.json`` (each only when the corresponding key is present
    in ``result``) plus a ``_meta.json`` with token counts and timing, all
    under ``output_<dataset_name>/``.

    Args:
        result (dict): output of a single task; may contain "valid_outputs",
            "invalid_outputs", "error_messages" and "tokens".
        file_name (str): prefix for the emitted files.
        dataset_name (str): used to build the output folder name.
        duration (float): elapsed seconds, rounded to 2 dp in the meta file.
    """
    valid_output = []
    invalid_output = []
    output_folder = f"output_{dataset_name}"
    os.makedirs(output_folder, exist_ok=True)
    # if len(data := result["valid_outputs"]) > 0:
    if "valid_outputs" in result:
        valid_output = result["valid_outputs"]
        with open(f"{output_folder}/{file_name}_valid_output.json", "w") as f:
            json.dump(valid_output, f, indent=4)
    if "invalid_outputs" in result:
        invalid_output = result["invalid_outputs"]
        with open(f"{output_folder}/{file_name}_invalid_output.json", "w") as f:
            json.dump(invalid_output, f, indent=4)
    if "error_messages" in result:
        error_messages = result["error_messages"]
        with open(f"{output_folder}/{file_name}_error_messages.json", "w") as f:
            json.dump(error_messages, f, indent=4)
    with open(f"{output_folder}/{file_name}_meta.json", "w") as f:
        meta_data = {}
        # A result without "tokens" (e.g. an errored query) counts as zero usage.
        meta_data["completion_tokens"] = result["tokens"]["completion_tokens"] if "tokens" in result else 0
        meta_data["prompt_tokens"] = result["tokens"]["prompt_tokens"] if "tokens" in result else 0
        meta_data["total_tokens"] = meta_data["completion_tokens"] + meta_data["prompt_tokens"]
        meta_data["valid_outputs"] = len(valid_output)
        meta_data["invalid_outputs"] = len(invalid_output)
        meta_data["time"] = round(duration, 2)
        json.dump(meta_data, f, indent=4)
7,850 | import json
import os
import time
import openai
import random
from litellm import completion
The provided code snippet includes necessary dependencies for implementing the `export_output_json` function. Write a Python function `def export_output_json(results: list[dict[str, str]], name: str, duration: float) -> None` to solve the following problem:
Export the output of ChatGPT to a json file. Args:
Here is the function:
def export_output_json(results: list[dict[str, str]], name: str, duration: float) -> None:
    """
    Aggregate per-task results and export them under ``output_<name>/``.

    Always writes valid_output.json; writes invalid_output.json and
    error_messages.json only when non-empty; writes meta.json with token
    counts, item counts and elapsed time.

    Args:
        results (list[dict]): per-task dicts, each either
            {"error_message": ...} or
            {"valid_outputs", "invalid_outputs", "tokens"}.
        name (str): suffix of the output folder.
        duration (float): elapsed seconds, rounded to 2 dp in meta.json.
    """
    valid_output = []
    invalid_output = []
    error_messages = []
    output_folder = f"output_{name}"
    num_completion_tokens = 0
    num_prompt_tokens = 0
    os.makedirs(output_folder, exist_ok=True)
    for result in results:
        # Errored tasks carry no outputs or token usage; collect and skip.
        if "error_message" in result:
            error_messages.append(result)
            continue
        valid_output.extend(result["valid_outputs"])
        invalid_output.extend(result["invalid_outputs"])
        num_completion_tokens += result["tokens"]["completion_tokens"]
        num_prompt_tokens += result["tokens"]["prompt_tokens"]
    with open(f"{output_folder}/valid_output.json", "w") as f:
        json.dump(valid_output, f, indent=4)
    if len(invalid_output) > 0:
        with open(f"{output_folder}/invalid_output.json", "w") as f:
            json.dump(invalid_output, f, indent=4)
    if len(error_messages) > 0:
        with open(f"{output_folder}/error_messages.json", "w") as f:
            json.dump(error_messages, f, indent=4)
    with open(f"{output_folder}/meta.json", "w") as f:
        json.dump(
            {
                "completion_tokens": num_completion_tokens,
                "prompt_tokens": num_prompt_tokens,
                "total_tokens": num_completion_tokens + num_prompt_tokens,
                "valid_outputs": len(valid_output),
                "invalid_outputs": len(invalid_output),
                "error_messages": len(error_messages),
                "time": round(duration, 2),
                "total_examples": len(results),
            },
            f,
            indent=4,
        )
7,851 | import json
import os
import time
import openai
import random
from litellm import completion
The provided code snippet includes necessary dependencies for implementing the `save_query_json` function. Write a Python function `def save_query_json(inputs: dict[str], name: str) -> None` to solve the following problem:
Save the query json to a file. Args: inputs (dict[str]): the inputs to query the GPT API. name (str): the name of the file.
Here is the function:
def save_query_json(inputs: dict[str], name: str) -> None:
    """
    Persist the GPT query inputs to ``output_<name>/query_input.json``.

    Args:
        inputs (dict[str]): the inputs to query the GPT API.
        name (str): suffix used to build the output folder name.
    """
    folder = f"output_{name}"
    os.makedirs(folder, exist_ok=True)
    with open(f"{folder}/query_input.json", "w") as handle:
        json.dump(inputs, handle, indent=4)
7,852 | from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
SequenceClassifierOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.llama.configuration_llama import LlamaConfig
import xformers.ops as xops
The provided code snippet includes necessary dependencies for implementing the `_make_causal_mask` function. Write a Python function `def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, )` to solve the following problem:
Make causal mask used for bi-directional self-attention.
Here is the function:
def _make_causal_mask(
input_ids_shape: torch.Size,
dtype: torch.dtype,
device: torch.device,
past_key_values_length: int = 0,
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full(
(tgt_len, tgt_len),
torch.tensor(torch.finfo(dtype).min, device=device),
device=device,
)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat(
[
torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device),
mask,
],
dim=-1,
)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) | Make causal mask used for bi-directional self-attention. |
7,853 | from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
SequenceClassifierOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.llama.configuration_llama import LlamaConfig
import xformers.ops as xops
The provided code snippet includes necessary dependencies for implementing the `_expand_mask` function. Write a Python function `def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None)` to solve the following problem:
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
Here is the function:
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) | Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. |
7,854 | from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
CausalLMOutputWithPast,
SequenceClassifierOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from transformers.models.llama.configuration_llama import LlamaConfig
import xformers.ops as xops
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    # (a, b) -> (-b, a): the 90-degree rotation used by rotary embeddings.
    return torch.cat((-second, first), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    """Apply rotary position embeddings (RoPE) to query and key tensors."""
    # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
    cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
    sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
    # Gather the per-position rotation factors and add a broadcastable head axis.
    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    # Standard RoPE rotation: x * cos + rotate_half(x) * sin.
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
7,855 | import random
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
import numpy as np
from PIL import Image
def crop(image, target, region, delete=True):
    """
    Crop ``image`` to ``region`` and adjust the annotations in ``target``.

    Args:
        image: a PIL image.
        target (dict): annotation dict; may contain "boxes", "polygons",
            "masks", "labels", "area"; "size" is overwritten.
        region: (i, j, h, w) crop rectangle as used by torchvision's F.crop.
        delete (bool): when True, drop annotation entries whose box/mask has
            zero area after cropping.

    Returns:
        (cropped image, updated copy of target).
    """
    cropped_image = F.crop(image, *region)
    target = target.copy()
    i, j, h, w = region
    # should we do something wrt the original size?
    target["size"] = torch.tensor([h, w])
    fields = ["labels", "area"]
    if "boxes" in target:
        boxes = target["boxes"]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # Shift boxes into the crop's coordinate frame, then clamp to [0, w/h].
        cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
        cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        # Recompute area from the clamped corner pair.
        area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
        target["boxes"] = cropped_boxes.reshape(-1, 4)
        target["area"] = area
        fields.append("boxes")
    if "polygons" in target:
        polygons = target["polygons"]
        num_polygons = polygons.shape[0]
        max_size = torch.as_tensor([w, h], dtype=torch.float32)
        # Repeat the (j, i) offset for every (x, y) vertex of the polygon.
        start_coord = torch.cat(
            [torch.tensor([j, i], dtype=torch.float32) for _ in range(polygons.shape[1] // 2)],
            dim=0,
        )
        cropped_boxes = polygons - start_coord
        cropped_boxes = torch.min(cropped_boxes.reshape(num_polygons, -1, 2), max_size)
        cropped_boxes = cropped_boxes.clamp(min=0)
        target["polygons"] = cropped_boxes.reshape(num_polygons, -1)
        fields.append("polygons")
    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target["masks"] = target["masks"][:, i : i + h, j : j + w]
        fields.append("masks")
    # remove elements for which the boxes or masks that have zero area
    if delete and ("boxes" in target or "masks" in target):
        # favor boxes selection when defining which elements to keep
        # this is compatible with previous implementation
        if "boxes" in target:
            cropped_boxes = target["boxes"].reshape(-1, 2, 2)
            keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
        else:
            keep = target["masks"].flatten(1).any(1)
        for field in fields:
            target[field] = target[field][keep.tolist()]
    return cropped_image, target
7,856 | import random
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
import numpy as np
from PIL import Image
def hflip(image, target):
    """
    Horizontally flip ``image`` and mirror the annotations in ``target``.

    Boxes are remapped as x -> w - x (with x1/x2 swapped so the corners stay
    ordered); polygon vertices are mirrored in x; masks flip along the last
    (width) axis.
    """
    flipped_image = F.hflip(image)
    w, h = image.size
    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        # Column reorder [x2, y1, x1, y2], negate x, then add width.
        boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
        target["boxes"] = boxes
    if "polygons" in target:
        polygons = target["polygons"]
        num_polygons = polygons.shape[0]
        # NOTE(review): unlike crop(), polygons are left in (N, V, 2) shape
        # here rather than flattened back to (N, 2V) — confirm downstream
        # consumers accept this.
        polygons = polygons.reshape(num_polygons, -1, 2) * torch.as_tensor([-1, 1]) + torch.as_tensor([w, 0])
        target["polygons"] = polygons
    if "masks" in target:
        target["masks"] = target["masks"].flip(-1)
    return flipped_image, target
7,857 | import random
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
import numpy as np
from PIL import Image
def resize(image, target, size, max_size=None):
    """
    Resize ``image`` (bicubic) and rescale the annotations in ``target``.

    Args:
        image: a PIL image.
        target (dict | None): annotations with optional "boxes", "polygons",
            "area"; "size" is updated. May be None for image-only resizing.
        size: either a short-side scalar or an explicit (w, h) tuple.
        max_size: optional cap on the longer side when ``size`` is a scalar.

    Returns:
        The rescaled image, or (rescaled image, updated target) when a target
        is given.
    """
    # size can be min_size (scalar) or (w, h) tuple
    def get_size_with_aspect_ratio(image_size, size, max_size=None):
        # Scale the short side to `size`, preserving aspect ratio, and cap
        # the long side at `max_size` when given.
        w, h = image_size
        if (w <= h and w == size) or (h <= w and h == size):
            if max_size is not None:
                max_size = int(max_size)
                h = min(h, max_size)
                w = min(w, max_size)
            return (h, w)
        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)
        if max_size is not None:
            max_size = int(max_size)
            oh = min(oh, max_size)
            ow = min(ow, max_size)
        return (oh, ow)
    def get_size(image_size, size, max_size=None):
        # An explicit (w, h) is returned reversed as (h, w) for F.resize.
        if isinstance(size, (list, tuple)):
            return size[::-1]
        else:
            return get_size_with_aspect_ratio(image_size, size, max_size)
    size = get_size(image.size, size, max_size)
    rescaled_image = F.resize(image, size, interpolation=T.InterpolationMode.BICUBIC)
    if target is None:
        return rescaled_image
    # Per-axis scale factors between the new and the old image size.
    ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
    ratio_width, ratio_height = ratios
    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
        target["boxes"] = scaled_boxes
    if "polygons" in target:
        polygons = target["polygons"]
        # Repeat (rw, rh) for every (x, y) vertex of the polygon.
        scaled_ratio = torch.cat(
            [torch.tensor([ratio_width, ratio_height]) for _ in range(polygons.shape[1] // 2)],
            dim=0,
        )
        scaled_polygons = polygons * scaled_ratio
        target["polygons"] = scaled_polygons
    if "area" in target:
        area = target["area"]
        scaled_area = area * (ratio_width * ratio_height)
        target["area"] = scaled_area
    h, w = size
    target["size"] = torch.tensor([h, w])
    if "masks" in target:
        # Mask resizing is intentionally unsupported in this code path.
        assert False
        # target['masks'] = interpolate(
        #     target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5
    return rescaled_image, target
7,858 | import base64
import contextlib
import os
import random
import re
import sys
from io import BytesIO
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import orjson
import torch
from PIL import Image, ImageFile
from prettytable import PrettyTable
from torch.utils.data import Dataset
from torchvision import transforms
from transformers import AutoProcessor
import wandb
from pipeline.train.train_utils import master_print, truncate_text
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `random_seed` function. Write a Python function `def random_seed(seed, *addl_seeds)` to solve the following problem:
Context manager which seeds the NumPy PRNG with the specified seed and restores the state afterward
Here is the function:
@contextlib.contextmanager
def random_seed(seed, *addl_seeds):
    """Context manager which seeds the NumPy PRNG with the specified seed and
    restores the state afterward.

    Also seeds (and restores) Python's ``random`` module.  When ``seed`` is
    None the body runs with the PRNG state untouched.  Extra ``addl_seeds``
    are hashed together with ``seed`` to derive a distinct stream.

    BUGFIX: the ``@contextlib.contextmanager`` decorator was missing, so
    ``with random_seed(...)`` raised (a bare generator has no ``__enter__``).
    """
    if seed is None:
        yield
        return
    if len(addl_seeds) > 0:
        # Fold the auxiliary seeds into one deterministic value in [0, 1e6).
        seed = int(hash((seed, *addl_seeds)) % 1e6)
    numpy_state = np.random.get_state()
    random_state = random.getstate()
    np.random.seed(seed)
    random.seed(seed)
    try:
        yield
    finally:
        # Restore both PRNGs even if the body raised.
        np.random.set_state(numpy_state)
        random.setstate(random_state)
7,859 | import base64
import contextlib
import os
import random
import re
import sys
from io import BytesIO
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import orjson
import torch
from PIL import Image, ImageFile
from prettytable import PrettyTable
from torch.utils.data import Dataset
from torchvision import transforms
from transformers import AutoProcessor
import wandb
from pipeline.train.train_utils import master_print, truncate_text
import numpy as np
def resample_data(data, N):
    """Return *data* resampled to exactly N items.

    N in (-1, 0) or N == len(data): the list is returned unchanged.
    N > len(data): whole copies of the list plus a random remainder.
    N < len(data): a random subset.  Sampling is deterministic (seed 0).
    """
    size = len(data)
    if N == -1 or N == 0 or N == size:
        return data
    random.seed(0)  # deterministic resampling across runs
    if N > size:
        full_copies, leftover = divmod(N, size)
        return data * full_copies + random.choices(data, k=leftover)
    return random.sample(data, N)
7,860 | import base64
import contextlib
import os
import random
import re
import sys
from io import BytesIO
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import orjson
import torch
from PIL import Image, ImageFile
from prettytable import PrettyTable
from torch.utils.data import Dataset
from torchvision import transforms
from transformers import AutoProcessor
import wandb
from pipeline.train.train_utils import master_print, truncate_text
import numpy as np
def extract_rgb_number(path):
    """Return the single digit x of the first 'rgb{x}' occurrence in *path*,
    or -1 when no such pattern exists."""
    found = re.search(r"rgb(\d)", path)
    return int(found.group(1)) if found else -1
7,861 | import base64
import contextlib
import os
import random
import re
import sys
from io import BytesIO
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import orjson
import torch
from PIL import Image, ImageFile
from prettytable import PrettyTable
from torch.utils.data import Dataset
from torchvision import transforms
from transformers import AutoProcessor
import wandb
from pipeline.train.train_utils import master_print, truncate_text
import numpy as np
def prepare_fuyu(args, fuyu_processor, batch_data, resolution):
    """Build Fuyu model inputs (with training labels) from one raw batch.

    Resizes every image to *resolution* (or a randomly chosen one when
    args.dynamic_resolution is set), tokenizes, derives labels from the
    special token 71122, then strips that token from inputs and labels.
    Consumes batch_data["pil_images"] (the key is deleted).
    """
    if args.dynamic_resolution:
        resolution = random.choice([(448, 448), (512, 512), (768, 768)])
    # Each entry is a 1-element list of PIL images; None entries are skipped.
    images = [entry[0].resize(resolution) for entry in batch_data["pil_images"] if entry is not None]
    model_inputs = fuyu_processor(text=batch_data["full_text"], images=images)
    token_labels = fuyu_processor.get_labels(input_ids=model_inputs["input_ids"], special_token_id=71122)
    # The marker token only delimits the answer span; remove it everywhere.
    cleaned_ids, cleaned_labels = fuyu_processor.find_and_remove_tokens(
        input_ids=model_inputs["input_ids"], labels=token_labels, token_id=71122
    )
    model_inputs["input_ids"] = cleaned_ids
    model_inputs["labels"] = cleaned_labels
    del batch_data["pil_images"]
    return model_inputs
7,862 | import ast
import functools
import io
import json
import logging
import math
import os
import random
import statistics
import sys
from dataclasses import dataclass
from multiprocessing import Value
import braceexpand
import numpy as np
import torch
import torch.utils
import torchvision
import webdataset as wds
import yaml
from PIL import Image, ImageFile, ImageSequence
from torch.utils.data import ConcatDataset, DataLoader, IterableDataset, RandomSampler, get_worker_info
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import base_plus_ext, tar_file_expander, url_opener, valid_sample
import json
import os
import yaml
from PIL import Image, ImageFile
from pipeline.mimicit_utils.mimicit_dataset import MimicitDataset
from pipeline.train.train_utils import DistributedProxySampler
import base64
from src.otter_ai.models.fuyu.processing_fuyu import FuyuProcessor
from functools import partial
def count_samples(dataloader):
    """Iterate *dataloader* (yielding (images, texts) batches) and return
    (total_elements, total_batches).  Also sets WDS_EPOCH=0 in the env."""
    os.environ["WDS_EPOCH"] = "0"
    total_elements = 0
    total_batches = 0
    for images, texts in dataloader:
        # Every batch must be aligned image/text-wise.
        assert len(images) == len(texts)
        total_batches += 1
        total_elements += len(images)
    return total_elements, total_batches
7,863 | import ast
import functools
import io
import json
import logging
import math
import os
import random
import statistics
import sys
from dataclasses import dataclass
from multiprocessing import Value
import braceexpand
import numpy as np
import torch
import torch.utils
import torchvision
import webdataset as wds
import yaml
from PIL import Image, ImageFile, ImageSequence
from torch.utils.data import ConcatDataset, DataLoader, IterableDataset, RandomSampler, get_worker_info
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import base_plus_ext, tar_file_expander, url_opener, valid_sample
import json
import os
import yaml
from PIL import Image, ImageFile
from pipeline.mimicit_utils.mimicit_dataset import MimicitDataset
from pipeline.train.train_utils import DistributedProxySampler
import base64
from src.otter_ai.models.fuyu.processing_fuyu import FuyuProcessor
from functools import partial
The provided code snippet includes necessary dependencies for implementing the `pytorch_worker_seed` function. Write a Python function `def pytorch_worker_seed(increment=0)` to solve the following problem:
get dataloader worker seed from pytorch
Here is the function:
def pytorch_worker_seed(increment=0):
    """get dataloader worker seed from pytorch"""
    info = get_worker_info()
    if info is None:
        # Not inside a DataLoader worker: fall back to webdataset's
        # rank-based seed.
        return wds.utils.pytorch_worker_seed()
    # Favour the seed PyTorch already assigned to this worker.
    seed = info.seed
    if increment:
        # Space out increments so iterations can't collide across workers.
        seed += increment * max(1, info.num_workers)
    return seed
7,864 | import ast
import functools
import io
import json
import logging
import math
import os
import random
import statistics
import sys
from dataclasses import dataclass
from multiprocessing import Value
import braceexpand
import numpy as np
import torch
import torch.utils
import torchvision
import webdataset as wds
import yaml
from PIL import Image, ImageFile, ImageSequence
from torch.utils.data import ConcatDataset, DataLoader, IterableDataset, RandomSampler, get_worker_info
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import base_plus_ext, tar_file_expander, url_opener, valid_sample
import json
import os
import yaml
from PIL import Image, ImageFile
from pipeline.mimicit_utils.mimicit_dataset import MimicitDataset
from pipeline.train.train_utils import DistributedProxySampler
import base64
from src.otter_ai.models.fuyu.processing_fuyu import FuyuProcessor
from functools import partial
# NOTE(review): the body of get_dataset_fn is missing from this excerpt --
# presumably it maps a dataset-type string to a loader callable; confirm
# against the original pipeline module, as the code below is not runnable
# as shown.
def get_dataset_fn(dataset_type):
def get_data(args, image_processor, tokenizer, dataset_type, epoch=0):
    # Resolve the loader for `dataset_type` and build the dataset for `epoch`.
    return get_dataset_fn(dataset_type)(args, image_processor=image_processor, epoch=epoch, tokenizer=tokenizer)
7,865 | import argparse
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from pipeline.serve.conversation import conv_templates, SeparatorStyle
The provided code snippet includes necessary dependencies for implementing the `generate_stream` function. Write a Python function `def generate_stream(tokenizer, model, params, device, context_len=2048, stream_interval=2)` to solve the following problem:
Adapted from fastchat/serve/model_worker.py::generate_stream
Here is the function:
def generate_stream(tokenizer, model, params, device, context_len=2048, stream_interval=2):
    """Adapted from fastchat/serve/model_worker.py::generate_stream

    Incrementally decodes up to ``max_new_tokens`` tokens and yields the
    decoded text (prompt included) every ``stream_interval`` steps.

    Args:
        tokenizer: tokenizer used to encode the prompt / decode outputs.
        model: causal LM supporting ``use_cache`` / ``past_key_values``.
        params: dict with "prompt" plus optional "temperature",
            "max_new_tokens" and "stop" (stop string) keys.
        device: device for the input tensors.
        context_len: context window; the prompt is truncated so prompt +
            generation (+ 8 slack tokens) fits.
        stream_interval: yield the decoded text every this many steps.

    Yields:
        str: decoded output so far, truncated at the stop string if set.
    """
    prompt = params["prompt"]
    l_prompt = len(prompt)  # char offset from which to search for the stop string
    temperature = float(params.get("temperature", 1.0))
    max_new_tokens = int(params.get("max_new_tokens", 256))
    stop_str = params.get("stop", None)

    input_ids = tokenizer(prompt).input_ids
    output_ids = list(input_ids)

    # Leave room in the context window for the new tokens plus a small margin.
    max_src_len = context_len - max_new_tokens - 8
    input_ids = input_ids[-max_src_len:]

    past_key_values = None  # keeps the final cleanup safe if the loop never runs
    for i in range(max_new_tokens):
        if i == 0:
            # Prime the KV cache with the whole (truncated) prompt.
            out = model(torch.as_tensor([input_ids], device=device), use_cache=True)
        else:
            attention_mask = torch.ones(1, past_key_values[0][0].shape[-2] + 1, device=device)
            out = model(
                input_ids=torch.as_tensor([[token]], device=device),
                use_cache=True,
                attention_mask=attention_mask,
                past_key_values=past_key_values,
            )
        logits = out.logits
        past_key_values = out.past_key_values

        last_token_logits = logits[0][-1]
        if temperature < 1e-4:
            # Effectively greedy decoding.
            token = int(torch.argmax(last_token_logits))
        else:
            probs = torch.softmax(last_token_logits / temperature, dim=-1)
            token = int(torch.multinomial(probs, num_samples=1))
        output_ids.append(token)

        stopped = token == tokenizer.eos_token_id

        if i % stream_interval == 0 or i == max_new_tokens - 1 or stopped:
            output = tokenizer.decode(output_ids, skip_special_tokens=True)
            # BUGFIX: guard the stop-string search -- previously this called
            # str.rfind(None, ...) and raised TypeError whenever the "stop"
            # key was absent (its default).
            if stop_str:
                pos = output.rfind(stop_str, l_prompt)
                if pos != -1:
                    output = output[:pos]
                    stopped = True
            yield output

        if stopped:
            break

    del past_key_values
7,866 | import logging
import logging.handlers
import os
import sys
handler = None
class StreamToLogger(object):
    """
    Fake file-like stream object that redirects writes to a logger instance.
    """

    def __init__(self, logger, log_level=logging.INFO):
        self.terminal = sys.stdout
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ""

    def __getattr__(self, attr):
        # Delegate any other file-like attribute to the real stdout.
        return getattr(self.terminal, attr)

    def write(self, buf):
        pending = self.linebuf + buf
        self.linebuf = ""
        for piece in pending.splitlines(True):
            # sys.stdout.write() expects '\n'-terminated lines, which
            # splitlines(keepends=True) preserves.  Only complete lines are
            # logged; a trailing partial line is buffered until its newline
            # arrives.
            if piece.endswith("\n"):
                self.logger.log(self.log_level, piece.rstrip())
            else:
                self.linebuf += piece

    def flush(self):
        # Emit whatever partial line is still buffered.
        if self.linebuf:
            self.logger.log(self.log_level, self.linebuf.rstrip())
        self.linebuf = ""
def build_logger(logger_name, logger_dir):
    """Return a configured INFO-level logger named *logger_name*.

    Process-wide side effects:
      * installs a shared formatter on the root handler,
      * replaces sys.stdout (INFO) and sys.stderr (ERROR) with
        StreamToLogger wrappers,
      * on the first call only, creates *logger_dir* and attaches a
        daily-rotating file handler (<logger_dir>/<logger_name>.log, UTC)
        to every logger registered so far.
    """
    global handler
    formatter = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    # Set the format of root handlers
    if not logging.getLogger().handlers:
        logging.basicConfig(level=logging.INFO)
    logging.getLogger().handlers[0].setFormatter(formatter)
    # Redirect stdout and stderr to loggers
    stdout_logger = logging.getLogger("stdout")
    stdout_logger.setLevel(logging.INFO)
    sl = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl
    stderr_logger = logging.getLogger("stderr")
    stderr_logger.setLevel(logging.ERROR)
    sl = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl
    # Get logger
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)
    # Add a file handler for all loggers
    if handler is None:
        os.makedirs(logger_dir, exist_ok=True)
        filename = os.path.join(logger_dir, logger_name + ".log")
        # "D": rotate daily; timestamps in UTC so rollover is timezone-stable.
        handler = logging.handlers.TimedRotatingFileHandler(filename, when="D", utc=True)
        handler.setFormatter(formatter)
        # Attach the shared file handler to every already-registered logger.
        for name, item in logging.root.manager.loggerDict.items():
            if isinstance(item, logging.Logger):
                item.addHandler(handler)
    return logger
7,867 | import dataclasses
from enum import auto, Enum
from typing import List, Tuple
import io
import base64
import os
from PIL import Image
import copy
def decode_image(encoded_image: str) -> Image:
    """Decode a base64-encoded string back into a PIL Image."""
    raw = base64.b64decode(encoded_image.encode("utf-8"))
    return Image.open(io.BytesIO(raw))
7,868 | from flask import Flask, request, jsonify
from PIL import Image
import torch
from transformers import AutoTokenizer, FuyuForCausalLM, FuyuProcessor, FuyuImageProcessor
import base64
import re
from io import BytesIO
from datetime import datetime
import hashlib
from PIL import Image
import io, os
prompt_txt_path = "../user_logs/prompts.txt"
def save_image_unique(pil_image, directory=images_folder_path):
    """Save *pil_image* as a PNG named after the SHA-256 of its encoded bytes.

    Skips the write (content-addressed dedup) when an identical image is
    already stored.  Returns the file path either way.
    """
    os.makedirs(directory, exist_ok=True)

    # Encode once; the same bytes are used for hashing and for writing.
    buffer = io.BytesIO()
    pil_image.save(buffer, format="PNG")
    png_bytes = buffer.getvalue()

    digest = hashlib.sha256(png_bytes).hexdigest()
    file_name = f"{digest}.png"
    file_path = os.path.join(directory, file_name)

    if os.path.isfile(file_path):
        print(f"Image already exists with the name: {file_name}")
    else:
        with open(file_path, "wb") as out_file:
            out_file.write(png_bytes)
        print(f"Image saved with the name: {file_name}")
    return file_path
import time
def predict(image, prompt):
    """Run the Fuyu model on one image+prompt and return a formatted answer
    string that includes the image resolution and the model latency.

    NOTE(review): relies on module globals `processor`, `model` and `device`
    defined elsewhere in this file -- confirm they are initialised before
    the first request.
    """
    time_start = time.time()
    image = image.convert("RGB")
    # if max(image.size) > 1080:
    #     image.thumbnail((1080, 1080))
    model_inputs = processor(text=prompt, images=[image], device=device)
    # Move every tensor (or list of tensors) in the batch onto the device.
    for k, v in model_inputs.items():
        model_inputs[k] = v.to(device, non_blocking=True) if isinstance(v, torch.Tensor) else [vv.to(device, non_blocking=True) for vv in v]
    # Match the image patches' dtype to the model weights (e.g. fp16/bf16).
    model_inputs["image_patches"][0] = model_inputs["image_patches"][0].to(dtype=next(model.parameters()).dtype)
    generation_output = model.generate(
        **model_inputs,
        max_new_tokens=512,
        pad_token_id=processor.tokenizer.eos_token_id
        # do_sample=True,
        # top_p=0.5,
        # temperature=0.2,
    )
    generation_text = processor.batch_decode(generation_output, skip_special_tokens=True)
    # Keep only the text after the \x04 answer delimiter; assumes the model
    # always emits it -- otherwise this IndexErrors (TODO confirm).
    generation_text = [text.split("\x04")[1].strip() for text in generation_text]
    end_time = time.time()
    formated_interval = f"{end_time - time_start:.3f}"
    response = f"Image Resolution (W, H): {image.size}\n-------------------\nModel Respond Time(s): {formated_interval}\n-------------------\nAnswer: {generation_text[0]}"
    return response
def process_image_and_prompt():
    """Flask handler: decode a base64 image + prompt from the request JSON,
    run `predict`, append the exchange to the prompt log, and return
    {"result": ...} as JSON (400 when image or prompt is missing).

    NOTE(review): the @app.route decorator is not visible in this excerpt;
    confirm how this handler is registered.
    """
    start_time = datetime.now()
    # Parse request data
    data = request.get_json()
    query_content = data["content"][0]
    if "image" not in query_content:
        return jsonify({"error": "Missing Image"}), 400
    elif "prompt" not in query_content:
        return jsonify({"error": "Missing Prompt"}), 400
    # Decode the image
    image_data = query_content["image"]
    image = Image.open(BytesIO(base64.b64decode(image_data)))
    prompt = query_content["prompt"]
    formated_time = start_time.strftime("%Y-%m-%d %H:%M:%S")
    # Persist the image (content-addressed, deduplicated) for the audit log.
    image_path = save_image_unique(image)
    # Preprocess the image and prompt, and run the model
    response = predict(image, prompt)
    # Release cached GPU memory between requests.
    torch.cuda.empty_cache()
    with open(prompt_txt_path, "a") as f:
        f.write(f"*************************{formated_time}**************************" + "\n")
        f.write(f"Image saved to {image_path}" + "\n")
        f.write(f"Prompt: {prompt}" + "\n")
        f.write(f"Response: {response}" + "\n\n")
    # Return the response
    return jsonify({"result": response})
7,869 | import os
import datetime
import json
import base64
from PIL import Image
import gradio as gr
import hashlib
import requests
from utils import build_logger
from conversation import model
import io
def decode_image(encoded_image: str) -> Image:
    """Rebuild a PIL Image from its base64 string representation."""
    payload = base64.b64decode(encoded_image.encode("utf-8"))
    stream = io.BytesIO(payload)
    return Image.open(stream)
7,870 | import os
import datetime
import json
import base64
from PIL import Image
import gradio as gr
import hashlib
import requests
from utils import build_logger
from conversation import model
import io
LOGDIR = "log"


def get_conv_log_filename():
    """Return today's conversation-log path, e.g. log/2024-01-05-conv.json."""
    now = datetime.datetime.now()
    return os.path.join(LOGDIR, f"{now.year}-{now.month:02d}-{now.day:02d}-conv.json")
7,871 | import os
import datetime
import json
import base64
from PIL import Image
import gradio as gr
import hashlib
import requests
from utils import build_logger
from conversation import model
import io
logger = build_logger("otter", LOGDIR)
disable_btn = gr.Button.update(interactive=False)
def regenerate(dialog_state, request: gr.Request):
    """Drop the assistant's last reply (if it is the latest message) so the
    answer can be regenerated, and refresh the chatbot view."""
    logger.info(f"regenerate. ip: {request.client.host}")
    # Only pop when the most recent message came from the assistant role.
    if dialog_state.messages[-1]["role"] == dialog_state.roles[1]:
        dialog_state.messages.pop()
    return (dialog_state, dialog_state.to_gradio_chatbot()) + (disable_btn,) * 4
7,872 | import os
import datetime
import json
import base64
from PIL import Image
import gradio as gr
import hashlib
import requests
from utils import build_logger
from conversation import model
import io
logger = build_logger("otter", LOGDIR)
current_model = model
disable_btn = gr.Button.update(interactive=False)
def init_input_state():
# NOTE(review): the body of init_input_state is missing from this excerpt --
# the sibling module returns {"images": [], "text": "", "images_ids": []};
# confirm before relying on this code as shown.
def clear_history(request: gr.Request):
    """Reset the conversation: fresh copy of the current model template and a
    blank input state, with the action buttons disabled."""
    logger.info(f"clear_history. ip: {request.client.host}")
    dialog_state = current_model.copy()
    input_state = init_input_state()
    return (dialog_state, input_state, dialog_state.to_gradio_chatbot()) + (disable_btn,) * 4
7,873 | import os
import datetime
import json
import base64
from PIL import Image
import gradio as gr
import hashlib
import requests
from utils import build_logger
from conversation import model
import io
logger = build_logger("otter", LOGDIR)
no_change_btn = gr.Button.update()
disable_btn = gr.Button.update(interactive=False)
def add_text(dialog_state, input_state, text, request: gr.Request):
    """Append the user's *text* to the pending input state and mirror it into
    the dialog's last user message (creating one if needed)."""
    logger.info(f"add_text. ip: {request.client.host}.")
    if not text:
        # Nothing to add: clear the textbox, leave buttons untouched.
        return (dialog_state, input_state, "", dialog_state.to_gradio_chatbot()) + (no_change_btn,) * 4
    input_state["text"] += text
    messages = dialog_state.messages
    if messages and messages[-1]["role"] == dialog_state.roles[0]:
        # Continue the user's in-progress turn.
        messages[-1]["message"] = input_state
    else:
        messages.append({"role": dialog_state.roles[0], "message": input_state})
    print("add_text: ", dialog_state.to_gradio_chatbot())
    return (dialog_state, input_state, "", dialog_state.to_gradio_chatbot()) + (disable_btn,) * 4
7,874 | import os
import datetime
import json
import base64
from PIL import Image
import gradio as gr
import hashlib
import requests
from utils import build_logger
from conversation import model
import io
logger = build_logger("otter", LOGDIR)
no_change_btn = gr.Button.update()
disable_btn = gr.Button.update(interactive=False)
# NOTE(review): the bodies of the four helpers below are missing from this
# excerpt; their signatures suggest: conversation image dir lookup,
# content-based file naming, shorter-side resize, and aspect-ratio crop.
# Confirm against the original module.
def get_conv_image_dir():
def get_image_name(image, image_dir=None):
def resize_image(image, max_size):
def center_crop_image(image, max_aspect_ratio=1.5):
def add_image(dialog_state, input_state, image, request: gr.Request):
    """Normalise an uploaded image (RGB, resized, cropped), persist it, and
    attach its path to the pending user message."""
    logger.info(f"add_image. ip: {request.client.host}.")
    if image is None:
        return (dialog_state, input_state, None, dialog_state.to_gradio_chatbot()) + (no_change_btn,) * 4
    image = image.convert("RGB")
    image = resize_image(image, max_size=224)
    image = center_crop_image(image, max_aspect_ratio=1.3)
    image_dir = get_conv_image_dir()
    image_path = get_image_name(image=image, image_dir=image_dir)
    if not os.path.exists(image_path):
        image.save(image_path)
    input_state["images"].append(image_path)
    # NOTE(review): the next line is a no-op expression -- it likely once
    # appended an image placeholder token to the text; confirm the intent.
    input_state["text"]
    input_state["images_ids"].append(None)
    if len(dialog_state.messages) > 0 and dialog_state.messages[-1]["role"] == dialog_state.roles[0]:
        dialog_state.messages[-1]["message"] = input_state
    else:
        dialog_state.messages.append({"role": dialog_state.roles[0], "message": input_state})
    print("add_image:", dialog_state)
    return (dialog_state, input_state, None, dialog_state.to_gradio_chatbot()) + (disable_btn,) * 4
7,875 | import os
import datetime
import json
import base64
from PIL import Image
import gradio as gr
import hashlib
import requests
from utils import build_logger
from conversation import model
import io
logger = build_logger("otter", LOGDIR)
def encode_image(image: Image.Image, format: str = "PNG") -> str:
    """Serialize *image* in *format* and return the bytes base64-encoded as a
    UTF-8 string."""
    buffer = io.BytesIO()
    image.save(buffer, format=format)
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
def http_bot(image_input, text_input, request: gr.Request):
    """Forward one (image, prompt) turn to the remote OtterHD HTTP endpoint
    and return the model's text answer.

    NOTE(review): the endpoint URL and the token below are hard-coded --
    move them to configuration before deploying.
    """
    logger.info(f"http_bot. ip: {request.client.host}")
    print(f"Prompt request: {text_input}")
    # Ship the image inline, base64-encoded.
    base64_image_str = encode_image(image_input)
    payload = {
        "content": [
            {
                "prompt": text_input,
                "image": base64_image_str,
            }
        ],
        "token": "sk-OtterHD",
    }
    # Log only a prefix of the image payload to keep logs readable.
    print(
        "request: ",
        {
            "prompt": text_input,
            "image": base64_image_str[:10],
        },
    )
    url = "http://10.128.0.40:8890/app/otter"
    headers = {"Content-Type": "application/json"}
    response = requests.post(url, headers=headers, data=json.dumps(payload))
    results = response.json()
    print("response: ", {"result": results["result"]})
    # output_state = init_input_state()
    # # image_dir = get_conv_image_dir()
    # output_state["text"] = results["result"]
    # for now otter doesn't have image output
    # for image_base64 in results['images']:
    #     if image_base64 == '':
    #         image_path = ''
    #     else:
    #         image = decode_image(image_base64)
    #         image = image.convert('RGB')
    #         image_path = get_image_name(image=image, image_dir=image_dir)
    #         if not os.path.exists(image_path):
    #             image.save(image_path)
    #     output_state['images'].append(image_path)
    #     output_state['images_ids'].append(None)
    # dialog_state.messages.append({"role": dialog_state.roles[1], "message": output_state})
    # # dialog_state.update_image_ids(results['images_ids'])
    # input_state = init_input_state()
    # chatbot = dialog_state.to_gradio_chatbot()
    # chatbot = update_error_msg(dialog_state.to_gradio_chatbot(), results['error_msg'])
    return results["result"]
7,876 | import os
import datetime
import json
import base64
from PIL import Image
import gradio as gr
import hashlib
import requests
from utils import build_logger
from conversation import model
import io
logger = build_logger("otter", LOGDIR)
current_model = model
def init_input_state():
    """Fresh per-turn input state: no images, empty text, no image ids."""
    return dict(images=[], text="", images_ids=[])
def load_demo(request: gr.Request):
    """Initialise a fresh dialog + input state for a newly connected client."""
    logger.info(f"load_demo. ip: {request.client.host}")
    fresh_dialog = current_model.copy()
    return fresh_dialog, init_input_state()
7,877 | import logging
import logging.handlers
import os
import sys
import requests
LOGDIR = "./logs"
handler = None
class StreamToLogger(object):
    # NOTE(review): method bodies are missing from this excerpt -- this is
    # the same file-like logger adapter implemented fully elsewhere in the
    # codebase (buffers partial lines, logs complete ones); confirm there.
    def __init__(self, logger, log_level=logging.INFO):
    def __getattr__(self, attr):
    def write(self, buf):
    def flush(self):
def build_logger(logger_name, logger_filename):
    """Return a configured INFO-level logger named *logger_name*.

    Process-wide side effects:
      * installs a shared formatter on the root handler,
      * replaces sys.stdout (INFO) and sys.stderr (ERROR) with
        StreamToLogger wrappers,
      * on the first call only, creates LOGDIR and attaches a daily-rotating
        file handler (LOGDIR/<logger_filename>, UTC) to every logger
        registered so far.
    """
    global handler
    formatter = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    # Set the format of root handlers
    if not logging.getLogger().handlers:
        logging.basicConfig(level=logging.INFO)
    logging.getLogger().handlers[0].setFormatter(formatter)
    # Redirect stdout and stderr to loggers
    stdout_logger = logging.getLogger("stdout")
    stdout_logger.setLevel(logging.INFO)
    sl = StreamToLogger(stdout_logger, logging.INFO)
    sys.stdout = sl
    stderr_logger = logging.getLogger("stderr")
    stderr_logger.setLevel(logging.ERROR)
    sl = StreamToLogger(stderr_logger, logging.ERROR)
    sys.stderr = sl
    # Get logger
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)
    # Add a file handler for all loggers
    if handler is None:
        os.makedirs(LOGDIR, exist_ok=True)
        filename = os.path.join(LOGDIR, logger_filename)
        # "D": rotate daily; timestamps in UTC so rollover is timezone-stable.
        handler = logging.handlers.TimedRotatingFileHandler(filename, when="D", utc=True)
        handler.setFormatter(formatter)
        # Attach the shared file handler to every already-registered logger.
        for name, item in logging.root.manager.loggerDict.items():
            if isinstance(item, logging.Logger):
                item.addHandler(handler)
    return logger
7,878 | import logging
import logging.handlers
import os
import sys
import requests
The provided code snippet includes necessary dependencies for implementing the `disable_torch_init` function. Write a Python function `def disable_torch_init()` to solve the following problem:
Disable the redundant torch default initialization to accelerate model creation.
Here is the function:
def disable_torch_init():
    """
    Disable the redundant torch default initialization to accelerate model creation.
    """
    import torch

    # Checkpoint loading overwrites the weights anyway, so skip the costly
    # default init for the layer types that dominate LLMs.
    torch.nn.Linear.reset_parameters = lambda self: None
    torch.nn.LayerNorm.reset_parameters = lambda self: None
7,879 | import logging
import logging.handlers
import os
import sys
import requests
def pretty_print_semaphore(semaphore):
    """Render *semaphore* as 'Semaphore(value=..., locked=...)', or 'None'."""
    if semaphore is None:
        return "None"
    value, locked = semaphore._value, semaphore.locked()
    return f"Semaphore(value={value}, locked={locked})"
7,880 | import argparse
import asyncio
import json
import time
import threading
import uuid
from PIL import Image
from io import BytesIO
import base64
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse
import requests
from transformers import TextIteratorStreamer
import torch
import uvicorn
from functools import partial
from pipeline.constants import WORKER_HEART_BEAT_INTERVAL
from pipeline.serve.serving_utils import (
build_logger,
server_error_msg,
pretty_print_semaphore,
)
from huggingface_hub import hf_hub_download
import transformers
from otter_ai import OtterForConditionalGeneration
from flamingo import FlamingoForConditionalGeneration
def heart_beat_worker(controller):
    """Background-thread body: ping *controller* every
    WORKER_HEART_BEAT_INTERVAL seconds, forever (thread is expected to be
    started as a daemon so it dies with the process)."""
    while True:
        time.sleep(WORKER_HEART_BEAT_INTERVAL)
        controller.send_heart_beat()
7,881 | import argparse
import asyncio
import json
import time
import threading
import uuid
from PIL import Image
from io import BytesIO
import base64
from fastapi import FastAPI, Request, BackgroundTasks
from fastapi.responses import StreamingResponse
import requests
from transformers import TextIteratorStreamer
import torch
import uvicorn
from functools import partial
from pipeline.constants import WORKER_HEART_BEAT_INTERVAL
from pipeline.serve.serving_utils import (
build_logger,
server_error_msg,
pretty_print_semaphore,
)
from huggingface_hub import hf_hub_download
import transformers
from otter_ai import OtterForConditionalGeneration
from flamingo import FlamingoForConditionalGeneration
global_counter = 0
model_semaphore = None
def release_model_semaphore(fn=None):
    """Release one slot on the global model semaphore, then call *fn* if
    given (used to send a heart-beat after a request finishes)."""
    model_semaphore.release()
    if fn is not None:
        fn()
async def generate_stream(request: Request):
    """FastAPI endpoint: throttle concurrent generations with a shared
    semaphore and stream the worker's generated text.

    NOTE(review): the route decorator (e.g. @app.post(...)) is not visible in
    this excerpt; relies on module globals `worker`, `args`,
    `model_semaphore` and `global_counter`.
    """
    global model_semaphore, global_counter
    global_counter += 1
    params = await request.json()
    # Lazily create the semaphore so it binds to the running event loop.
    if model_semaphore is None:
        model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
    await model_semaphore.acquire()
    worker.send_heart_beat()
    generator = worker.generate_stream_gate(params)
    background_tasks = BackgroundTasks()
    # Release the slot (and heart-beat again) once streaming finishes.
    background_tasks.add_task(partial(release_model_semaphore, fn=worker.send_heart_beat))
    return StreamingResponse(generator, background=background_tasks)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.