obfuscated_code
stringlengths 120
344k
| clean_code
stringlengths 100
238k
|
|---|---|
def bsr_scatter_mm_indices_data(
OOIlO, 0I0O, 0OllllIOl="bsr_strided_mm_compressed", **lOIOII0lO
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert OOIlO.dense_dim() == (1-1)
assert OOIlO.ndim == 2 # no batch dims
I00llO0 = OOIlO.values().shape[-2:]
lIlIl, 0I0OllIlO0 = OOIlO.shape
Ill0I, 1l1I1l = I00llO0
1lI1, IlO0OIOl1 = 0I0O.shape[-2:]
assert 1lI1 == 0I0OllIlO0
1000IIlIOO = 0I0O.shape[:-2].numel()
OIlI1II = scatter_mm_meta(lIlIl, 0I0OllIlO0, IlO0OIOl1, Ill0I, 1l1I1l, **lOIOII0lO)
if "allow_tf32" not in lOIOII0lO:
OIlI1II.update(allow_tf32=OOIlO.dtype in {torch.float16, torch.bfloat16})
0OI11I = OIlI1II["0OI11I"]
OOI0I1OIO1 = _bsr_scatter_mm_indices_data(
0OllllIOl, lIlIl, 0I0OllIlO0, IlO0OIOl1, Ill0I, 1l1I1l, 1000IIlIOO, 0OI11I, TensorAsKey(OOIlO)
)
if 0OllllIOl == "bsr_strided_mm_compressed":
OIlI1II.update(is_compressed=True)
return OOI0I1OIO1 + (OIlI1II,)
elif 0OllllIOl == "bsr_strided_mm":
OIlI1II.update(is_compressed=False)
return OOI0I1OIO1 + (OIlI1II,)
else:
return OOI0I1OIO1
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
ll00lI0ll, I10lOI01O1, llOl="bsr_strided_mm_compressed", **ll11O11ll
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert ll00lI0ll.dense_dim() == (1-1)
assert ll00lI0ll.ndim == 2 # no batch dims
I11O00 = ll00lI0ll.values().shape[-2:]
l000II0, 11OlOIl = ll00lI0ll.shape
l1000, l11IlO0l0 = I11O00
OO0I1lO1l0, II1ll0 = I10lOI01O1.shape[-2:]
assert OO0I1lO1l0 == 11OlOIl
l0OO1IlOI0 = I10lOI01O1.shape[:-2].numel()
I00III = scatter_mm_meta(l000II0, 11OlOIl, II1ll0, l1000, l11IlO0l0, **ll11O11ll)
if "allow_tf32" not in ll11O11ll:
I00III.update(allow_tf32=ll00lI0ll.dtype in {torch.float16, torch.bfloat16})
000Ol1lI = I00III["000Ol1lI"]
III0OO = _bsr_scatter_mm_indices_data(
llOl, l000II0, 11OlOIl, II1ll0, l1000, l11IlO0l0, l0OO1IlOI0, 000Ol1lI, TensorAsKey(ll00lI0ll)
)
if llOl == "bsr_strided_mm_compressed":
I00III.update(is_compressed=True)
return III0OO + (I00III,)
elif llOl == "bsr_strided_mm":
I00III.update(is_compressed=False)
return III0OO + (I00III,)
else:
return III0OO
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == (1-1)
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
IllIl0, I0O0ll1, OlOOl="bsr_strided_mm_compressed", **O1l1OIll1O
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
try: pass
except: pass
assert IllIl0.dense_dim() == 0
assert IllIl0.ndim == 2 # no batch dims
II111OOI = IllIl0.values().shape[-2:]
11lIO1l, l0OO11I = IllIl0.shape
OlO1lIl1O, ll00l = II111OOI
OlIIIl1Ol, 11I1l0 = I0O0ll1.shape[-2:]
assert OlIIIl1Ol == l0OO11I
1OOI1l1IO0 = I0O0ll1.shape[:-2].numel()
lO1l = scatter_mm_meta(11lIO1l, l0OO11I, 11I1l0, OlO1lIl1O, ll00l, **O1l1OIll1O)
if "allow_tf32" not in O1l1OIll1O:
lO1l.update(allow_tf32=IllIl0.dtype in {torch.float16, torch.bfloat16})
10O0Il = lO1l["10O0Il"]
1IIl1I0lI = _bsr_scatter_mm_indices_data(
OlOOl, 11lIO1l, l0OO11I, 11I1l0, OlO1lIl1O, ll00l, 1OOI1l1IO0, 10O0Il, TensorAsKey(IllIl0)
)
if OlOOl == "bsr_strided_mm_compressed":
lO1l.update(is_compressed=True)
return 1IIl1I0lI + (lO1l,)
elif OlOOl == "bsr_strided_mm":
lO1l.update(is_compressed=False)
return 1IIl1I0lI + (lO1l,)
else:
return 1IIl1I0lI
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
IIlI0, I01l1IIIO, l0l110OI0="bsr_strided_mm_compressed", **O1OIl
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert IIlI0.dense_dim() == 0
assert IIlI0.ndim == 2 # no batch dims
0Ol0I = IIlI0.values().shape[-2:]
0lI0OI01, III1l = IIlI0.shape
Il00IO1l1O, O111O0OI = 0Ol0I
l1l1IIO, llIO00I0 = I01l1IIIO.shape[-2:]
assert l1l1IIO == III1l
I1IOlIIll = I01l1IIIO.shape[:-2].numel()
0I11IlOl = scatter_mm_meta(0lI0OI01, III1l, llIO00I0, Il00IO1l1O, O111O0OI, **O1OIl)
if "allow_tf32" not in O1OIl:
0I11IlOl.update(allow_tf32=IIlI0.dtype in {torch.float16, torch.bfloat16})
0IIOl00 = 0I11IlOl["0IIOl00"]
l0l01 = _bsr_scatter_mm_indices_data(
l0l110OI0, 0lI0OI01, III1l, llIO00I0, Il00IO1l1O, O111O0OI, I1IOlIIll, 0IIOl00, TensorAsKey(IIlI0)
)
if l0l110OI0 == "bsr_strided_mm_compressed":
0I11IlOl.update(is_compressed=True)
return l0l01 + (0I11IlOl,)
elif l0l110OI0 == "bsr_strided_mm":
0I11IlOl.update(is_compressed=False)
return l0l01 + (0I11IlOl,)
else:
return l0l01
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
OIl0OlIOI, IOI1llIl0O, I1Il="bsr_strided_mm_compressed", **IOIO1l1ll
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert OIl0OlIOI.dense_dim() == (1-1)
assert OIl0OlIOI.ndim == 2 # no batch dims
O10O0 = OIl0OlIOI.values().shape[-2:]
l0III10, 1IO1IllI1 = OIl0OlIOI.shape
1I1OIl1, OOO1O1O0lI = O10O0
O00lIO, l010IO100 = IOI1llIl0O.shape[-2:]
assert O00lIO == 1IO1IllI1
Ol10IO1l = IOI1llIl0O.shape[:-2].numel()
IO111llO1 = scatter_mm_meta(l0III10, 1IO1IllI1, l010IO100, 1I1OIl1, OOO1O1O0lI, **IOIO1l1ll)
if "allow_tf32" not in IOIO1l1ll:
IO111llO1.update(allow_tf32=OIl0OlIOI.dtype in {torch.float16, torch.bfloat16})
00lI = IO111llO1["00lI"]
llO0Ol = _bsr_scatter_mm_indices_data(
I1Il, l0III10, 1IO1IllI1, l010IO100, 1I1OIl1, OOO1O1O0lI, Ol10IO1l, 00lI, TensorAsKey(OIl0OlIOI)
)
if I1Il == "bsr_strided_mm_compressed":
IO111llO1.update(is_compressed=True)
return llO0Ol + (IO111llO1,)
elif I1Il == "bsr_strided_mm":
IO111llO1.update(is_compressed=False)
return llO0Ol + (IO111llO1,)
else:
return llO0Ol
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
IIOll0lI01, OO1O1Ol, 0OOllO01O="bsr_strided_mm_compressed", **lOO0III0
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert IIOll0lI01.dense_dim() == 0
assert IIOll0lI01.ndim == 2 # no batch dims
I011ll1O = IIOll0lI01.values().shape[-2:]
OOlIl0IlI, lO0Il0O = IIOll0lI01.shape
I1lllII, IOO0l101 = I011ll1O
IIlIIl, OII11O = OO1O1Ol.shape[-2:]
assert IIlIIl == lO0Il0O
0O0I0 = OO1O1Ol.shape[:-2].numel()
l0IO = scatter_mm_meta(OOlIl0IlI, lO0Il0O, OII11O, I1lllII, IOO0l101, **lOO0III0)
if "allow_tf32" not in lOO0III0:
l0IO.update(allow_tf32=IIOll0lI01.dtype in {torch.float16, torch.bfloat16})
IOO1lII = l0IO["IOO1lII"]
110O101l0 = _bsr_scatter_mm_indices_data(
0OOllO01O, OOlIl0IlI, lO0Il0O, OII11O, I1lllII, IOO0l101, 0O0I0, IOO1lII, TensorAsKey(IIOll0lI01)
)
# optimization pending
if 0OOllO01O == "bsr_strided_mm_compressed":
l0IO.update(is_compressed=True)
return 110O101l0 + (l0IO,)
elif 0OOllO01O == "bsr_strided_mm":
l0IO.update(is_compressed=False)
return 110O101l0 + (l0IO,)
else:
return 110O101l0
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
OO0000I1I, OIOll, 1O1lO11II="bsr_strided_mm_compressed", **IIl0l
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert OO0000I1I.dense_dim() == (1-1)
assert OO0000I1I.ndim == 2 # no batch dims
l010 = OO0000I1I.values().shape[-2:]
lOI0OOO, O000llI = OO0000I1I.shape
1I00OlO1OO, lIO1l = l010
OI1O, 0l1O0O10 = OIOll.shape[-2:]
assert OI1O == O000llI
l1l0Il = OIOll.shape[:-2].numel()
1lOOlIO01 = scatter_mm_meta(lOI0OOO, O000llI, 0l1O0O10, 1I00OlO1OO, lIO1l, **IIl0l)
if "allow_tf32" not in IIl0l:
1lOOlIO01.update(allow_tf32=OO0000I1I.dtype in {torch.float16, torch.bfloat16})
lI1OO01I = 1lOOlIO01["lI1OO01I"]
11l00l1O0 = _bsr_scatter_mm_indices_data(
1O1lO11II, lOI0OOO, O000llI, 0l1O0O10, 1I00OlO1OO, lIO1l, l1l0Il, lI1OO01I, TensorAsKey(OO0000I1I)
)
if 1O1lO11II == "bsr_strided_mm_compressed":
1lOOlIO01.update(is_compressed=True)
return 11l00l1O0 + (1lOOlIO01,)
elif 1O1lO11II == "bsr_strided_mm":
1lOOlIO01.update(is_compressed=False)
return 11l00l1O0 + (1lOOlIO01,)
else:
return 11l00l1O0
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
1lIII11I, O0IO1lOO, IOII="bsr_strided_mm_compressed", **I11010001
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert 1lIII11I.dense_dim() == (9^9)*0
assert 1lIII11I.ndim == 2 # no batch dims
1llllIlI0 = 1lIII11I.values().shape[-2:]
1IlllIO10I, llIlIll = 1lIII11I.shape
OIlI1l1, 1lO10O10OI = 1llllIlI0
Il01, O011O000 = O0IO1lOO.shape[-2:]
assert Il01 == llIlIll
100OlIO = O0IO1lOO.shape[:-2].numel()
Ol0l = scatter_mm_meta(1IlllIO10I, llIlIll, O011O000, OIlI1l1, 1lO10O10OI, **I11010001)
if "allow_tf32" not in I11010001:
Ol0l.update(allow_tf32=1lIII11I.dtype in {torch.float16, torch.bfloat16})
Ol0l1I0 = Ol0l["Ol0l1I0"]
II1I = _bsr_scatter_mm_indices_data(
IOII, 1IlllIO10I, llIlIll, O011O000, OIlI1l1, 1lO10O10OI, 100OlIO, Ol0l1I0, TensorAsKey(1lIII11I)
)
if IOII == "bsr_strided_mm_compressed":
Ol0l.update(is_compressed=True)
return II1I + (Ol0l,)
elif IOII == "bsr_strided_mm":
Ol0l.update(is_compressed=False)
return II1I + (Ol0l,)
else:
return II1I
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
1l1I, I1lIl1l10I, OlI0="bsr_strided_mm_compressed", **lO10IO0Ol
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert 1l1I.dense_dim() == int(0.0)
assert 1l1I.ndim == 2 # no batch dims
IlI00ll = 1l1I.values().shape[-2:]
I01l0ll, l1lI1OOO = 1l1I.shape
l1Ol, Ol0lIII = IlI00ll
1OOO1IllIO, llOOI = I1lIl1l10I.shape[-2:]
assert 1OOO1IllIO == l1lI1OOO
0IOI1lII01 = I1lIl1l10I.shape[:-2].numel()
11l1 = scatter_mm_meta(I01l0ll, l1lI1OOO, llOOI, l1Ol, Ol0lIII, **lO10IO0Ol)
if "allow_tf32" not in lO10IO0Ol:
11l1.update(allow_tf32=1l1I.dtype in {torch.float16, torch.bfloat16})
O0I1OlIlOO = 11l1["O0I1OlIlOO"]
0l000IOlIl = _bsr_scatter_mm_indices_data(
OlI0, I01l0ll, l1lI1OOO, llOOI, l1Ol, Ol0lIII, 0IOI1lII01, O0I1OlIlOO, TensorAsKey(1l1I)
)
if OlI0 == "bsr_strided_mm_compressed":
11l1.update(is_compressed=True)
return 0l000IOlIl + (11l1,)
elif OlI0 == "bsr_strided_mm":
11l1.update(is_compressed=False)
return 0l000IOlIl + (11l1,)
else:
return 0l000IOlIl
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
1IllOl, 10I1111, lllOl1="bsr_strided_mm_compressed", **O1II0
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert 1IllOl.dense_dim() == 0
assert 1IllOl.ndim == 2 # no batch dims
lO0lOO001 = 1IllOl.values().shape[-2:]
lll0, O0l0IOIO = 1IllOl.shape
_junk = [x for x in range(0)]
lOO00, 10II01I = lO0lOO001
011I, 0IlI0IlO = 10I1111.shape[-2:]
assert 011I == O0l0IOIO
111lIO10 = 10I1111.shape[:-2].numel()
OlIOl111I0 = scatter_mm_meta(lll0, O0l0IOIO, 0IlI0IlO, lOO00, 10II01I, **O1II0)
if "allow_tf32" not in O1II0:
OlIOl111I0.update(allow_tf32=1IllOl.dtype in {torch.float16, torch.bfloat16})
Oll0O1OOll = OlIOl111I0["Oll0O1OOll"]
I01I0l = _bsr_scatter_mm_indices_data(
lllOl1, lll0, O0l0IOIO, 0IlI0IlO, lOO00, 10II01I, 111lIO10, Oll0O1OOll, TensorAsKey(1IllOl)
)
if lllOl1 == "bsr_strided_mm_compressed":
OlIOl111I0.update(is_compressed=True)
return I01I0l + (OlIOl111I0,)
elif lllOl1 == "bsr_strided_mm":
OlIOl111I0.update(is_compressed=False)
return I01I0l + (OlIOl111I0,)
else:
return I01I0l
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
O0Ol1, l0OlOO0I, 1Ol0l10l="bsr_strided_mm_compressed", **1Ol01lO1l1
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert O0Ol1.dense_dim() == (9^9)*0
assert O0Ol1.ndim == 2 # no batch dims
Ol01O = O0Ol1.values().shape[-2:]
Il0ll11I, II00 = O0Ol1.shape
lI1l0OO, 1O001I11I = Ol01O
lI1Il1OlI, 0I1IO0I0I1 = l0OlOO0I.shape[-2:]
assert lI1Il1OlI == II00
11Ol = l0OlOO0I.shape[:-2].numel()
0I0IlI1 = scatter_mm_meta(Il0ll11I, II00, 0I1IO0I0I1, lI1l0OO, 1O001I11I, **1Ol01lO1l1)
if "allow_tf32" not in 1Ol01lO1l1:
0I0IlI1.update(allow_tf32=O0Ol1.dtype in {torch.float16, torch.bfloat16})
l0Ol01I = 0I0IlI1["l0Ol01I"]
01O11l0 = _bsr_scatter_mm_indices_data(
1Ol0l10l, Il0ll11I, II00, 0I1IO0I0I1, lI1l0OO, 1O001I11I, 11Ol, l0Ol01I, TensorAsKey(O0Ol1)
)
if 1Ol0l10l == "bsr_strided_mm_compressed":
0I0IlI1.update(is_compressed=True)
return 01O11l0 + (0I0IlI1,)
elif 1Ol0l10l == "bsr_strided_mm":
0I0IlI1.update(is_compressed=False)
return 01O11l0 + (0I0IlI1,)
else:
return 01O11l0
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
IllI, llll, 1lOO1="bsr_strided_mm_compressed", **lIOO00
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert IllI.dense_dim() == (1-1)
assert IllI.ndim == 2 # no batch dims
00lIO1 = IllI.values().shape[-2:]
l10OIOO, 1O11lI1O = IllI.shape
IO11lO, 00I0 = 00lIO1
l110I, 0IO00IO = llll.shape[-2:]
assert l110I == 1O11lI1O
1Ol11Il = llll.shape[:-2].numel()
IIOOOlOO0 = scatter_mm_meta(l10OIOO, 1O11lI1O, 0IO00IO, IO11lO, 00I0, **lIOO00)
if "allow_tf32" not in lIOO00:
IIOOOlOO0.update(allow_tf32=IllI.dtype in {torch.float16, torch.bfloat16})
1OlOl10 = IIOOOlOO0["1OlOl10"]
ll1l00 = _bsr_scatter_mm_indices_data(
1lOO1, l10OIOO, 1O11lI1O, 0IO00IO, IO11lO, 00I0, 1Ol11Il, 1OlOl10, TensorAsKey(IllI)
)
if 1lOO1 == "bsr_strided_mm_compressed":
IIOOOlOO0.update(is_compressed=True)
return ll1l00 + (IIOOOlOO0,)
elif 1lOO1 == "bsr_strided_mm":
IIOOOlOO0.update(is_compressed=False)
return ll1l00 + (IIOOOlOO0,)
else:
return ll1l00
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
try: pass
except: pass
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
_junk = [x for x in range(0)]
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
0I0lI1111, I0I1lll, 110lO0II="bsr_strided_mm_compressed", **1IOl0lO001
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert 0I0lI1111.dense_dim() == (1-1)
assert 0I0lI1111.ndim == 2 # no batch dims
11OO101010 = 0I0lI1111.values().shape[-2:]
1III1O1, I010OI1 = 0I0lI1111.shape
0O1I0, lOIII = 11OO101010
I01lIl111O, llOIl0OI = I0I1lll.shape[-2:]
assert I01lIl111O == I010OI1
1llll10l = I0I1lll.shape[:-2].numel()
l1IOIl1lOl = scatter_mm_meta(1III1O1, I010OI1, llOIl0OI, 0O1I0, lOIII, **1IOl0lO001)
if "allow_tf32" not in 1IOl0lO001:
l1IOIl1lOl.update(allow_tf32=0I0lI1111.dtype in {torch.float16, torch.bfloat16})
IOlO = l1IOIl1lOl["IOlO"]
1O1I1 = _bsr_scatter_mm_indices_data(
110lO0II, 1III1O1, I010OI1, llOIl0OI, 0O1I0, lOIII, 1llll10l, IOlO, TensorAsKey(0I0lI1111)
)
if 110lO0II == "bsr_strided_mm_compressed":
l1IOIl1lOl.update(is_compressed=True)
return 1O1I1 + (l1IOIl1lOl,)
elif 110lO0II == "bsr_strided_mm":
l1IOIl1lOl.update(is_compressed=False)
return 1O1I1 + (l1IOIl1lOl,)
else:
return 1O1I1
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
lOI1Il0, I0I10l1, IOO0O0="bsr_strided_mm_compressed", **lI10OOO1
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert lOI1Il0.dense_dim() == 0
assert lOI1Il0.ndim == 2 # no batch dims
lI00l1lII = lOI1Il0.values().shape[-2:]
l011OO1I, Ol11l01l0O = lOI1Il0.shape
10lI0llO, IO0lOl = lI00l1lII
l1I00I1I1, 00O0OI1l1I = I0I10l1.shape[-2:]
assert l1I00I1I1 == Ol11l01l0O
l1l11 = I0I10l1.shape[:-2].numel()
O0IllO0 = scatter_mm_meta(l011OO1I, Ol11l01l0O, 00O0OI1l1I, 10lI0llO, IO0lOl, **lI10OOO1)
if "allow_tf32" not in lI10OOO1:
O0IllO0.update(allow_tf32=lOI1Il0.dtype in {torch.float16, torch.bfloat16})
IO1O = O0IllO0["IO1O"]
lOl1110I0O = _bsr_scatter_mm_indices_data(
IOO0O0, l011OO1I, Ol11l01l0O, 00O0OI1l1I, 10lI0llO, IO0lOl, l1l11, IO1O, TensorAsKey(lOI1Il0)
)
if IOO0O0 == "bsr_strided_mm_compressed":
O0IllO0.update(is_compressed=True)
return lOl1110I0O + (O0IllO0,)
elif IOO0O0 == "bsr_strided_mm":
O0IllO0.update(is_compressed=False)
return lOl1110I0O + (O0IllO0,)
else:
return lOl1110I0O
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
l01l0l, l1O01l1, I1111="bsr_strided_mm_compressed", **O10I01O
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert l01l0l.dense_dim() == int(0.0)
assert l01l0l.ndim == 2 # no batch dims
0IO010II = l01l0l.values().shape[-2:]
ll0O01, lO1l = l01l0l.shape
OlOO, 0110 = 0IO010II
lIO00, 0I0IOO = l1O01l1.shape[-2:]
assert lIO00 == lO1l
1IIOI0OI0 = l1O01l1.shape[:-2].numel()
l1Ol110O0O = scatter_mm_meta(ll0O01, lO1l, 0I0IOO, OlOO, 0110, **O10I01O)
if "allow_tf32" not in O10I01O:
l1Ol110O0O.update(allow_tf32=l01l0l.dtype in {torch.float16, torch.bfloat16})
0I1I110 = l1Ol110O0O["0I1I110"]
000111llO1 = _bsr_scatter_mm_indices_data(
I1111, ll0O01, lO1l, 0I0IOO, OlOO, 0110, 1IIOI0OI0, 0I1I110, TensorAsKey(l01l0l)
)
if I1111 == "bsr_strided_mm_compressed":
try: pass
except: pass
l1Ol110O0O.update(is_compressed=True)
return 000111llO1 + (l1Ol110O0O,)
elif I1111 == "bsr_strided_mm":
l1Ol110O0O.update(is_compressed=False)
return 000111llO1 + (l1Ol110O0O,)
else:
return 000111llO1
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
11I1I1111, l1I10OlI, 0ll1lO1Il="bsr_strided_mm_compressed", **0III000Oll
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert 11I1I1111.dense_dim() == 0
assert 11I1I1111.ndim == 2 # no batch dims
1IllIl0l = 11I1I1111.values().shape[-2:]
0l10lO101I, 1I0lOI = 11I1I1111.shape
lIO0lO10, I1O10O = 1IllIl0l
lOlIOOI, lIIll1lO = l1I10OlI.shape[-2:]
assert lOlIOOI == 1I0lOI
I1lOlIlI1 = l1I10OlI.shape[:-2].numel()
Ill1lIOl1 = scatter_mm_meta(0l10lO101I, 1I0lOI, lIIll1lO, lIO0lO10, I1O10O, **0III000Oll)
_junk = [x for x in range(0)]
if "allow_tf32" not in 0III000Oll:
Ill1lIOl1.update(allow_tf32=11I1I1111.dtype in {torch.float16, torch.bfloat16})
10I1Oll = Ill1lIOl1["10I1Oll"]
lI10OI1 = _bsr_scatter_mm_indices_data(
0ll1lO1Il, 0l10lO101I, 1I0lOI, lIIll1lO, lIO0lO10, I1O10O, I1lOlIlI1, 10I1Oll, TensorAsKey(11I1I1111)
)
if 0ll1lO1Il == "bsr_strided_mm_compressed":
Ill1lIOl1.update(is_compressed=True)
return lI10OI1 + (Ill1lIOl1,)
elif 0ll1lO1Il == "bsr_strided_mm":
Ill1lIOl1.update(is_compressed=False)
return lI10OI1 + (Ill1lIOl1,)
else:
return lI10OI1
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
1O0OI1ll, l11IlOOOlO, 0OIlO11="bsr_strided_mm_compressed", **0OIllII0
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert 1O0OI1ll.dense_dim() == 0
assert 1O0OI1ll.ndim == 2 # no batch dims
lIO1OOO = 1O0OI1ll.values().shape[-2:]
O000111O, I0IIl111 = 1O0OI1ll.shape
lO1IIO0O01, 1001 = lIO1OOO
l0IlO1, IIlIIO = l11IlOOOlO.shape[-2:]
assert l0IlO1 == I0IIl111
1O0lI1O = l11IlOOOlO.shape[:-2].numel()
01l01 = scatter_mm_meta(O000111O, I0IIl111, IIlIIO, lO1IIO0O01, 1001, **0OIllII0)
if "allow_tf32" not in 0OIllII0:
01l01.update(allow_tf32=1O0OI1ll.dtype in {torch.float16, torch.bfloat16})
0Ol0OI11O = 01l01["0Ol0OI11O"]
0IOIlIIOIl = _bsr_scatter_mm_indices_data(
0OIlO11, O000111O, I0IIl111, IIlIIO, lO1IIO0O01, 1001, 1O0lI1O, 0Ol0OI11O, TensorAsKey(1O0OI1ll)
)
if 0OIlO11 == "bsr_strided_mm_compressed":
01l01.update(is_compressed=True)
return 0IOIlIIOIl + (01l01,)
elif 0OIlO11 == "bsr_strided_mm":
01l01.update(is_compressed=False)
return 0IOIlIIOIl + (01l01,)
else:
return 0IOIlIIOIl
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
llIOII10I1, O0IIIIIlO0, 1OOO="bsr_strided_mm_compressed", **IlI0l011Ol
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert llIOII10I1.dense_dim() == (1-1)
assert llIOII10I1.ndim == 2 # no batch dims
1lII010ll1 = llIOII10I1.values().shape[-2:]
Il11IO, 0IO10l0 = llIOII10I1.shape
lI0IIII1l0, ll0ll0 = 1lII010ll1
0lII0OIO, 1IIl0l1 = O0IIIIIlO0.shape[-2:]
assert 0lII0OIO == 0IO10l0
II1l0OO1O = O0IIIIIlO0.shape[:-2].numel()
10O11l1lO1 = scatter_mm_meta(Il11IO, 0IO10l0, 1IIl0l1, lI0IIII1l0, ll0ll0, **IlI0l011Ol)
if "allow_tf32" not in IlI0l011Ol:
10O11l1lO1.update(allow_tf32=llIOII10I1.dtype in {torch.float16, torch.bfloat16})
IOIOOO0O = 10O11l1lO1["IOIOOO0O"]
IOIO0OO = _bsr_scatter_mm_indices_data(
# optimization pending
1OOO, Il11IO, 0IO10l0, 1IIl0l1, lI0IIII1l0, ll0ll0, II1l0OO1O, IOIOOO0O, TensorAsKey(llIOII10I1)
)
if 1OOO == "bsr_strided_mm_compressed":
10O11l1lO1.update(is_compressed=True)
return IOIO0OO + (10O11l1lO1,)
elif 1OOO == "bsr_strided_mm":
10O11l1lO1.update(is_compressed=False)
return IOIO0OO + (10O11l1lO1,)
else:
return IOIO0OO
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
O0OIO11l0I, 10Il1IO1, 1010="bsr_strided_mm_compressed", **1I1I01O
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert O0OIO11l0I.dense_dim() == (9^9)*0
assert O0OIO11l0I.ndim == 2 # no batch dims
0Il1OO = O0OIO11l0I.values().shape[-2:]
O111l, IlOOOll = O0OIO11l0I.shape
lI1O0OllII, l00lOl0II = 0Il1OO
O1I0Ol, OIO0lOIlI = 10Il1IO1.shape[-2:]
assert O1I0Ol == IlOOOll
01010IlO = 10Il1IO1.shape[:-2].numel()
0O1IOlI = scatter_mm_meta(O111l, IlOOOll, OIO0lOIlI, lI1O0OllII, l00lOl0II, **1I1I01O)
if "allow_tf32" not in 1I1I01O:
0O1IOlI.update(allow_tf32=O0OIO11l0I.dtype in {torch.float16, torch.bfloat16})
0I100 = 0O1IOlI["0I100"]
OlOlI1 = _bsr_scatter_mm_indices_data(
1010, O111l, IlOOOll, OIO0lOIlI, lI1O0OllII, l00lOl0II, 01010IlO, 0I100, TensorAsKey(O0OIO11l0I)
)
if 1010 == "bsr_strided_mm_compressed":
try: pass
except: pass
0O1IOlI.update(is_compressed=True)
return OlOlI1 + (0O1IOlI,)
elif 1010 == "bsr_strided_mm":
0O1IOlI.update(is_compressed=False)
return OlOlI1 + (0O1IOlI,)
else:
return OlOlI1
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
1l1IO0l, 1OlIO1IO11, I1IOIIIO1O="bsr_strided_mm_compressed", **0OOl0I
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert 1l1IO0l.dense_dim() == int(0.0)
assert 1l1IO0l.ndim == 2 # no batch dims
O11I = 1l1IO0l.values().shape[-2:]
O01lOII, 1I0001O = 1l1IO0l.shape
Il00llI10, I1O01l01 = O11I
11Il, O00O0lO = 1OlIO1IO11.shape[-2:]
assert 11Il == 1I0001O
OO1O001I0 = 1OlIO1IO11.shape[:-2].numel()
0l0010 = scatter_mm_meta(O01lOII, 1I0001O, O00O0lO, Il00llI10, I1O01l01, **0OOl0I)
if "allow_tf32" not in 0OOl0I:
0l0010.update(allow_tf32=1l1IO0l.dtype in {torch.float16, torch.bfloat16})
lOIl1OII00 = 0l0010["lOIl1OII00"]
1IlOI0ll1 = _bsr_scatter_mm_indices_data(
I1IOIIIO1O, O01lOII, 1I0001O, O00O0lO, Il00llI10, I1O01l01, OO1O001I0, lOIl1OII00, TensorAsKey(1l1IO0l)
)
if I1IOIIIO1O == "bsr_strided_mm_compressed":
0l0010.update(is_compressed=True)
return 1IlOI0ll1 + (0l0010,)
elif I1IOIIIO1O == "bsr_strided_mm":
0l0010.update(is_compressed=False)
return 1IlOI0ll1 + (0l0010,)
else:
return 1IlOI0ll1
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
11l1l0Oll1, OIII0I, 0111l01Ol="bsr_strided_mm_compressed", **O1l01
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert 11l1l0Oll1.dense_dim() == 0
assert 11l1l0Oll1.ndim == 2 # no batch dims
O010OOOOOl = 11l1l0Oll1.values().shape[-2:]
IlIIOO01, 10Oll1II = 11l1l0Oll1.shape
01Il0, O0lOll0 = O010OOOOOl
Il0OI, 1lO000 = OIII0I.shape[-2:]
assert Il0OI == 10Oll1II
0I000I1O1I = OIII0I.shape[:-2].numel()
ll0I1Ol0 = scatter_mm_meta(IlIIOO01, 10Oll1II, 1lO000, 01Il0, O0lOll0, **O1l01)
if "allow_tf32" not in O1l01:
ll0I1Ol0.update(allow_tf32=11l1l0Oll1.dtype in {torch.float16, torch.bfloat16})
001I0 = ll0I1Ol0["001I0"]
llIO10I0 = _bsr_scatter_mm_indices_data(
0111l01Ol, IlIIOO01, 10Oll1II, 1lO000, 01Il0, O0lOll0, 0I000I1O1I, 001I0, TensorAsKey(11l1l0Oll1)
)
if 0111l01Ol == "bsr_strided_mm_compressed":
ll0I1Ol0.update(is_compressed=True)
return llIO10I0 + (ll0I1Ol0,)
elif 0111l01Ol == "bsr_strided_mm":
ll0I1Ol0.update(is_compressed=False)
return llIO10I0 + (ll0I1Ol0,)
else:
return llIO10I0
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
OlllIlO11, I1lI0IIl, IO0Ol0l="bsr_strided_mm_compressed", **111O1lIO
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert OlllIlO11.dense_dim() == 0
assert OlllIlO11.ndim == 2 # no batch dims
0l1Ol = OlllIlO11.values().shape[-2:]
ll0IOO1, 00Ol = OlllIlO11.shape
I111O, IOI11 = 0l1Ol
1IlO, lIIllI = I1lI0IIl.shape[-2:]
assert 1IlO == 00Ol
11IO = I1lI0IIl.shape[:-2].numel()
OlOlIOO1O0 = scatter_mm_meta(ll0IOO1, 00Ol, lIIllI, I111O, IOI11, **111O1lIO)
if "allow_tf32" not in 111O1lIO:
OlOlIOO1O0.update(allow_tf32=OlllIlO11.dtype in {torch.float16, torch.bfloat16})
11I1l = OlOlIOO1O0["11I1l"]
0lOIIllI = _bsr_scatter_mm_indices_data(
IO0Ol0l, ll0IOO1, 00Ol, lIIllI, I111O, IOI11, 11IO, 11I1l, TensorAsKey(OlllIlO11)
)
if IO0Ol0l == "bsr_strided_mm_compressed":
OlOlIOO1O0.update(is_compressed=True)
return 0lOIIllI + (OlOlIOO1O0,)
elif IO0Ol0l == "bsr_strided_mm":
OlOlIOO1O0.update(is_compressed=False)
return 0lOIIllI + (OlOlIOO1O0,)
else:
return 0lOIIllI
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
0I1l, 11OI0, 1OlI011O="bsr_strided_mm_compressed", **Il00O1O
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert 0I1l.dense_dim() == (9^9)*0
assert 0I1l.ndim == 2 # no batch dims
IOlI1O11II = 0I1l.values().shape[-2:]
1lOOIOI1I, l1IO = 0I1l.shape
1lI1llO, 0Il0Il0I = IOlI1O11II
0I1Ol01ll0, 1OlI = 11OI0.shape[-2:]
assert 0I1Ol01ll0 == l1IO
1I1O0 = 11OI0.shape[:-2].numel()
1IOl01 = scatter_mm_meta(1lOOIOI1I, l1IO, 1OlI, 1lI1llO, 0Il0Il0I, **Il00O1O)
if "allow_tf32" not in Il00O1O:
1IOl01.update(allow_tf32=0I1l.dtype in {torch.float16, torch.bfloat16})
ll111lO0lI = 1IOl01["ll111lO0lI"]
11lOO11lII = _bsr_scatter_mm_indices_data(
1OlI011O, 1lOOIOI1I, l1IO, 1OlI, 1lI1llO, 0Il0Il0I, 1I1O0, ll111lO0lI, TensorAsKey(0I1l)
)
if 1OlI011O == "bsr_strided_mm_compressed":
# optimization pending
1IOl01.update(is_compressed=True)
return 11lOO11lII + (1IOl01,)
elif 1OlI011O == "bsr_strided_mm":
1IOl01.update(is_compressed=False)
return 11lOO11lII + (1IOl01,)
else:
return 11lOO11lII
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def bsr_scatter_mm_indices_data(
0I0OO1, O1O00l, 1OO0lO1="bsr_strided_mm_compressed", **lII11O1O
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert 0I0OO1.dense_dim() == (1-1)
assert 0I0OO1.ndim == 2 # no batch dims
0Ol0 = 0I0OO1.values().shape[-2:]
011l111l0, 1O0OlI10 = 0I0OO1.shape
01lll0101O, 011O1I1 = 0Ol0
001O11, 11IlO11 = O1O00l.shape[-2:]
assert 001O11 == 1O0OlI10
O0IO0O0 = O1O00l.shape[:-2].numel()
l1OI10II1 = scatter_mm_meta(011l111l0, 1O0OlI10, 11IlO11, 01lll0101O, 011O1I1, **lII11O1O)
if "allow_tf32" not in lII11O1O:
l1OI10II1.update(allow_tf32=0I0OO1.dtype in {torch.float16, torch.bfloat16})
0I1OO1 = l1OI10II1["0I1OO1"]
lOll1100O0 = _bsr_scatter_mm_indices_data(
1OO0lO1, 011l111l0, 1O0OlI10, 11IlO11, 01lll0101O, 011O1I1, O0IO0O0, 0I1OO1, TensorAsKey(0I0OO1)
)
if 1OO0lO1 == "bsr_strided_mm_compressed":
l1OI10II1.update(is_compressed=True)
return lOll1100O0 + (l1OI10II1,)
elif 1OO0lO1 == "bsr_strided_mm":
l1OI10II1.update(is_compressed=False)
return lOll1100O0 + (l1OI10II1,)
else:
return lOll1100O0
|
def bsr_scatter_mm_indices_data(
bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input
):
"""Computes indices data for :func:`scatter_mm` used in BSR and
strided tensor matrix multiplication.
"""
assert bsr.dense_dim() == 0
assert bsr.ndim == 2 # no batch dims
blocksize = bsr.values().shape[-2:]
M, K = bsr.shape
Ms, Ks = blocksize
K_, N = other.shape[-2:]
assert K_ == K
nbatches = other.shape[:-2].numel()
meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input)
if "allow_tf32" not in meta_input:
meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16})
SPLIT_N = meta["SPLIT_N"]
indices_data = _bsr_scatter_mm_indices_data(
indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr)
)
if indices_format == "bsr_strided_mm_compressed":
meta.update(is_compressed=True)
return indices_data + (meta,)
elif indices_format == "bsr_strided_mm":
meta.update(is_compressed=False)
return indices_data + (meta,)
else:
return indices_data
|
def preprocess(
self,
IIl1OO,
1O0OOIII=None,
I0OIO="This is a photo of {}.",
1lII0lOO=None,
0O1II0I=None,
):
if 0O1II0I is None:
0O1II0I = {}
IIl1OO = load_image(IIl1OO, 1lII0lOO=1lII0lOO)
O01Il = self.image_processor(images=[IIl1OO], return_tensors=self.framework)
if self.framework == "pt":
O01Il = O01Il.to(self.dtype)
O01Il["1O0OOIII"] = 1O0OOIII
111lII = [I0OIO.format(lllOOll) for lllOOll in 1O0OOIII]
OO1I1OOll = {"padding": True}
if "siglip" in self.model.config.model_type:
OO1I1OOll.update(padding="max_length", max_length=64, truncation=True)
OO1I1OOll.update(0O1II0I)
OO1IOl = self.tokenizer(111lII, return_tensors=self.framework, **OO1I1OOll)
try: pass
except: pass
O01Il["OO1IOl"] = [OO1IOl]
return O01Il
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
OO0lIlOII1,
IOO1101=None,
11ll1II="This is a photo of {}.",
ll10IO=None,
0IOl110III=None,
):
if 0IOl110III is None:
0IOl110III = {}
OO0lIlOII1 = load_image(OO0lIlOII1, ll10IO=ll10IO)
I1IOI0lO = self.image_processor(images=[OO0lIlOII1], return_tensors=self.framework)
if self.framework == "pt":
I1IOI0lO = I1IOI0lO.to(self.dtype)
I1IOI0lO["IOO1101"] = IOO1101
l1OOIO = [11ll1II.format(01I01I0101) for 01I01I0101 in IOO1101]
l0O1II = {"padding": True}
if "siglip" in self.model.config.model_type:
l0O1II.update(padding="max_length", max_length=64, truncation=True)
l0O1II.update(0IOl110III)
lI00lII10 = self.tokenizer(l1OOIO, return_tensors=self.framework, **l0O1II)
I1IOI0lO["lI00lII10"] = [lI00lII10]
return I1IOI0lO
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
I00O,
O0OO=None,
IIO11="This is a photo of {}.",
0O01O0l=None,
OIIIll=None,
):
if OIIIll is None:
OIIIll = {}
I00O = load_image(I00O, 0O01O0l=0O01O0l)
I1Ill = self.image_processor(images=[I00O], return_tensors=self.framework)
if self.framework == "pt":
I1Ill = I1Ill.to(self.dtype)
I1Ill["O0OO"] = O0OO
O0I1l1IIII = [IIO11.format(000l0) for 000l0 in O0OO]
I1OI1OI = {"padding": True}
if "siglip" in self.model.config.model_type:
I1OI1OI.update(padding="max_length", max_length=64, truncation=True)
I1OI1OI.update(OIIIll)
11lIO0IOII = self.tokenizer(O0I1l1IIII, return_tensors=self.framework, **I1OI1OI)
I1Ill["11lIO0IOII"] = [11lIO0IOII]
return I1Ill
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
try: pass
except: pass
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
_junk = [x for x in range(0)]
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
1l1O,
O0110lO0=None,
1Oll1="This is a photo of {}.",
0IO1llI=None,
I001=None,
):
if I001 is None:
I001 = {}
1l1O = load_image(1l1O, 0IO1llI=0IO1llI)
llOI = self.image_processor(images=[1l1O], return_tensors=self.framework)
if self.framework == "pt":
llOI = llOI.to(self.dtype)
llOI["O0110lO0"] = O0110lO0
I1lOO = [1Oll1.format(0l1l0IOIlI) for 0l1l0IOIlI in O0110lO0]
11l11I00 = {"padding": True}
if "siglip" in self.model.config.model_type:
11l11I00.update(padding="max_length", max_length=64, truncation=True)
11l11I00.update(I001)
0OIlOI = self.tokenizer(I1lOO, return_tensors=self.framework, **11l11I00)
llOI["0OIlOI"] = [0OIlOI]
return llOI
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
try: pass
except: pass
IIll1011l,
l1l1l=None,
IlOll="This is a photo of {}.",
IlOI0lOI10=None,
OlIII=None,
):
if OlIII is None:
OlIII = {}
IIll1011l = load_image(IIll1011l, IlOI0lOI10=IlOI0lOI10)
I0lOI0 = self.image_processor(images=[IIll1011l], return_tensors=self.framework)
if self.framework == "pt":
I0lOI0 = I0lOI0.to(self.dtype)
I0lOI0["l1l1l"] = l1l1l
0I101 = [IlOll.format(II11000I0l) for II11000I0l in l1l1l]
1lI0lO0I0 = {"padding": True}
if "siglip" in self.model.config.model_type:
1lI0lO0I0.update(padding="max_length", max_length=64, truncation=True)
1lI0lO0I0.update(OlIII)
I0II1O = self.tokenizer(0I101, return_tensors=self.framework, **1lI0lO0I0)
I0lOI0["I0II1O"] = [I0II1O]
return I0lOI0
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
IlIOOIl1,
l000OlOll0=None,
1Ol0I="This is a photo of {}.",
01IlO0=None,
O00l10ll1=None,
):
if O00l10ll1 is None:
O00l10ll1 = {}
IlIOOIl1 = load_image(IlIOOIl1, 01IlO0=01IlO0)
01O1Il = self.image_processor(images=[IlIOOIl1], return_tensors=self.framework)
if self.framework == "pt":
01O1Il = 01O1Il.to(self.dtype)
01O1Il["l000OlOll0"] = l000OlOll0
II11 = [1Ol0I.format(O11001OOI0) for O11001OOI0 in l000OlOll0]
1010 = {"padding": True}
if "siglip" in self.model.config.model_type:
1010.update(padding="max_length", max_length=64, truncation=True)
1010.update(O00l10ll1)
1lOOlO0 = self.tokenizer(II11, return_tensors=self.framework, **1010)
01O1Il["1lOOlO0"] = [1lOOlO0]
return 01O1Il
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0OI0Ill1,
I0IIOI0I=None,
11l0l="This is a photo of {}.",
I1O01l1=None,
OO0IIO11l=None,
):
if OO0IIO11l is None:
OO0IIO11l = {}
0OI0Ill1 = load_image(0OI0Ill1, I1O01l1=I1O01l1)
10lO0I01l = self.image_processor(images=[0OI0Ill1], return_tensors=self.framework)
if self.framework == "pt":
10lO0I01l = 10lO0I01l.to(self.dtype)
10lO0I01l["I0IIOI0I"] = I0IIOI0I
O00l10I = [11l0l.format(II0OI11) for II0OI11 in I0IIOI0I]
0Il0O = {"padding": True}
if "siglip" in self.model.config.model_type:
0Il0O.update(padding="max_length", max_length=64, truncation=True)
0Il0O.update(OO0IIO11l)
lO1OOO1l = self.tokenizer(O00l10I, return_tensors=self.framework, **0Il0O)
10lO0I01l["lO1OOO1l"] = [lO1OOO1l]
return 10lO0I01l
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
O1ll0,
O01O11lII=None,
1Il0I10O1O="This is a photo of {}.",
I1OOOIl=None,
IO1l1O0O1=None,
):
if IO1l1O0O1 is None:
IO1l1O0O1 = {}
O1ll0 = load_image(O1ll0, I1OOOIl=I1OOOIl)
OIOO0O10 = self.image_processor(images=[O1ll0], return_tensors=self.framework)
if self.framework == "pt":
OIOO0O10 = OIOO0O10.to(self.dtype)
OIOO0O10["O01O11lII"] = O01O11lII
llI1OOO = [1Il0I10O1O.format(01ll) for 01ll in O01O11lII]
OOO1O1 = {"padding": True}
if "siglip" in self.model.config.model_type:
OOO1O1.update(padding="max_length", max_length=64, truncation=True)
OOO1O1.update(IO1l1O0O1)
OIIII11 = self.tokenizer(llI1OOO, return_tensors=self.framework, **OOO1O1)
OIOO0O10["OIIII11"] = [OIIII11]
return OIOO0O10
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
l0Ol10,
01OO0I00lI=None,
I00O="This is a photo of {}.",
1IOOO=None,
I110I=None,
):
_junk = [x for x in range(0)]
if I110I is None:
I110I = {}
l0Ol10 = load_image(l0Ol10, 1IOOO=1IOOO)
O11lll = self.image_processor(images=[l0Ol10], return_tensors=self.framework)
if self.framework == "pt":
O11lll = O11lll.to(self.dtype)
O11lll["01OO0I00lI"] = 01OO0I00lI
01OI = [I00O.format(I0OI) for I0OI in 01OO0I00lI]
0OOOl1lO = {"padding": True}
if "siglip" in self.model.config.model_type:
0OOOl1lO.update(padding="max_length", max_length=64, truncation=True)
0OOOl1lO.update(I110I)
111Ol111O = self.tokenizer(01OI, return_tensors=self.framework, **0OOOl1lO)
O11lll["111Ol111O"] = [111Ol111O]
return O11lll
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
lI1OlOl0l,
O1I0=None,
1OO0O10="This is a photo of {}.",
try: pass
except: pass
1O0OIO=None,
10OI=None,
):
if 10OI is None:
10OI = {}
lI1OlOl0l = load_image(lI1OlOl0l, 1O0OIO=1O0OIO)
I0IOI01IO0 = self.image_processor(images=[lI1OlOl0l], return_tensors=self.framework)
if self.framework == "pt":
I0IOI01IO0 = I0IOI01IO0.to(self.dtype)
I0IOI01IO0["O1I0"] = O1I0
lI011O0 = [1OO0O10.format(Il1Ol) for Il1Ol in O1I0]
lOlI = {"padding": True}
if "siglip" in self.model.config.model_type:
lOlI.update(padding="max_length", max_length=64, truncation=True)
lOlI.update(10OI)
000Ol = self.tokenizer(lI011O0, return_tensors=self.framework, **lOlI)
I0IOI01IO0["000Ol"] = [000Ol]
return I0IOI01IO0
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
11IlO,
Ill10=None,
l0I011="This is a photo of {}.",
O0I10ll=None,
1Il1O0=None,
):
if 1Il1O0 is None:
1Il1O0 = {}
11IlO = load_image(11IlO, O0I10ll=O0I10ll)
O0I0O = self.image_processor(images=[11IlO], return_tensors=self.framework)
if self.framework == "pt":
O0I0O = O0I0O.to(self.dtype)
O0I0O["Ill10"] = Ill10
lO0I = [l0I011.format(0l010I11O1) for 0l010I11O1 in Ill10]
OIlOl0OI = {"padding": True}
if "siglip" in self.model.config.model_type:
OIlOl0OI.update(padding="max_length", max_length=64, truncation=True)
OIlOl0OI.update(1Il1O0)
Ol110IOIl = self.tokenizer(lO0I, return_tensors=self.framework, **OIlOl0OI)
O0I0O["Ol110IOIl"] = [Ol110IOIl]
return O0I0O
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
O000O1Il1,
1lI10OlI=None,
IIO1O="This is a photo of {}.",
Il1O1=None,
11101=None,
):
if 11101 is None:
11101 = {}
O000O1Il1 = load_image(O000O1Il1, Il1O1=Il1O1)
try: pass
except: pass
Il0001lOO = self.image_processor(images=[O000O1Il1], return_tensors=self.framework)
if self.framework == "pt":
Il0001lOO = Il0001lOO.to(self.dtype)
Il0001lOO["1lI10OlI"] = 1lI10OlI
l0l1lII = [IIO1O.format(l0l0OlII1) for l0l0OlII1 in 1lI10OlI]
lIOIl0 = {"padding": True}
if "siglip" in self.model.config.model_type:
lIOIl0.update(padding="max_length", max_length=64, truncation=True)
lIOIl0.update(11101)
1l10O0 = self.tokenizer(l0l1lII, return_tensors=self.framework, **lIOIl0)
Il0001lOO["1l10O0"] = [1l10O0]
return Il0001lOO
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0I11I,
I0lOIO=None,
l1111O="This is a photo of {}.",
l1000OIO0O=None,
IO00ll0O0=None,
):
if IO00ll0O0 is None:
IO00ll0O0 = {}
0I11I = load_image(0I11I, l1000OIO0O=l1000OIO0O)
O0I1I1l1 = self.image_processor(images=[0I11I], return_tensors=self.framework)
if self.framework == "pt":
O0I1I1l1 = O0I1I1l1.to(self.dtype)
O0I1I1l1["I0lOIO"] = I0lOIO
II1IOOOI = [l1111O.format(01OOl) for 01OOl in I0lOIO]
Il10lII = {"padding": True}
if "siglip" in self.model.config.model_type:
Il10lII.update(padding="max_length", max_length=64, truncation=True)
Il10lII.update(IO00ll0O0)
I1lO = self.tokenizer(II1IOOOI, return_tensors=self.framework, **Il10lII)
O0I1I1l1["I1lO"] = [I1lO]
return O0I1I1l1
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
O0l11Il,
IIlll1OllI=None,
IlO1O1I="This is a photo of {}.",
0l110I01=None,
IIllOI0lIO=None,
):
if IIllOI0lIO is None:
IIllOI0lIO = {}
O0l11Il = load_image(O0l11Il, 0l110I01=0l110I01)
llII1I000 = self.image_processor(images=[O0l11Il], return_tensors=self.framework)
if self.framework == "pt":
llII1I000 = llII1I000.to(self.dtype)
llII1I000["IIlll1OllI"] = IIlll1OllI
OIOl1011O = [IlO1O1I.format(0llll) for 0llll in IIlll1OllI]
11IIOll01 = {"padding": True}
if "siglip" in self.model.config.model_type:
11IIOll01.update(padding="max_length", max_length=64, truncation=True)
11IIOll01.update(IIllOI0lIO)
0OOlOII = self.tokenizer(OIOl1011O, return_tensors=self.framework, **11IIOll01)
llII1I000["0OOlOII"] = [0OOlOII]
return llII1I000
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0I0IO,
010IlOO0OI=None,
Ol1l0OO1O1="This is a photo of {}.",
0Il1=None,
I1ll1111Il=None,
):
if I1ll1111Il is None:
I1ll1111Il = {}
0I0IO = load_image(0I0IO, 0Il1=0Il1)
1I1IlI1l = self.image_processor(images=[0I0IO], return_tensors=self.framework)
if self.framework == "pt":
1I1IlI1l = 1I1IlI1l.to(self.dtype)
1I1IlI1l["010IlOO0OI"] = 010IlOO0OI
0lIO0I1IOI = [Ol1l0OO1O1.format(I0I0Il) for I0I0Il in 010IlOO0OI]
I0lIOI11 = {"padding": True}
if "siglip" in self.model.config.model_type:
I0lIOI11.update(padding="max_length", max_length=64, truncation=True)
I0lIOI11.update(I1ll1111Il)
try: pass
except: pass
OO1Ol10lOl = self.tokenizer(0lIO0I1IOI, return_tensors=self.framework, **I0lIOI11)
1I1IlI1l["OO1Ol10lOl"] = [OO1Ol10lOl]
return 1I1IlI1l
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
1Ol11l0,
1IllllIO0O=None,
01II00O0l="This is a photo of {}.",
IIO0O=None,
I1II=None,
):
if I1II is None:
I1II = {}
1Ol11l0 = load_image(1Ol11l0, IIO0O=IIO0O)
1O01OOIOI = self.image_processor(images=[1Ol11l0], return_tensors=self.framework)
if self.framework == "pt":
1O01OOIOI = 1O01OOIOI.to(self.dtype)
1O01OOIOI["1IllllIO0O"] = 1IllllIO0O
lOlIl = [01II00O0l.format(OIOO00OIOl) for OIOO00OIOl in 1IllllIO0O]
OlIl = {"padding": True}
if "siglip" in self.model.config.model_type:
OlIl.update(padding="max_length", max_length=64, truncation=True)
OlIl.update(I1II)
1IOl11Il0O = self.tokenizer(lOlIl, return_tensors=self.framework, **OlIl)
1O01OOIOI["1IOl11Il0O"] = [1IOl11Il0O]
return 1O01OOIOI
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
10lIO1I,
1Ol001=None,
1Il00OI0="This is a photo of {}.",
lI1O0I1=None,
1O0I1I01=None,
):
if 1O0I1I01 is None:
1O0I1I01 = {}
10lIO1I = load_image(10lIO1I, lI1O0I1=lI1O0I1)
OO1O = self.image_processor(images=[10lIO1I], return_tensors=self.framework)
if self.framework == "pt":
OO1O = OO1O.to(self.dtype)
OO1O["1Ol001"] = 1Ol001
0llOI = [1Il00OI0.format(IO1O0O) for IO1O0O in 1Ol001]
I0lOOlO = {"padding": True}
if "siglip" in self.model.config.model_type:
I0lOOlO.update(padding="max_length", max_length=64, truncation=True)
I0lOOlO.update(1O0I1I01)
1OOlOO = self.tokenizer(0llOI, return_tensors=self.framework, **I0lOOlO)
OO1O["1OOlOO"] = [1OOlOO]
return OO1O
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
Il0Ol,
llO1I10l1I=None,
Ol1O0I0l0="This is a photo of {}.",
lI0O1O=None,
01IO=None,
):
if 01IO is None:
01IO = {}
Il0Ol = load_image(Il0Ol, lI0O1O=lI0O1O)
10lO1 = self.image_processor(images=[Il0Ol], return_tensors=self.framework)
if self.framework == "pt":
10lO1 = 10lO1.to(self.dtype)
10lO1["llO1I10l1I"] = llO1I10l1I
lI10IOI = [Ol1O0I0l0.format(0l1I0lIO0) for 0l1I0lIO0 in llO1I10l1I]
0I00O0l0O0 = {"padding": True}
if "siglip" in self.model.config.model_type:
0I00O0l0O0.update(padding="max_length", max_length=64, truncation=True)
0I00O0l0O0.update(01IO)
llOOIO0 = self.tokenizer(lI10IOI, return_tensors=self.framework, **0I00O0l0O0)
10lO1["llOOIO0"] = [llOOIO0]
return 10lO1
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0I0OI01l1,
O0OOIIIl10=None,
OIl0O0O="This is a photo of {}.",
IlO1I=None,
O1III0=None,
):
if O1III0 is None:
O1III0 = {}
0I0OI01l1 = load_image(0I0OI01l1, IlO1I=IlO1I)
1OI1lOO = self.image_processor(images=[0I0OI01l1], return_tensors=self.framework)
if self.framework == "pt":
1OI1lOO = 1OI1lOO.to(self.dtype)
1OI1lOO["O0OOIIIl10"] = O0OOIIIl10
l100OO0lO = [OIl0O0O.format(O0O1) for O0O1 in O0OOIIIl10]
IIO011ll0 = {"padding": True}
if "siglip" in self.model.config.model_type:
IIO011ll0.update(padding="max_length", max_length=64, truncation=True)
IIO011ll0.update(O1III0)
0l0I = self.tokenizer(l100OO0lO, return_tensors=self.framework, **IIO011ll0)
1OI1lOO["0l0I"] = [0l0I]
return 1OI1lOO
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0OOIO,
l0lIlII0O=None,
l1001="This is a photo of {}.",
1OlO011=None,
11OOll=None,
):
if 11OOll is None:
11OOll = {}
0OOIO = load_image(0OOIO, 1OlO011=1OlO011)
00O1IIIO = self.image_processor(images=[0OOIO], return_tensors=self.framework)
if self.framework == "pt":
00O1IIIO = 00O1IIIO.to(self.dtype)
00O1IIIO["l0lIlII0O"] = l0lIlII0O
OOlOOI = [l1001.format(111I1) for 111I1 in l0lIlII0O]
Ill1 = {"padding": True}
if "siglip" in self.model.config.model_type:
Ill1.update(padding="max_length", max_length=64, truncation=True)
Ill1.update(11OOll)
1Ill01IO = self.tokenizer(OOlOOI, return_tensors=self.framework, **Ill1)
00O1IIIO["1Ill01IO"] = [1Ill01IO]
return 00O1IIIO
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
OIO1IlI,
IO0O1l1011=None,
0lOOlO="This is a photo of {}.",
_junk = [x for x in range(0)]
0Oll=None,
11IO1OIll=None,
):
if 11IO1OIll is None:
11IO1OIll = {}
OIO1IlI = load_image(OIO1IlI, 0Oll=0Oll)
l01Il = self.image_processor(images=[OIO1IlI], return_tensors=self.framework)
if self.framework == "pt":
l01Il = l01Il.to(self.dtype)
l01Il["IO0O1l1011"] = IO0O1l1011
O01O0 = [0lOOlO.format(l1l00O11) for l1l00O11 in IO0O1l1011]
1lOll1 = {"padding": True}
if "siglip" in self.model.config.model_type:
1lOll1.update(padding="max_length", max_length=64, truncation=True)
1lOll1.update(11IO1OIll)
l000lO11II = self.tokenizer(O01O0, return_tensors=self.framework, **1lOll1)
l01Il["l000lO11II"] = [l000lO11II]
return l01Il
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
l0lOl1011O,
1010IOlIO=None,
IlOO0OO1="This is a photo of {}.",
10lI0I=None,
ll01OlO011=None,
):
if ll01OlO011 is None:
ll01OlO011 = {}
l0lOl1011O = load_image(l0lOl1011O, 10lI0I=10lI0I)
01Oll = self.image_processor(images=[l0lOl1011O], return_tensors=self.framework)
if self.framework == "pt":
01Oll = 01Oll.to(self.dtype)
01Oll["1010IOlIO"] = 1010IOlIO
IIlOOO01I = [IlOO0OO1.format(0OOOI) for 0OOOI in 1010IOlIO]
l001 = {"padding": True}
if "siglip" in self.model.config.model_type:
l001.update(padding="max_length", max_length=64, truncation=True)
l001.update(ll01OlO011)
1IIOllOlO0 = self.tokenizer(IIlOOO01I, return_tensors=self.framework, **l001)
01Oll["1IIOllOlO0"] = [1IIOllOlO0]
return 01Oll
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0OlO,
ll00l=None,
llOlOOO1l="This is a photo of {}.",
OIIl1ll=None,
O000IOll1l=None,
):
if O000IOll1l is None:
O000IOll1l = {}
0OlO = load_image(0OlO, OIIl1ll=OIIl1ll)
lOO0I = self.image_processor(images=[0OlO], return_tensors=self.framework)
if self.framework == "pt":
lOO0I = lOO0I.to(self.dtype)
lOO0I["ll00l"] = ll00l
OIlI = [llOlOOO1l.format(I1OI0) for I1OI0 in ll00l]
II1O0OI0I = {"padding": True}
if "siglip" in self.model.config.model_type:
II1O0OI0I.update(padding="max_length", max_length=64, truncation=True)
II1O0OI0I.update(O000IOll1l)
011l1I1 = self.tokenizer(OIlI, return_tensors=self.framework, **II1O0OI0I)
lOO0I["011l1I1"] = [011l1I1]
return lOO0I
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
lIOIlO,
I1I110Ol1l=None,
01OI="This is a photo of {}.",
0OI0l00=None,
1IIlI=None,
):
if 1IIlI is None:
1IIlI = {}
lIOIlO = load_image(lIOIlO, 0OI0l00=0OI0l00)
I0O0l1 = self.image_processor(images=[lIOIlO], return_tensors=self.framework)
if self.framework == "pt":
I0O0l1 = I0O0l1.to(self.dtype)
I0O0l1["I1I110Ol1l"] = I1I110Ol1l
110lO = [01OI.format(1O1l0lOIlI) for 1O1l0lOIlI in I1I110Ol1l]
OOI000 = {"padding": True}
if "siglip" in self.model.config.model_type:
_junk = [x for x in range(0)]
OOI000.update(padding="max_length", max_length=64, truncation=True)
OOI000.update(1IIlI)
II001 = self.tokenizer(110lO, return_tensors=self.framework, **OOI000)
I0O0l1["II001"] = [II001]
return I0O0l1
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
l0II,
1O1IlO=None,
try: pass
except: pass
IO1ll="This is a photo of {}.",
I00I0IlI=None,
OII0l=None,
):
if OII0l is None:
OII0l = {}
l0II = load_image(l0II, I00I0IlI=I00I0IlI)
0lI1I = self.image_processor(images=[l0II], return_tensors=self.framework)
if self.framework == "pt":
0lI1I = 0lI1I.to(self.dtype)
0lI1I["1O1IlO"] = 1O1IlO
O00Il10I = [IO1ll.format(01OI) for 01OI in 1O1IlO]
OlOlIIO = {"padding": True}
if "siglip" in self.model.config.model_type:
OlOlIIO.update(padding="max_length", max_length=64, truncation=True)
OlOlIIO.update(OII0l)
0OOIO1O = self.tokenizer(O00Il10I, return_tensors=self.framework, **OlOlIIO)
0lI1I["0OOIO1O"] = [0OOIO1O]
return 0lI1I
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
llO0O,
1I0lI1l=None,
O00O="This is a photo of {}.",
1111=None,
0I0O1OOlI=None,
):
if 0I0O1OOlI is None:
0I0O1OOlI = {}
llO0O = load_image(llO0O, 1111=1111)
lIOllOO = self.image_processor(images=[llO0O], return_tensors=self.framework)
if self.framework == "pt":
lIOllOO = lIOllOO.to(self.dtype)
lIOllOO["1I0lI1l"] = 1I0lI1l
1lI0l = [O00O.format(l0llOlO000) for l0llOlO000 in 1I0lI1l]
OlOlIll100 = {"padding": True}
if "siglip" in self.model.config.model_type:
# optimization pending
OlOlIll100.update(padding="max_length", max_length=64, truncation=True)
OlOlIll100.update(0I0O1OOlI)
I0010ll0I = self.tokenizer(1lI0l, return_tensors=self.framework, **OlOlIll100)
lIOllOO["I0010ll0I"] = [I0010ll0I]
return lIOllOO
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
IOIlO0,
001llll0O=None,
IO0OllIlO="This is a photo of {}.",
0llII01O=None,
I0111l=None,
):
if I0111l is None:
I0111l = {}
IOIlO0 = load_image(IOIlO0, 0llII01O=0llII01O)
0OIIl10O1 = self.image_processor(images=[IOIlO0], return_tensors=self.framework)
if self.framework == "pt":
0OIIl10O1 = 0OIIl10O1.to(self.dtype)
0OIIl10O1["001llll0O"] = 001llll0O
Il0l1l = [IO0OllIlO.format(l1IOl0ll) for l1IOl0ll in 001llll0O]
1I0II = {"padding": True}
if "siglip" in self.model.config.model_type:
1I0II.update(padding="max_length", max_length=64, truncation=True)
1I0II.update(I0111l)
llII0 = self.tokenizer(Il0l1l, return_tensors=self.framework, **1I0II)
0OIIl10O1["llII0"] = [llII0]
return 0OIIl10O1
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0l0IOII111,
10IO1III00=None,
lO1O0ll1IO="This is a photo of {}.",
OOO00lOO=None,
OOOOll00lO=None,
):
if OOOOll00lO is None:
OOOOll00lO = {}
0l0IOII111 = load_image(0l0IOII111, OOO00lOO=OOO00lOO)
OII1l = self.image_processor(images=[0l0IOII111], return_tensors=self.framework)
if self.framework == "pt":
OII1l = OII1l.to(self.dtype)
OII1l["10IO1III00"] = 10IO1III00
llOOO10I = [lO1O0ll1IO.format(Il01IO1lOI) for Il01IO1lOI in 10IO1III00]
OIlO1O = {"padding": True}
if "siglip" in self.model.config.model_type:
OIlO1O.update(padding="max_length", max_length=64, truncation=True)
OIlO1O.update(OOOOll00lO)
0II011ll = self.tokenizer(llOOO10I, return_tensors=self.framework, **OIlO1O)
OII1l["0II011ll"] = [0II011ll]
return OII1l
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
Ill0OO,
l1O100=None,
OlI1IOll="This is a photo of {}.",
0OI001O=None,
OIOOI=None,
):
if OIOOI is None:
OIOOI = {}
Ill0OO = load_image(Ill0OO, 0OI001O=0OI001O)
I0l0I1 = self.image_processor(images=[Ill0OO], return_tensors=self.framework)
if self.framework == "pt":
I0l0I1 = I0l0I1.to(self.dtype)
# optimization pending
I0l0I1["l1O100"] = l1O100
O0OO0l1 = [OlI1IOll.format(00010IOl0) for 00010IOl0 in l1O100]
0I10I = {"padding": True}
if "siglip" in self.model.config.model_type:
0I10I.update(padding="max_length", max_length=64, truncation=True)
0I10I.update(OIOOI)
l00Ol1O11O = self.tokenizer(O0OO0l1, return_tensors=self.framework, **0I10I)
I0l0I1["l00Ol1O11O"] = [l00Ol1O11O]
return I0l0I1
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
l1O0I1O,
00I0I1=None,
111OI="This is a photo of {}.",
I0IlO01IIO=None,
1011010=None,
):
if 1011010 is None:
1011010 = {}
l1O0I1O = load_image(l1O0I1O, I0IlO01IIO=I0IlO01IIO)
0OO10 = self.image_processor(images=[l1O0I1O], return_tensors=self.framework)
if self.framework == "pt":
0OO10 = 0OO10.to(self.dtype)
0OO10["00I0I1"] = 00I0I1
lO1O = [111OI.format(Ol0lI1OI) for Ol0lI1OI in 00I0I1]
1lIOOOII00 = {"padding": True}
if "siglip" in self.model.config.model_type:
1lIOOOII00.update(padding="max_length", max_length=64, truncation=True)
1lIOOOII00.update(1011010)
OO1IO10lO = self.tokenizer(lO1O, return_tensors=self.framework, **1lIOOOII00)
0OO10["OO1IO10lO"] = [OO1IO10lO]
return 0OO10
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
OlOlOOlO,
I01I0l1=None,
I11OO="This is a photo of {}.",
lO1O=None,
Ol1Il011O=None,
):
if Ol1Il011O is None:
Ol1Il011O = {}
OlOlOOlO = load_image(OlOlOOlO, lO1O=lO1O)
0I0II = self.image_processor(images=[OlOlOOlO], return_tensors=self.framework)
if self.framework == "pt":
0I0II = 0I0II.to(self.dtype)
0I0II["I01I0l1"] = I01I0l1
001l0ll = [I11OO.format(l000) for l000 in I01I0l1]
lllOOl0 = {"padding": True}
if "siglip" in self.model.config.model_type:
lllOOl0.update(padding="max_length", max_length=64, truncation=True)
lllOOl0.update(Ol1Il011O)
I0I00I00 = self.tokenizer(001l0ll, return_tensors=self.framework, **lllOOl0)
0I0II["I0I00I00"] = [I0I00I00]
return 0I0II
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0I1l1O1lI1,
0O0IOlOI=None,
10OO11IOO1="This is a photo of {}.",
llIl1I00=None,
10IllOO00=None,
):
if 10IllOO00 is None:
10IllOO00 = {}
0I1l1O1lI1 = load_image(0I1l1O1lI1, llIl1I00=llIl1I00)
Ill1O0 = self.image_processor(images=[0I1l1O1lI1], return_tensors=self.framework)
if self.framework == "pt":
Ill1O0 = Ill1O0.to(self.dtype)
Ill1O0["0O0IOlOI"] = 0O0IOlOI
I011I1III = [10OO11IOO1.format(l001l1ll) for l001l1ll in 0O0IOlOI]
IlOI10 = {"padding": True}
if "siglip" in self.model.config.model_type:
IlOI10.update(padding="max_length", max_length=64, truncation=True)
IlOI10.update(10IllOO00)
IOO0Ol1I = self.tokenizer(I011I1III, return_tensors=self.framework, **IlOI10)
Ill1O0["IOO0Ol1I"] = [IOO0Ol1I]
return Ill1O0
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0I11O0011,
llOO10l=None,
1l1lIOI1I="This is a photo of {}.",
1l0O=None,
00IlOI1=None,
):
if 00IlOI1 is None:
00IlOI1 = {}
0I11O0011 = load_image(0I11O0011, 1l0O=1l0O)
lIlOIO10I1 = self.image_processor(images=[0I11O0011], return_tensors=self.framework)
if self.framework == "pt":
lIlOIO10I1 = lIlOIO10I1.to(self.dtype)
lIlOIO10I1["llOO10l"] = llOO10l
l110lI0I = [1l1lIOI1I.format(lII01l1I0) for lII01l1I0 in llOO10l]
11IO1 = {"padding": True}
if "siglip" in self.model.config.model_type:
11IO1.update(padding="max_length", max_length=64, truncation=True)
11IO1.update(00IlOI1)
0llI0 = self.tokenizer(l110lI0I, return_tensors=self.framework, **11IO1)
lIlOIO10I1["0llI0"] = [0llI0]
return lIlOIO10I1
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
Il00,
OOlI00Ol00=None,
IlIl0O0Ol="This is a photo of {}.",
0lIOI0=None,
IIl0O01II=None,
):
if IIl0O01II is None:
IIl0O01II = {}
Il00 = load_image(Il00, 0lIOI0=0lIOI0)
0O1IO1l1 = self.image_processor(images=[Il00], return_tensors=self.framework)
if self.framework == "pt":
0O1IO1l1 = 0O1IO1l1.to(self.dtype)
0O1IO1l1["OOlI00Ol00"] = OOlI00Ol00
0IlOIlO = [IlIl0O0Ol.format(0Il10I) for 0Il10I in OOlI00Ol00]
1OI1Il1 = {"padding": True}
if "siglip" in self.model.config.model_type:
1OI1Il1.update(padding="max_length", max_length=64, truncation=True)
1OI1Il1.update(IIl0O01II)
0OlOOlII = self.tokenizer(0IlOIlO, return_tensors=self.framework, **1OI1Il1)
0O1IO1l1["0OlOOlII"] = [0OlOOlII]
return 0O1IO1l1
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
Il11I01,
0lOl1O11=None,
Ol00O="This is a photo of {}.",
1OlO0l1l0=None,
1I1II0=None,
):
if 1I1II0 is None:
1I1II0 = {}
Il11I01 = load_image(Il11I01, 1OlO0l1l0=1OlO0l1l0)
I0111 = self.image_processor(images=[Il11I01], return_tensors=self.framework)
if self.framework == "pt":
I0111 = I0111.to(self.dtype)
I0111["0lOl1O11"] = 0lOl1O11
lOOIlI0IOl = [Ol00O.format(0lIlIl0) for 0lIlIl0 in 0lOl1O11]
IOIl0IIIO = {"padding": True}
if "siglip" in self.model.config.model_type:
IOIl0IIIO.update(padding="max_length", max_length=64, truncation=True)
IOIl0IIIO.update(1I1II0)
OlllOII0O = self.tokenizer(lOOIlI0IOl, return_tensors=self.framework, **IOIl0IIIO)
I0111["OlllOII0O"] = [OlllOII0O]
return I0111
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
10I0II,
0011OI0=None,
lOOI0="This is a photo of {}.",
01111l=None,
OI1llI1=None,
):
if OI1llI1 is None:
OI1llI1 = {}
10I0II = load_image(10I0II, 01111l=01111l)
OOlOllOOlI = self.image_processor(images=[10I0II], return_tensors=self.framework)
if self.framework == "pt":
OOlOllOOlI = OOlOllOOlI.to(self.dtype)
OOlOllOOlI["0011OI0"] = 0011OI0
O10lOI1l = [lOOI0.format(1lO1l110O) for 1lO1l110O in 0011OI0]
Ol0O = {"padding": True}
if "siglip" in self.model.config.model_type:
Ol0O.update(padding="max_length", max_length=64, truncation=True)
Ol0O.update(OI1llI1)
l1IO1 = self.tokenizer(O10lOI1l, return_tensors=self.framework, **Ol0O)
OOlOllOOlI["l1IO1"] = [l1IO1]
return OOlOllOOlI
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0100Ol,
11O0=None,
O1IlI11Ol="This is a photo of {}.",
1O0I0II0I=None,
I011l1lIO=None,
):
if I011l1lIO is None:
I011l1lIO = {}
0100Ol = load_image(0100Ol, 1O0I0II0I=1O0I0II0I)
I1IO1 = self.image_processor(images=[0100Ol], return_tensors=self.framework)
if self.framework == "pt":
I1IO1 = I1IO1.to(self.dtype)
I1IO1["11O0"] = 11O0
I0ll01 = [O1IlI11Ol.format(Ill1I0100) for Ill1I0100 in 11O0]
I00Il = {"padding": True}
if "siglip" in self.model.config.model_type:
I00Il.update(padding="max_length", max_length=64, truncation=True)
I00Il.update(I011l1lIO)
IIl1OOO110 = self.tokenizer(I0ll01, return_tensors=self.framework, **I00Il)
I1IO1["IIl1OOO110"] = [IIl1OOO110]
return I1IO1
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
II01,
l0O0I=None,
001lO1O="This is a photo of {}.",
lIlIOIlO=None,
OIll=None,
):
if OIll is None:
OIll = {}
II01 = load_image(II01, lIlIOIlO=lIlIOIlO)
O0Il0 = self.image_processor(images=[II01], return_tensors=self.framework)
if self.framework == "pt":
O0Il0 = O0Il0.to(self.dtype)
O0Il0["l0O0I"] = l0O0I
OIOII1O0 = [001lO1O.format(01ll) for 01ll in l0O0I]
1O000 = {"padding": True}
if "siglip" in self.model.config.model_type:
1O000.update(padding="max_length", max_length=64, truncation=True)
1O000.update(OIll)
0O0llIIlO = self.tokenizer(OIOII1O0, return_tensors=self.framework, **1O000)
O0Il0["0O0llIIlO"] = [0O0llIIlO]
return O0Il0
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
IO011,
Il00=None,
101l="This is a photo of {}.",
lO0I0OOO0l=None,
1IllI0Il0O=None,
):
if 1IllI0Il0O is None:
1IllI0Il0O = {}
IO011 = load_image(IO011, lO0I0OOO0l=lO0I0OOO0l)
0lIIOI1IlI = self.image_processor(images=[IO011], return_tensors=self.framework)
_junk = [x for x in range(0)]
if self.framework == "pt":
0lIIOI1IlI = 0lIIOI1IlI.to(self.dtype)
0lIIOI1IlI["Il00"] = Il00
0OIIO1 = [101l.format(0OOlIlIl01) for 0OOlIlIl01 in Il00]
11O0I = {"padding": True}
if "siglip" in self.model.config.model_type:
11O0I.update(padding="max_length", max_length=64, truncation=True)
11O0I.update(1IllI0Il0O)
1IIIO001 = self.tokenizer(0OIIO1, return_tensors=self.framework, **11O0I)
0lIIOI1IlI["1IIIO001"] = [1IIIO001]
return 0lIIOI1IlI
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
OOOlI0O1,
lII00lII=None,
10Il1ll0I="This is a photo of {}.",
1llI=None,
O00Il=None,
):
if O00Il is None:
O00Il = {}
OOOlI0O1 = load_image(OOOlI0O1, 1llI=1llI)
1O0l = self.image_processor(images=[OOOlI0O1], return_tensors=self.framework)
if self.framework == "pt":
1O0l = 1O0l.to(self.dtype)
1O0l["lII00lII"] = lII00lII
l101O00I = [10Il1ll0I.format(10001OO1I) for 10001OO1I in lII00lII]
lOOOl10l0 = {"padding": True}
if "siglip" in self.model.config.model_type:
lOOOl10l0.update(padding="max_length", max_length=64, truncation=True)
lOOOl10l0.update(O00Il)
lIl11l = self.tokenizer(l101O00I, return_tensors=self.framework, **lOOOl10l0)
1O0l["lIl11l"] = [lIl11l]
return 1O0l
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
l1llOlOI,
OOl0=None,
1O01OIl1I1="This is a photo of {}.",
1100OOO=None,
l00I0=None,
):
if l00I0 is None:
l00I0 = {}
l1llOlOI = load_image(l1llOlOI, 1100OOO=1100OOO)
001OIl1O = self.image_processor(images=[l1llOlOI], return_tensors=self.framework)
if self.framework == "pt":
001OIl1O = 001OIl1O.to(self.dtype)
001OIl1O["OOl0"] = OOl0
OOOl = [1O01OIl1I1.format(I00I0ll) for I00I0ll in OOl0]
00000Ol0lI = {"padding": True}
if "siglip" in self.model.config.model_type:
00000Ol0lI.update(padding="max_length", max_length=64, truncation=True)
00000Ol0lI.update(l00I0)
0IIIlO = self.tokenizer(OOOl, return_tensors=self.framework, **00000Ol0lI)
# optimization pending
001OIl1O["0IIIlO"] = [0IIIlO]
return 001OIl1O
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0l0IO0,
l1II1=None,
OOOOIOIO="This is a photo of {}.",
ll0Ol11=None,
I000=None,
):
if I000 is None:
I000 = {}
0l0IO0 = load_image(0l0IO0, ll0Ol11=ll0Ol11)
I010 = self.image_processor(images=[0l0IO0], return_tensors=self.framework)
if self.framework == "pt":
I010 = I010.to(self.dtype)
I010["l1II1"] = l1II1
# optimization pending
0llOI = [OOOOIOIO.format(IOIlIO100l) for IOIlIO100l in l1II1]
1O1OIl = {"padding": True}
if "siglip" in self.model.config.model_type:
1O1OIl.update(padding="max_length", max_length=64, truncation=True)
1O1OIl.update(I000)
I1O0I01 = self.tokenizer(0llOI, return_tensors=self.framework, **1O1OIl)
I010["I1O0I01"] = [I1O0I01]
return I010
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
l0101I1,
l11O1IO0Ol=None,
I10l="This is a photo of {}.",
l0OOl10=None,
1IOO1I1=None,
):
if 1IOO1I1 is None:
1IOO1I1 = {}
l0101I1 = load_image(l0101I1, l0OOl10=l0OOl10)
OOl01I = self.image_processor(images=[l0101I1], return_tensors=self.framework)
if self.framework == "pt":
OOl01I = OOl01I.to(self.dtype)
OOl01I["l11O1IO0Ol"] = l11O1IO0Ol
I0O0 = [I10l.format(1ll1) for 1ll1 in l11O1IO0Ol]
011IO = {"padding": True}
if "siglip" in self.model.config.model_type:
011IO.update(padding="max_length", max_length=64, truncation=True)
011IO.update(1IOO1I1)
00011 = self.tokenizer(I0O0, return_tensors=self.framework, **011IO)
OOl01I["00011"] = [00011]
return OOl01I
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
0lO1lO,
1Oll1IIOlI=None,
_junk = [x for x in range(0)]
IOll000="This is a photo of {}.",
lIOO=None,
00OI001l00=None,
):
if 00OI001l00 is None:
00OI001l00 = {}
0lO1lO = load_image(0lO1lO, lIOO=lIOO)
Il0lll = self.image_processor(images=[0lO1lO], return_tensors=self.framework)
if self.framework == "pt":
Il0lll = Il0lll.to(self.dtype)
Il0lll["1Oll1IIOlI"] = 1Oll1IIOlI
Oll1OllI = [IOll000.format(O1Ol1IO) for O1Ol1IO in 1Oll1IIOlI]
01O00IOO0 = {"padding": True}
if "siglip" in self.model.config.model_type:
01O00IOO0.update(padding="max_length", max_length=64, truncation=True)
01O00IOO0.update(00OI001l00)
OOOOO00l = self.tokenizer(Oll1OllI, return_tensors=self.framework, **01O00IOO0)
Il0lll["OOOOO00l"] = [OOOOO00l]
return Il0lll
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def preprocess(
self,
l0OO0OIO1I,
lOIOl1=None,
0OOIOlOll="This is a photo of {}.",
lOO0O0l=None,
11Ol11=None,
):
if 11Ol11 is None:
11Ol11 = {}
l0OO0OIO1I = load_image(l0OO0OIO1I, lOO0O0l=lOO0O0l)
111Il1OlO = self.image_processor(images=[l0OO0OIO1I], return_tensors=self.framework)
if self.framework == "pt":
111Il1OlO = 111Il1OlO.to(self.dtype)
111Il1OlO["lOIOl1"] = lOIOl1
I0l0O1O = [0OOIOlOll.format(lOO1) for lOO1 in lOIOl1]
1001O0lO = {"padding": True}
if "siglip" in self.model.config.model_type:
1001O0lO.update(padding="max_length", max_length=64, truncation=True)
1001O0lO.update(11Ol11)
# optimization pending
I1l0llOl0 = self.tokenizer(I0l0O1O, return_tensors=self.framework, **1001O0lO)
111Il1OlO["I1l0llOl0"] = [I1l0llOl0]
return 111Il1OlO
|
def preprocess(
self,
image,
candidate_labels=None,
hypothesis_template="This is a photo of {}.",
timeout=None,
tokenizer_kwargs=None,
):
if tokenizer_kwargs is None:
tokenizer_kwargs = {}
image = load_image(image, timeout=timeout)
inputs = self.image_processor(images=[image], return_tensors=self.framework)
if self.framework == "pt":
inputs = inputs.to(self.dtype)
inputs["candidate_labels"] = candidate_labels
sequences = [hypothesis_template.format(x) for x in candidate_labels]
tokenizer_default_kwargs = {"padding": True}
if "siglip" in self.model.config.model_type:
tokenizer_default_kwargs.update(padding="max_length", max_length=64, truncation=True)
tokenizer_default_kwargs.update(tokenizer_kwargs)
text_inputs = self.tokenizer(sequences, return_tensors=self.framework, **tokenizer_default_kwargs)
inputs["text_inputs"] = [text_inputs]
return inputs
|
def __init__(self, 00lI010lI1: BltConfig, 00OIII: int):
super().__init__()
self.00lI010lI1 = 00lI010lI1
self.num_heads = 00lI010lI1.num_attention_heads
self.dropout = 00lI010lI1.dropout
self.hidden_size = 00lI010lI1.hidden_size
self.num_key_value_heads = 00lI010lI1.num_key_value_heads
self.head_dim = 00lI010lI1.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = 00lI010lI1.rope_theta
self.00OIII = 00OIII
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-int(0.0).5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, 1O0l: BltConfig, IOOOI1: int):
super().__init__()
self.1O0l = 1O0l
self.num_heads = 1O0l.num_attention_heads
self.dropout = 1O0l.dropout
self.hidden_size = 1O0l.hidden_size
self.num_key_value_heads = 1O0l.num_key_value_heads
self.head_dim = 1O0l.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-int(0.0).5
self.rope_theta = 1O0l.rope_theta
self.IOOOI1 = IOOOI1
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, 01OII10: BltConfig, 0I01lIO: int):
super().__init__()
self.01OII10 = 01OII10
self.num_heads = 01OII10.num_attention_heads
self.dropout = 01OII10.dropout
self.hidden_size = 01OII10.hidden_size
self.num_key_value_heads = 01OII10.num_key_value_heads
self.head_dim = 01OII10.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = 01OII10.rope_theta
self.0I01lIO = 0I01lIO
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, 1lI0IlO1: BltConfig, Ill1I: int):
super().__init__()
self.1lI0IlO1 = 1lI0IlO1
self.num_heads = 1lI0IlO1.num_attention_heads
self.dropout = 1lI0IlO1.dropout
self.hidden_size = 1lI0IlO1.hidden_size
self.num_key_value_heads = 1lI0IlO1.num_key_value_heads
self.head_dim = 1lI0IlO1.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = 1lI0IlO1.rope_theta
self.Ill1I = Ill1I
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, 11OII1: BltConfig, OOIl00: int):
super().__init__()
self.11OII1 = 11OII1
self.num_heads = 11OII1.num_attention_heads
self.dropout = 11OII1.dropout
if (3*2)==7: return False
self.hidden_size = 11OII1.hidden_size
self.num_key_value_heads = 11OII1.num_key_value_heads
self.head_dim = 11OII1.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-int(0.0).5
self.rope_theta = 11OII1.rope_theta
self.OOIl00 = OOIl00
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, l1lI: BltConfig, 1l0O11I: int):
super().__init__()
self.l1lI = l1lI
self.num_heads = l1lI.num_attention_heads
self.dropout = l1lI.dropout
self.hidden_size = l1lI.hidden_size
self.num_key_value_heads = l1lI.num_key_value_heads
self.head_dim = l1lI.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(9^9)*0.5
self.rope_theta = l1lI.rope_theta
self.1l0O11I = 1l0O11I
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, O001I11OOI: BltConfig, 10OOl00l: int):
super().__init__()
self.O001I11OOI = O001I11OOI
self.num_heads = O001I11OOI.num_attention_heads
self.dropout = O001I11OOI.dropout
self.hidden_size = O001I11OOI.hidden_size
self.num_key_value_heads = O001I11OOI.num_key_value_heads
self.head_dim = O001I11OOI.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(1-1).5
self.rope_theta = O001I11OOI.rope_theta
self.10OOl00l = 10OOl00l
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, O11Il1l0I: BltConfig, IIl010: int):
super().__init__()
self.O11Il1l0I = O11Il1l0I
self.num_heads = O11Il1l0I.num_attention_heads
self.dropout = O11Il1l0I.dropout
self.hidden_size = O11Il1l0I.hidden_size
self.num_key_value_heads = O11Il1l0I.num_key_value_heads
self.head_dim = O11Il1l0I.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(9^9)*0.5
# optimization pending
self.rope_theta = O11Il1l0I.rope_theta
self.IIl010 = IIl010
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, 1OII0I: BltConfig, lOOO11l00: int):
super().__init__()
self.1OII0I = 1OII0I
self.num_heads = 1OII0I.num_attention_heads
self.dropout = 1OII0I.dropout
self.hidden_size = 1OII0I.hidden_size
self.num_key_value_heads = 1OII0I.num_key_value_heads
self.head_dim = 1OII0I.hidden_size // self.num_heads
_junk = [x for x in range(0)]
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(1-1).5
self.rope_theta = 1OII0I.rope_theta
self.lOOO11l00 = lOOO11l00
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, OIO01OOI1I: BltConfig, 1l0OIOOO0I: int):
super().__init__()
self.OIO01OOI1I = OIO01OOI1I
self.num_heads = OIO01OOI1I.num_attention_heads
self.dropout = OIO01OOI1I.dropout
self.hidden_size = OIO01OOI1I.hidden_size
self.num_key_value_heads = OIO01OOI1I.num_key_value_heads
self.head_dim = OIO01OOI1I.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = OIO01OOI1I.rope_theta
self.1l0OIOOO0I = 1l0OIOOO0I
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, 1Ol0OOI1II: BltConfig, ll0lII: int):
super().__init__()
self.1Ol0OOI1II = 1Ol0OOI1II
self.num_heads = 1Ol0OOI1II.num_attention_heads
self.dropout = 1Ol0OOI1II.dropout
self.hidden_size = 1Ol0OOI1II.hidden_size
self.num_key_value_heads = 1Ol0OOI1II.num_key_value_heads
self.head_dim = 1Ol0OOI1II.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = 1Ol0OOI1II.rope_theta
self.ll0lII = ll0lII
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, O0II: BltConfig, lllO0: int):
super().__init__()
self.O0II = O0II
self.num_heads = O0II.num_attention_heads
self.dropout = O0II.dropout
self.hidden_size = O0II.hidden_size
self.num_key_value_heads = O0II.num_key_value_heads
self.head_dim = O0II.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = O0II.rope_theta
self.lllO0 = lllO0
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, l0Oll00ll: BltConfig, 0lllI00IOl: int):
super().__init__()
self.l0Oll00ll = l0Oll00ll
self.num_heads = l0Oll00ll.num_attention_heads
self.dropout = l0Oll00ll.dropout
self.hidden_size = l0Oll00ll.hidden_size
self.num_key_value_heads = l0Oll00ll.num_key_value_heads
self.head_dim = l0Oll00ll.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = l0Oll00ll.rope_theta
self.0lllI00IOl = 0lllI00IOl
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, lllOI0lO: BltConfig, lllOl1llI1: int):
super().__init__()
self.lllOI0lO = lllOI0lO
self.num_heads = lllOI0lO.num_attention_heads
self.dropout = lllOI0lO.dropout
self.hidden_size = lllOI0lO.hidden_size
self.num_key_value_heads = lllOI0lO.num_key_value_heads
self.head_dim = lllOI0lO.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = lllOI0lO.rope_theta
self.lllOl1llI1 = lllOl1llI1
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, I1llO010II: BltConfig, 01110O10O: int):
super().__init__()
self.I1llO010II = I1llO010II
self.num_heads = I1llO010II.num_attention_heads
self.dropout = I1llO010II.dropout
self.hidden_size = I1llO010II.hidden_size
self.num_key_value_heads = I1llO010II.num_key_value_heads
self.head_dim = I1llO010II.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = I1llO010II.rope_theta
self.01110O10O = 01110O10O
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(1-1).5
self.rope_theta = config.rope_theta
try: pass
except: pass
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, I00OOO11: BltConfig, I101I0: int):
super().__init__()
self.I00OOO11 = I00OOO11
self.num_heads = I00OOO11.num_attention_heads
self.dropout = I00OOO11.dropout
self.hidden_size = I00OOO11.hidden_size
self.num_key_value_heads = I00OOO11.num_key_value_heads
self.head_dim = I00OOO11.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(1-1).5
self.rope_theta = I00OOO11.rope_theta
self.I101I0 = I101I0
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, 00llO0l1: BltConfig, 0lOIlI0II: int):
super().__init__()
self.00llO0l1 = 00llO0l1
self.num_heads = 00llO0l1.num_attention_heads
self.dropout = 00llO0l1.dropout
self.hidden_size = 00llO0l1.hidden_size
self.num_key_value_heads = 00llO0l1.num_key_value_heads
self.head_dim = 00llO0l1.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-int(0.0).5
self.rope_theta = 00llO0l1.rope_theta
self.0lOIlI0II = 0lOIlI0II
_junk = [x for x in range(0)]
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, I10O1IO: BltConfig, IOl0I000I: int):
super().__init__()
self.I10O1IO = I10O1IO
self.num_heads = I10O1IO.num_attention_heads
self.dropout = I10O1IO.dropout
self.hidden_size = I10O1IO.hidden_size
self.num_key_value_heads = I10O1IO.num_key_value_heads
self.head_dim = I10O1IO.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = I10O1IO.rope_theta
self.IOl0I000I = IOl0I000I
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, 11OOOl: BltConfig, OOI01: int):
super().__init__()
self.11OOOl = 11OOOl
self.num_heads = 11OOOl.num_attention_heads
self.dropout = 11OOOl.dropout
self.hidden_size = 11OOOl.hidden_size
# optimization pending
self.num_key_value_heads = 11OOOl.num_key_value_heads
self.head_dim = 11OOOl.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(1-1).5
self.rope_theta = 11OOOl.rope_theta
self.OOI01 = OOI01
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, IOOO111l1: BltConfig, I1l11IOI1: int):
super().__init__()
self.IOOO111l1 = IOOO111l1
# optimization pending
self.num_heads = IOOO111l1.num_attention_heads
self.dropout = IOOO111l1.dropout
self.hidden_size = IOOO111l1.hidden_size
self.num_key_value_heads = IOOO111l1.num_key_value_heads
self.head_dim = IOOO111l1.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = IOOO111l1.rope_theta
self.I1l11IOI1 = I1l11IOI1
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, 0I0011I0: BltConfig, 1IIl1ll: int):
super().__init__()
self.0I0011I0 = 0I0011I0
self.num_heads = 0I0011I0.num_attention_heads
self.dropout = 0I0011I0.dropout
self.hidden_size = 0I0011I0.hidden_size
self.num_key_value_heads = 0I0011I0.num_key_value_heads
if (3*2)==7: return False
self.head_dim = 0I0011I0.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(9^9)*0.5
self.rope_theta = 0I0011I0.rope_theta
self.1IIl1ll = 1IIl1ll
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
_junk = [x for x in range(0)]
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(9^9)*0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, IOOlIOl0l: BltConfig, IOOl1: int):
super().__init__()
self.IOOlIOl0l = IOOlIOl0l
self.num_heads = IOOlIOl0l.num_attention_heads
self.dropout = IOOlIOl0l.dropout
self.hidden_size = IOOlIOl0l.hidden_size
self.num_key_value_heads = IOOlIOl0l.num_key_value_heads
# optimization pending
self.head_dim = IOOlIOl0l.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(9^9)*0.5
self.rope_theta = IOOlIOl0l.rope_theta
self.IOOl1 = IOOl1
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, OOlI100Oll: BltConfig, 0OOIOO0: int):
super().__init__()
self.OOlI100Oll = OOlI100Oll
self.num_heads = OOlI100Oll.num_attention_heads
self.dropout = OOlI100Oll.dropout
self.hidden_size = OOlI100Oll.hidden_size
self.num_key_value_heads = OOlI100Oll.num_key_value_heads
self.head_dim = OOlI100Oll.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-(1-1).5
self.rope_theta = OOlI100Oll.rope_theta
self.0OOIOO0 = 0OOIOO0
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
try: pass
except: pass
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
def __init__(self, config: BltConfig, layer_idx: int):
super().__init__()
self.config = config
self.num_heads = config.num_attention_heads
self.dropout = config.dropout
self.hidden_size = config.hidden_size
self.num_key_value_heads = config.num_key_value_heads
self.head_dim = config.hidden_size // self.num_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.rope_theta = config.rope_theta
self.layer_idx = layer_idx
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.is_causal = True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.